+ ServerStream<ReadRowsResponse> serverStream =
+ readClient.readRowsCallable().call(readRowsRequest);
- while (responseIterator.hasNext()) {
- ReadRowsResponse response = responseIterator.next();
+ for (ReadRowsResponse response : serverStream) {
rowCount += response.getRowCount();
if (rowCount >= rowOffset) {
return rowOffset;
@@ -1722,92 +1880,6 @@ private long ReadStreamToOffset(ReadStream readStream, long rowOffset) {
return rowCount;
}
- /**
- * Reads all the rows from the specified table.
- *
- * For every row, the consumer is called for processing.
- *
- * @param table
- * @param snapshotInMillis Optional. If specified, all rows up to timestamp will be returned.
- * @param filter Optional. If specified, it will be used to restrict returned data.
- * @param consumer that receives all Avro rows.
- * @throws IOException
- */
- private void ProcessRowsAtSnapshot(
- String table, Long snapshotInMillis, String filter, AvroRowConsumer consumer)
- throws IOException {
- Preconditions.checkNotNull(table);
- Preconditions.checkNotNull(consumer);
-
- CreateReadSessionRequest.Builder createSessionRequestBuilder =
- CreateReadSessionRequest.newBuilder()
- .setParent(parentProjectId)
- .setMaxStreamCount(1)
- .setReadSession(
- ReadSession.newBuilder().setTable(table).setDataFormat(DataFormat.AVRO).build());
-
- if (snapshotInMillis != null) {
- Timestamp snapshotTimestamp =
- Timestamp.newBuilder()
- .setSeconds(snapshotInMillis / 1_000)
- .setNanos((int) ((snapshotInMillis % 1000) * 1000000))
- .build();
- createSessionRequestBuilder
- .getReadSessionBuilder()
- .setTableModifiers(
- TableModifiers.newBuilder().setSnapshotTime(snapshotTimestamp).build());
- }
-
- if (filter != null && !filter.isEmpty()) {
- createSessionRequestBuilder
- .getReadSessionBuilder()
- .setReadOptions(TableReadOptions.newBuilder().setRowRestriction(filter).build());
- }
-
- ReadSession session = client.createReadSession(createSessionRequestBuilder.build());
- assertEquals(
- String.format(
- "Did not receive expected number of streams for table '%s' CreateReadSession"
- + " response:%n%s",
- table, session.toString()),
- 1,
- session.getStreamsCount());
-
- ReadRowsRequest readRowsRequest =
- ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
-
- SimpleRowReaderAvro reader =
- new SimpleRowReaderAvro(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
-
- ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
- for (ReadRowsResponse response : stream) {
- reader.processRows(response.getAvroRows(), consumer);
- }
- }
-
- /**
- * Reads all the rows from the specified table and returns a list as generic Avro records.
- *
- * @param table
- * @param filter Optional. If specified, it will be used to restrict returned data.
- * @return
- */
- List<GenericData.Record> ReadAllRows(String table, String filter) throws IOException {
- final List<GenericData.Record> rows = new ArrayList<>();
- ProcessRowsAtSnapshot(
- /* table= */ table,
- /* snapshotInMillis= */ null,
- /* filter= */ filter,
- new AvroRowConsumer() {
- @Override
- public void accept(GenericData.Record record) {
- // clone the record since that reference will be reused by the reader.
- rows.add(new GenericRecordBuilder(record).build());
- }
- });
- return rows;
- }
-
/**
* Runs a query job with WRITE_APPEND disposition to the destination table and returns the
* successfully completed job.
@@ -1817,9 +1889,9 @@ public void accept(GenericData.Record record) {
* @return
* @throws InterruptedException
*/
- private Job RunQueryAppendJobAndExpectSuccess(TableId destinationTableId, String query)
+ private Job runQueryAppendJobAndExpectSuccess(TableId destinationTableId, String query)
throws InterruptedException {
- return RunQueryJobAndExpectSuccess(
+ return runQueryJobAndExpectSuccess(
QueryJobConfiguration.newBuilder(query)
.setDestinationTable(destinationTableId)
.setUseQueryCache(false)
@@ -1835,7 +1907,7 @@ private Job RunQueryAppendJobAndExpectSuccess(TableId destinationTableId, String
* @return
* @throws InterruptedException
*/
- private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration)
+ private Job runQueryJobAndExpectSuccess(QueryJobConfiguration configuration)
throws InterruptedException {
Job job = bigquery.create(JobInfo.of(configuration));
Job completedJob =
@@ -1845,40 +1917,10 @@ private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration)
assertNotNull(completedJob);
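+ // JUnit 4's assertNull(message, object) becomes JUnit Jupiter's
+ // assertNull(actual, message); hence the reordered arguments below.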
assertNull(
+ /* object= */ completedJob.getStatus().getError(),
/* message= */ "Received a job status that is not a success: "
- + completedJob.getStatus().toString(),
- /* object= */ completedJob.getStatus().getError());
+ + completedJob.getStatus().toString());
return completedJob;
}
-
- static ServiceAccountCredentials loadCredentials(String credentialFile) {
- try {
- InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes());
- return ServiceAccountCredentials.fromStream(keyStream);
- } catch (IOException e) {
- fail("Couldn't create fake JSON credentials.");
- }
- return null;
- }
-
- static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
- private static final Object lock = new Object();
- private static int batchCount = 0;
-
- public void onSuccess(AppendRowsResponse response) {
- synchronized (lock) {
- if (response.hasError()) {
- System.out.format("Error: %s\n", response.getError());
- } else {
- ++batchCount;
- System.out.format("Wrote batch %d\n", batchCount);
- }
- }
- }
-
- public void onFailure(Throwable throwable) {
- System.out.format("Error: %s\n", throwable.toString());
- }
- }
}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java
similarity index 74%
rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java
rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java
index 756dfcc793..b7e46be405 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryStorageWriteClientTest.java
@@ -16,14 +16,21 @@
package com.google.cloud.bigquery.storage.v1.it;
+import static com.google.cloud.bigquery.storage.v1.it.util.Helper.EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT;
+import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_COLUMN_NAME;
+import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME;
import static com.google.common.truth.Truth.assertThat;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import com.google.api.client.util.Sleeper;
import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutures;
import com.google.api.gax.retrying.RetrySettings;
import com.google.api.gax.rpc.FixedHeaderProvider;
import com.google.api.gax.rpc.HeaderProvider;
@@ -40,13 +47,20 @@
import com.google.cloud.bigquery.storage.v1.Exceptions.OffsetOutOfRange;
import com.google.cloud.bigquery.storage.v1.Exceptions.SchemaMismatchedException;
import com.google.cloud.bigquery.storage.v1.Exceptions.StreamFinalizedException;
+import com.google.cloud.bigquery.storage.v1.it.util.BigQueryResource;
+import com.google.cloud.bigquery.storage.v1.it.util.Helper;
import com.google.cloud.bigquery.testing.RemoteBigQueryHelper;
import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.MoreExecutors;
import com.google.protobuf.ByteString;
+import com.google.protobuf.DescriptorProtos;
import com.google.protobuf.DescriptorProtos.DescriptorProto;
import com.google.protobuf.DescriptorProtos.FieldDescriptorProto;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Descriptors.DescriptorValidationException;
+import com.google.protobuf.DynamicMessage;
+import com.google.protobuf.Int64Value;
+import com.google.protobuf.Message;
import io.grpc.Status;
import io.grpc.Status.Code;
import java.io.ByteArrayOutputStream;
@@ -64,6 +78,7 @@
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.*;
@@ -74,17 +89,20 @@
import org.apache.arrow.vector.ipc.message.MessageSerializer;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.FieldType;
+import org.apache.avro.generic.GenericData;
import org.json.JSONArray;
import org.json.JSONObject;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.parallel.Execution;
+import org.junit.jupiter.api.parallel.ExecutionMode;
/** Integration tests for BigQuery Write API. */
-public class ITBigQueryWriteManualClientTest {
+@Execution(ExecutionMode.SAME_THREAD)
+class ITBigQueryStorageWriteClientTest {
private static final Logger LOG =
- Logger.getLogger(ITBigQueryWriteManualClientTest.class.getName());
+ Logger.getLogger(ITBigQueryStorageWriteClientTest.class.getName());
private static final String DATASET = RemoteBigQueryHelper.generateDatasetName();
private static final String DATASET_EU = RemoteBigQueryHelper.generateDatasetName();
private static final String TABLE = "testtable";
@@ -94,7 +112,9 @@ public class ITBigQueryWriteManualClientTest {
private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset";
- private static BigQueryWriteClient client;
+ private static BigQueryReadClient readClient;
+ private static BigQueryWriteClient writeClient;
+ private static String parentProjectId;
private static TableInfo tableInfo;
private static TableInfo tableInfo2;
@@ -109,7 +129,48 @@ public class ITBigQueryWriteManualClientTest {
private static final BufferAllocator allocator = new RootAllocator();
- public class StringWithSecondsNanos {
+ // Arrow is a bit special in that timestamps are limited to nanosecond precision.
+ // The data will be padded to fit into the higher-precision columns.
+ private static final Object[][] INPUT_ARROW_WRITE_TIMESTAMPS =
+ new Object[][] {
+ {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, 1735734896123456789L},
+ {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, 1580646896123456789L},
+ {636467696123456L /* 1990-03-03T12:34:56.123456Z */, 636467696123456789L},
+ {165846896123456L /* 1975-04-04T12:34:56.123456Z */, 165846896123456789L}
+ };
+
+ // Arrow's higher-precision column is padded with extra 0's if configured to return
+ // ISO output for any picosecond-enabled column.
+ private static final Object[][] EXPECTED_ARROW_WRITE_TIMESTAMPS_ISO_OUTPUT =
+ new Object[][] {
+ {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789000Z"},
+ {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789000Z"},
+ {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789000Z"},
+ {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789000Z"}
+ };
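+
+ // A minimal sketch (hypothetical helper, illustrative only; not used by the
+ // tests): maps a microsecond epoch input above to the java.time Instant behind
+ // its inline ISO comment, e.g. 1735734896123456L -> 2025-01-01T12:34:56.123456Z.
+ private static java.time.Instant microsToInstant(long micros) {
+ return java.time.Instant.ofEpochSecond(micros / 1_000_000, (micros % 1_000_000) * 1_000);
+ }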
+
+ // Special case where users can use the Write API with Protobuf messages.
+ // The format is two fields: (1) seconds from epoch and (2) a subsecond fraction
+ // (millis, micros, nanos, or picos). This test case uses a picosecond fraction.
+ private static final Long[][] INPUT_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS =
+ new Long[][] {
+ {1735734896L, 123456789123L}, /* 2025-01-01T12:34:56.123456789123Z */
+ {1580646896L, 123456789123L}, /* 2020-02-02T12:34:56.123456789123Z */
+ {636467696L, 123456789123L}, /* 1990-03-03T12:34:56.123456789123Z */
+ {165846896L, 123456789123L} /* 1975-04-04T12:34:56.123456789123Z */
+ };
+
+ // Expected ISO 8601 output when using proto descriptors to write to BQ with picosecond precision
+ private static final String[]
+ EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT =
+ new String[] {
+ "2025-01-01T12:34:56.123456789123Z",
+ "2020-02-02T12:34:56.123456789123Z",
+ "1990-03-03T12:34:56.123456789123Z",
+ "1975-04-04T12:34:56.123456789123Z"
+ };
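+
+ // A minimal sketch (hypothetical helper, illustrative only): renders a
+ // (seconds, picos) input pair above as the expected ISO string below, e.g.
+ // (1735734896L, 123456789123L) -> "2025-01-01T12:34:56.123456789123Z".
+ private static String secondsAndPicosToIso(long seconds, long picos) {
+ return java.time.Instant.ofEpochSecond(seconds)
+ .toString()
+ .replace("Z", String.format(".%012dZ", picos));
+ }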
+
+ static class StringWithSecondsNanos {
public String foo;
public long seconds;
public int nanos;
@@ -124,11 +185,14 @@ public StringWithSecondsNanos(String fooParam, long secondsParam, int nanosParam
private static final HeaderProvider USER_AGENT_HEADER_PROVIDER =
FixedHeaderProvider.create("User-Agent", "my_product_name/1.0 (GPN:Samples;test)");
- @BeforeClass
- public static void beforeClass() throws IOException {
+ @BeforeAll
+ static void beforeAll() throws IOException {
+ readClient = BigQueryReadClient.create();
+
BigQueryWriteSettings settings =
BigQueryWriteSettings.newBuilder().setHeaderProvider(USER_AGENT_HEADER_PROVIDER).build();
- client = BigQueryWriteClient.create(settings);
+ writeClient = BigQueryWriteClient.create(settings);
+ parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId());
RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create();
bigquery = bigqueryHelper.getOptions().getService();
@@ -215,19 +279,25 @@ public static void beforeClass() throws IOException {
bigquery.create(tableInfoEU);
}
- @AfterClass
- public static void afterClass() {
- if (client != null) {
- client.close();
+ @AfterAll
+ static void afterAll() throws InterruptedException {
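+ // close() starts an orderly shutdown; awaitTermination then waits (here up to
+ // 10 seconds) for the client's background resources to terminate.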
+ if (writeClient != null) {
+ writeClient.close();
+ writeClient.awaitTermination(10, TimeUnit.SECONDS);
+ }
+
+ if (readClient != null) {
+ readClient.close();
+ readClient.awaitTermination(10, TimeUnit.SECONDS);
}
if (bigquery != null) {
RemoteBigQueryHelper.forceDelete(bigquery, DATASET);
- LOG.info("Deleted test dataset: " + DATASET);
+ RemoteBigQueryHelper.forceDelete(bigquery, DATASET_EU);
}
}
- ProtoRows CreateProtoRows(String[] messages) {
+ ProtoRows createProtoRows(String[] messages) {
ProtoRows.Builder rows = ProtoRows.newBuilder();
for (String message : messages) {
FooType foo = FooType.newBuilder().setFoo(message).build();
@@ -236,7 +306,7 @@ ProtoRows CreateProtoRows(String[] messages) {
return rows.build();
}
- ProtoSchema CreateProtoSchemaWithColField() {
+ ProtoSchema createProtoSchemaWithColField() {
return ProtoSchema.newBuilder()
.setProtoDescriptor(
DescriptorProto.newBuilder()
@@ -251,7 +321,7 @@ ProtoSchema CreateProtoSchemaWithColField() {
.build();
}
- ProtoRows CreateProtoOptionalRows(String[] messages) {
+ ProtoRows createProtoOptionalRows(String[] messages) {
ProtoRows.Builder rows = ProtoRows.newBuilder();
for (String message : messages) {
FooOptionalType foo = FooOptionalType.newBuilder().setFoo(message).build();
@@ -260,7 +330,7 @@ ProtoRows CreateProtoOptionalRows(String[] messages) {
return rows.build();
}
- ProtoRows CreateProtoRowsMultipleColumns(String[] messages) {
+ ProtoRows createProtoRowsMultipleColumns(String[] messages) {
ProtoRows.Builder rows = ProtoRows.newBuilder();
for (String message : messages) {
UpdatedFooType foo = UpdatedFooType.newBuilder().setFoo(message).setBar(message).build();
@@ -269,7 +339,7 @@ ProtoRows CreateProtoRowsMultipleColumns(String[] messages) {
return rows.build();
}
- ProtoRows CreateProtoRowsComplex(String[] messages) {
+ ProtoRows createProtoRowsComplex(String[] messages) {
ProtoRows.Builder rows = ProtoRows.newBuilder();
for (String message : messages) {
ComplicateType foo =
@@ -281,7 +351,7 @@ ProtoRows CreateProtoRowsComplex(String[] messages) {
return rows.build();
}
- ProtoRows CreateProtoRowsMixed(StringWithSecondsNanos[] messages) {
+ ProtoRows createProtoRowsMixed(StringWithSecondsNanos[] messages) {
ProtoRows.Builder rows = ProtoRows.newBuilder();
for (StringWithSecondsNanos message : messages) {
FooTimestampType datum =
@@ -299,65 +369,67 @@ ProtoRows CreateProtoRowsMixed(StringWithSecondsNanos[] messages) {
}
@Test
- public void testBatchWriteWithCommittedStreamEU()
+ void testBatchWriteWithCommittedStreamEU()
throws IOException, InterruptedException, ExecutionException {
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableIdEU)
.setWriteStream(
WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
.build());
- StreamWriter streamWriter =
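+ // The response futures are declared outside the try-with-resources block so
+ // they can be resolved and verified after the writer has been closed.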
+ ApiFuture<AppendRowsResponse> response1;
+ ApiFuture<AppendRowsResponse> response2;
+ try (StreamWriter streamWriter =
StreamWriter.newBuilder(writeStream.getName())
.setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor()))
- .build();
- LOG.info("Sending one message");
+ .build()) {
+ LOG.info("Sending one message");
- ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRows(new String[] {"aaa"}), 0);
- assertEquals(0, response.get().getAppendResult().getOffset().getValue());
+ ApiFuture<AppendRowsResponse> response =
+ streamWriter.append(createProtoRows(new String[] {"aaa"}), 0);
+ assertEquals(0, response.get().getAppendResult().getOffset().getValue());
- LOG.info("Sending two more messages");
- ApiFuture<AppendRowsResponse> response1 =
- streamWriter.append(CreateProtoRows(new String[] {"bbb", "ccc"}), 1);
- ApiFuture<AppendRowsResponse> response2 =
- streamWriter.append(CreateProtoRows(new String[] {"ddd"}), 3);
+ LOG.info("Sending two more messages");
+ response1 = streamWriter.append(createProtoRows(new String[] {"bbb", "ccc"}), 1);
+ response2 = streamWriter.append(createProtoRows(new String[] {"ddd"}), 3);
+ }
assertEquals(1, response1.get().getAppendResult().getOffset().getValue());
assertEquals(3, response2.get().getAppendResult().getOffset().getValue());
}
@Test
- public void testProto3OptionalBatchWriteWithCommittedStream()
+ void testProto3OptionalBatchWriteWithCommittedStream()
throws IOException, InterruptedException, ExecutionException {
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableId)
.setWriteStream(
WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
.build());
- StreamWriter streamWriter =
+ ApiFuture<AppendRowsResponse> response1;
+ ApiFuture<AppendRowsResponse> response2;
+ try (StreamWriter streamWriter =
StreamWriter.newBuilder(writeStream.getName())
.setWriterSchema(ProtoSchemaConverter.convert(FooOptionalType.getDescriptor()))
- .build();
- LOG.info("Sending one message");
+ .build()) {
+ LOG.info("Sending one message");
- ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoOptionalRows(new String[] {"aaa"}), 0);
- assertEquals(0, response.get().getAppendResult().getOffset().getValue());
+ ApiFuture<AppendRowsResponse> response =
+ streamWriter.append(createProtoOptionalRows(new String[] {"aaa"}), 0);
+ assertEquals(0, response.get().getAppendResult().getOffset().getValue());
- LOG.info("Sending two more messages");
- ApiFuture<AppendRowsResponse> response1 =
- streamWriter.append(CreateProtoOptionalRows(new String[] {"bbb", "ccc"}), 1);
- ApiFuture<AppendRowsResponse> response2 =
- streamWriter.append(CreateProtoOptionalRows(new String[] {""}), 3);
+ LOG.info("Sending two more messages");
+ response1 = streamWriter.append(createProtoOptionalRows(new String[] {"bbb", "ccc"}), 1);
+ response2 = streamWriter.append(createProtoOptionalRows(new String[] {""}), 3);
+ }
assertEquals(1, response1.get().getAppendResult().getOffset().getValue());
assertEquals(3, response2.get().getAppendResult().getOffset().getValue());
}
@Test
- public void testJsonStreamWriterCommittedStream()
+ void testJsonStreamWriterCommittedStream()
throws IOException,
InterruptedException,
ExecutionException,
@@ -382,7 +454,7 @@ public void testJsonStreamWriterCommittedStream()
bigquery.create(tableInfo);
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
@@ -443,12 +515,12 @@ public void testJsonStreamWriterCommittedStream()
assertEquals("bbb", iter.next().get(0).getStringValue());
assertEquals("ccc", iter.next().get(0).getStringValue());
assertEquals("ddd", iter.next().get(0).getStringValue());
- assertEquals(false, iter.hasNext());
+ assertFalse(iter.hasNext());
}
}
@Test
- public void testRowErrors()
+ void testRowErrors()
throws IOException,
InterruptedException,
ExecutionException,
@@ -469,60 +541,59 @@ public void testRowErrors()
.build();
bigquery.create(tableInfo);
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
- StreamWriter streamWriter =
+ ApiFuture<AppendRowsResponse> futureResponse1;
+ try (StreamWriter streamWriter =
StreamWriter.newBuilder(parent.toString() + "/_default")
.setWriterSchema(ProtoSchemaConverter.convert(FooTimestampType.getDescriptor()))
- .build();
+ .build()) {
- LOG.info("Sending three messages");
- StringWithSecondsNanos[] myBadList = {
- new StringWithSecondsNanos("aaabbbcccddd", 1663821424, 0),
- new StringWithSecondsNanos("bbb", Long.MIN_VALUE, 0),
- new StringWithSecondsNanos("cccdddeeefffggg", 1663621424, 0)
- };
- ApiFuture<AppendRowsResponse> futureResponse =
- streamWriter.append(CreateProtoRowsMixed(myBadList), -1);
- AppendRowsResponse actualResponse = null;
- try {
- actualResponse = futureResponse.get();
- } catch (Throwable t) {
- assertTrue(t instanceof ExecutionException);
- t = t.getCause();
- assertTrue(t instanceof AppendSerializationError);
- AppendSerializationError e = (AppendSerializationError) t;
- LOG.info("Found row errors on stream: " + e.getStreamName());
- assertEquals(
- "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field"
- + " foo.",
- e.getRowIndexToErrorMessage().get(0));
- assertEquals(
- "Timestamp field value is out of range: -9223372036854775808 on field bar.",
- e.getRowIndexToErrorMessage().get(1));
- assertEquals(
- "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field"
- + " foo.",
- e.getRowIndexToErrorMessage().get(2));
- for (Map.Entry<Integer, String> entry : e.getRowIndexToErrorMessage().entrySet()) {
- LOG.info("Bad row index: " + entry.getKey() + ", has problem: " + entry.getValue());
+ LOG.info("Sending three messages");
+ StringWithSecondsNanos[] myBadList = {
+ new StringWithSecondsNanos("aaabbbcccddd", 1663821424, 0),
+ new StringWithSecondsNanos("bbb", Long.MIN_VALUE, 0),
+ new StringWithSecondsNanos("cccdddeeefffggg", 1663621424, 0)
+ };
+ ApiFuture<AppendRowsResponse> futureResponse =
+ streamWriter.append(createProtoRowsMixed(myBadList), -1);
+ AppendRowsResponse actualResponse = null;
+ try {
+ actualResponse = futureResponse.get();
+ } catch (Throwable t) {
+ assertTrue(t instanceof ExecutionException);
+ t = t.getCause();
+ assertTrue(t instanceof AppendSerializationError);
+ AppendSerializationError e = (AppendSerializationError) t;
+ LOG.info("Found row errors on stream: " + e.getStreamName());
+ assertEquals(
+ "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field"
+ + " foo.",
+ e.getRowIndexToErrorMessage().get(0));
+ assertEquals(
+ "Timestamp field value is out of range: -9223372036854775808 on field bar.",
+ e.getRowIndexToErrorMessage().get(1));
+ assertEquals(
+ "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field"
+ + " foo.",
+ e.getRowIndexToErrorMessage().get(2));
+ for (Map.Entry<Integer, String> entry : e.getRowIndexToErrorMessage().entrySet()) {
+ LOG.info("Bad row index: " + entry.getKey() + ", has problem: " + entry.getValue());
+ }
}
+ assertNull(actualResponse);
+
+ LOG.info("Resending with three good messages");
+ StringWithSecondsNanos[] myGoodList = {
+ new StringWithSecondsNanos("aaa", 1664821424, 0),
+ new StringWithSecondsNanos("bbb", 1663821424, 0),
+ new StringWithSecondsNanos("ccc", 1664801424, 0)
+ };
+ futureResponse1 = streamWriter.append(createProtoRowsMixed(myGoodList), -1);
}
- assertEquals(null, actualResponse);
-
- LOG.info("Resending with three good messages");
- StringWithSecondsNanos[] myGoodList = {
- new StringWithSecondsNanos("aaa", 1664821424, 0),
- new StringWithSecondsNanos("bbb", 1663821424, 0),
- new StringWithSecondsNanos("ccc", 1664801424, 0)
- };
- ApiFuture<AppendRowsResponse> futureResponse1 =
- streamWriter.append(CreateProtoRowsMixed(myGoodList), -1);
assertEquals(0, futureResponse1.get().getAppendResult().getOffset().getValue());
TableResult result =
bigquery.listTableData(tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
- Iterator<FieldValueList> iterDump = result.getValues().iterator();
- while (iterDump.hasNext()) {
- FieldValueList currentRow = iterDump.next();
+ for (FieldValueList currentRow : result.getValues()) {
LOG.info("Table row contains " + currentRow.size() + " field values.");
LOG.info("Table column has foo: " + currentRow.get(0).getStringValue());
LOG.info("Table column has bar: " + currentRow.get(1).getTimestampValue());
@@ -538,11 +609,11 @@ public void testRowErrors()
currentRow = iter.next();
assertEquals("ccc", currentRow.get(0).getStringValue());
assertEquals(1664801424000000L, currentRow.get(1).getTimestampValue());
- assertEquals(false, iter.hasNext());
+ assertFalse(iter.hasNext());
}
@Test
- public void testRequestProfilerWithCommittedStream()
+ void testRequestProfilerWithCommittedStream()
throws DescriptorValidationException, IOException, InterruptedException {
String tableName = "TestProfiler";
TableId tableId = TableId.of(DATASET, tableName);
@@ -553,7 +624,7 @@ public void testRequestProfilerWithCommittedStream()
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
@@ -585,17 +656,17 @@ public void testRequestProfilerWithCommittedStream()
LOG.info("Waiting for all responses to come back");
for (int i = 0; i < totalRequest; i++) {
try {
- Assert.assertEquals(
+ assertEquals(
allResponses.get(i).get().getAppendResult().getOffset().getValue(), i * rowBatch);
} catch (ExecutionException ex) {
- Assert.fail("Unexpected error " + ex);
+ fail("Unexpected error " + ex);
}
}
RequestProfiler.disableAndResetProfiler();
}
@Test
- public void testJsonStreamWriterWithDefaultSchema()
+ void testJsonStreamWriterWithDefaultSchema()
throws IOException,
InterruptedException,
ExecutionException,
@@ -633,7 +704,7 @@ public void testJsonStreamWriterWithDefaultSchema()
// Create JsonStreamWriter with newBuilder(streamOrTable, client)
try (JsonStreamWriter jsonStreamWriter =
- JsonStreamWriter.newBuilder(parent.toString(), client)
+ JsonStreamWriter.newBuilder(parent.toString(), writeClient)
.setIgnoreUnknownFields(true)
.build()) {
LOG.info("Sending one message");
@@ -696,9 +767,9 @@ public void testJsonStreamWriterWithDefaultSchema()
ApiFuture<AppendRowsResponse> response3 = jsonStreamWriter.append(jsonArr3, -1);
LOG.info("Sending one more message");
ApiFuture<AppendRowsResponse> response4 = jsonStreamWriter.append(jsonArr4, -1);
- Assert.assertFalse(response2.get().getAppendResult().hasOffset());
- Assert.assertFalse(response3.get().getAppendResult().hasOffset());
- Assert.assertFalse(response4.get().getAppendResult().hasOffset());
+ assertFalse(response2.get().getAppendResult().hasOffset());
+ assertFalse(response3.get().getAppendResult().hasOffset());
+ assertFalse(response4.get().getAppendResult().hasOffset());
TableResult result =
bigquery.listTableData(
@@ -716,28 +787,28 @@ public void testJsonStreamWriterWithDefaultSchema()
FieldValueList currentRow2 = iter.next();
assertEquals("YQ==", currentRow2.get(3).getRepeatedValue().get(0).getStringValue());
assertEquals("Yg==", currentRow2.get(3).getRepeatedValue().get(1).getStringValue());
- assertEquals(false, iter.hasNext());
+ assertFalse(iter.hasNext());
}
}
@Test
- public void testJsonStreamWriterWithDefaultSchemaNoTable() {
+ void testJsonStreamWriterWithDefaultSchemaNoTable() {
String tableName = "JsonStreamWriterWithDefaultSchemaNoTable";
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
// Create JsonStreamWriter with newBuilder(streamOrTable, client)
- try {
- JsonStreamWriter jsonStreamWriter =
- JsonStreamWriter.newBuilder(parent.toString(), client)
- .setIgnoreUnknownFields(true)
- .build();
+ try (JsonStreamWriter ignore =
+ JsonStreamWriter.newBuilder(parent.toString(), writeClient)
+ .setIgnoreUnknownFields(true)
+ .build()) {
+ // Do nothing
} catch (Exception exception) {
assertTrue(exception.getMessage().contains("it may not exist"));
}
}
@Test
- public void testJsonStreamWriterWithDefaultStream()
+ void testJsonStreamWriterWithDefaultStream()
throws IOException,
InterruptedException,
ExecutionException,
@@ -870,9 +941,9 @@ public void testJsonStreamWriterWithDefaultStream()
ApiFuture<AppendRowsResponse> response3 = jsonStreamWriter.append(jsonArr3, -1);
LOG.info("Sending one more message");
ApiFuture<AppendRowsResponse> response4 = jsonStreamWriter.append(jsonArr4, -1);
- Assert.assertFalse(response2.get().getAppendResult().hasOffset());
- Assert.assertFalse(response3.get().getAppendResult().hasOffset());
- Assert.assertFalse(response4.get().getAppendResult().hasOffset());
+ assertFalse(response2.get().getAppendResult().hasOffset());
+ assertFalse(response3.get().getAppendResult().hasOffset());
+ assertFalse(response4.get().getAppendResult().hasOffset());
TableResult result =
bigquery.listTableData(
@@ -898,12 +969,12 @@ public void testJsonStreamWriterWithDefaultStream()
FieldValueList currentRow2 = iter.next();
assertEquals("YQ==", currentRow2.get(3).getRepeatedValue().get(0).getStringValue());
assertEquals("Yg==", currentRow2.get(3).getRepeatedValue().get(1).getStringValue());
- assertEquals(false, iter.hasNext());
+ assertFalse(iter.hasNext());
}
}
@Test
- public void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven()
+ void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven()
throws IOException,
InterruptedException,
ExecutionException,
@@ -918,7 +989,7 @@ public void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven()
TableInfo.newBuilder(TableId.of(DATASET, tableName), defaultValueTableDefinition).build();
bigquery.create(tableInfo);
try (JsonStreamWriter jsonStreamWriter =
- JsonStreamWriter.newBuilder(defaultTableId, client)
+ JsonStreamWriter.newBuilder(defaultTableId, writeClient)
.setDefaultMissingValueInterpretation(MissingValueInterpretation.DEFAULT_VALUE)
.build()) {
testJsonStreamWriterForDefaultValue(jsonStreamWriter);
@@ -926,7 +997,7 @@ public void testJsonDefaultStreamOnTableWithDefaultValue_SchemaNotGiven()
}
@Test
- public void testJsonExclusiveStreamOnTableWithDefaultValue_GiveTableSchema()
+ void testJsonExclusiveStreamOnTableWithDefaultValue_GiveTableSchema()
throws IOException,
InterruptedException,
ExecutionException,
@@ -941,7 +1012,7 @@ public void testJsonExclusiveStreamOnTableWithDefaultValue_GiveTableSchema()
TableInfo.newBuilder(TableId.of(DATASET, tableName), defaultValueTableDefinition).build();
bigquery.create(tableInfo);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(exclusiveTableId)
.setWriteStream(
@@ -1005,18 +1076,18 @@ private void testJsonStreamWriterForDefaultValue(JsonStreamWriter jsonStreamWrit
currentRow = iter.next();
assertEquals("default_value_for_test", currentRow.get(0).getStringValue());
- assertEquals(null, currentRow.get(1).getValue());
+ assertNull(currentRow.get(1).getValue());
assertFalse(currentRow.get(2).getStringValue().isEmpty());
// Check whether the recorded value is up to date enough.
parsedInstant =
Instant.ofEpochSecond(Double.valueOf(currentRow.get(2).getStringValue()).longValue());
assertTrue(parsedInstant.isAfter(Instant.now().minus(1, ChronoUnit.HOURS)));
- assertEquals(false, iter.hasNext());
+ assertFalse(iter.hasNext());
}
@Test
- public void testStreamWriterWithDefaultValue() throws ExecutionException, InterruptedException {
+ void testStreamWriterWithDefaultValue() throws ExecutionException, InterruptedException {
String tableName = "streamWriterWithDefaultValue";
String exclusiveTableId =
String.format(
@@ -1071,7 +1142,7 @@ public void testStreamWriterWithDefaultValue() throws ExecutionException, Interr
currentRow = iter.next();
assertEquals("default_value_for_test", currentRow.get(0).getStringValue());
- assertEquals(null, currentRow.get(1).getValue());
+ assertNull(currentRow.get(1).getValue());
assertFalse(currentRow.get(2).getStringValue().isEmpty());
// Check whether the recorded value is up to date enough.
Instant parsedInstant =
@@ -1083,13 +1154,13 @@ public void testStreamWriterWithDefaultValue() throws ExecutionException, Interr
}
@Test
- public void testArrowIngestionWithSerializedInput()
+ void testArrowIngestionWithSerializedInput()
throws IOException, InterruptedException, ExecutionException, TimeoutException {
testArrowIngestion(/* serializedInput= */ true);
}
@Test
- public void testArrowIngestionWithUnSerializedInput()
+ void testArrowIngestionWithUnSerializedInput()
throws IOException, InterruptedException, ExecutionException, TimeoutException {
testArrowIngestion(/* serializedInput= */ false);
}
@@ -1178,7 +1249,7 @@ private void testArrowIngestion(boolean serializedInput)
}
if (serializedInput) {
try (StreamWriter streamWriter =
- StreamWriter.newBuilder(tableId + "/_default", client)
+ StreamWriter.newBuilder(tableId + "/_default", writeClient)
.setWriterSchema(v1ArrowSchema)
.setTraceId(TEST_TRACE_ID)
.setMaxRetryDuration(java.time.Duration.ofSeconds(5))
@@ -1195,7 +1266,7 @@ private void testArrowIngestion(boolean serializedInput)
}
} else {
try (StreamWriter streamWriter =
- StreamWriter.newBuilder(tableId + "/_default", client)
+ StreamWriter.newBuilder(tableId + "/_default", writeClient)
.setWriterSchema(arrowSchema)
.setTraceId(TEST_TRACE_ID)
.setMaxRetryDuration(java.time.Duration.ofSeconds(5))
@@ -1218,21 +1289,21 @@ private void testArrowIngestion(boolean serializedInput)
FieldValueList currentRow = iter.next();
assertEquals("A", currentRow.get(0).getStringValue());
assertEquals("1", currentRow.get(1).getStringValue());
- assertEquals(true, currentRow.get(2).getBooleanValue());
+ assertTrue(currentRow.get(2).getBooleanValue());
currentRow = iter.next();
assertEquals("B", currentRow.get(0).getStringValue());
assertEquals("2", currentRow.get(1).getStringValue());
- assertEquals(false, currentRow.get(2).getBooleanValue());
+ assertFalse(currentRow.get(2).getBooleanValue());
currentRow = iter.next();
assertEquals("C", currentRow.get(0).getStringValue());
assertEquals("3", currentRow.get(1).getStringValue());
- assertEquals(true, currentRow.get(2).getBooleanValue());
- assertEquals(false, iter.hasNext());
+ assertTrue(currentRow.get(2).getBooleanValue());
+ assertFalse(iter.hasNext());
}
// This test runs about 1 min.
@Test
- public void testJsonStreamWriterWithMessagesOver10M()
+ void testJsonStreamWriterWithMessagesOver10M()
throws IOException,
InterruptedException,
ExecutionException,
@@ -1246,7 +1317,7 @@ public void testJsonStreamWriterWithMessagesOver10M()
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
@@ -1274,16 +1345,16 @@ public void testJsonStreamWriterWithMessagesOver10M()
LOG.info("Waiting for all responses to come back");
for (int i = 0; i < totalRequest; i++) {
try {
- Assert.assertEquals(
+ assertEquals(
allResponses.get(i).get().getAppendResult().getOffset().getValue(), i * rowBatch);
} catch (ExecutionException ex) {
- Assert.fail("Unexpected error " + ex);
+ fail("Unexpected error " + ex);
}
}
}
@Test
- public void testJsonStreamWriterSchemaUpdate()
+ void testJsonStreamWriterSchemaUpdate()
throws DescriptorValidationException, IOException, InterruptedException, ExecutionException {
String tableName = "SchemaUpdateTestTable";
TableId tableId = TableId.of(DATASET, tableName);
@@ -1294,14 +1365,14 @@ public void testJsonStreamWriterSchemaUpdate()
bigquery.create(tableInfo);
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
.build());
try (JsonStreamWriter jsonStreamWriter =
- JsonStreamWriter.newBuilder(writeStream.getName(), client).build()) {
+ JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) {
// write the 1st row
JSONObject foo = new JSONObject();
foo.put("col1", "aaa");
@@ -1367,7 +1438,7 @@ public void testJsonStreamWriterSchemaUpdate()
}
@Test
- public void testJsonStreamWriterSchemaUpdateConcurrent()
+ void testJsonStreamWriterSchemaUpdateConcurrent()
throws DescriptorValidationException, IOException, InterruptedException {
// Create test table and test stream
String tableName = "ConcurrentSchemaUpdateTestTable";
@@ -1379,7 +1450,7 @@ public void testJsonStreamWriterSchemaUpdateConcurrent()
bigquery.create(tableInfo);
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
@@ -1411,12 +1482,14 @@ public void testJsonStreamWriterSchemaUpdateConcurrent()
// Start writing using the JsonWriter
try (JsonStreamWriter jsonStreamWriter =
- JsonStreamWriter.newBuilder(writeStream.getName(), client).build()) {
+ JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) {
int numberOfThreads = 5;
+ CountDownLatch latch;
+ AtomicInteger next;
ExecutorService streamTaskExecutor = Executors.newFixedThreadPool(5);
- CountDownLatch latch = new CountDownLatch(numberOfThreads);
+ latch = new CountDownLatch(numberOfThreads);
// Used to verify data correctness
- AtomicInteger next = new AtomicInteger();
+ next = new AtomicInteger();
// update TableSchema async
Runnable updateTableSchemaTask =
@@ -1475,6 +1548,7 @@ public void testJsonStreamWriterSchemaUpdateConcurrent()
});
}
latch.await();
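+ // Shut down the executor so its worker threads can exit once all tasks complete.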
+ streamTaskExecutor.shutdown();
// verify that the last 5 rows streamed are ccc,ddd
Iterator<FieldValueList> rowsIter = bigquery.listTableData(tableId).getValues().iterator();
@@ -1492,19 +1566,15 @@ public void testJsonStreamWriterSchemaUpdateConcurrent()
}
@Test
- public void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap()
- throws DescriptorValidationException,
- ExecutionException,
- IOException,
- InterruptedException,
- ParseException {
+ void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap()
+ throws DescriptorValidationException, ExecutionException, IOException, InterruptedException {
String tableName = "SchemaUpdateMissingValueMapTestTable";
TableId tableId = TableId.of(DATASET, tableName);
tableInfo = TableInfo.newBuilder(tableId, defaultValueTableDefinition).build();
bigquery.create(tableInfo);
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
@@ -1517,7 +1587,7 @@ public void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap()
"date_with_default_to_current", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE);
try (JsonStreamWriter jsonStreamWriter =
- JsonStreamWriter.newBuilder(writeStream.getName(), client)
+ JsonStreamWriter.newBuilder(writeStream.getName(), writeClient)
.setMissingValueInterpretationMap(missingValueMap)
.build()) {
// Verify the missing value map
@@ -1618,7 +1688,7 @@ public void testJsonStreamWriterSchemaUpdateWithMissingValueInterpretationMap()
}
@Test
- public void testJsonStreamWriterWithFlexibleColumnName()
+ void testJsonStreamWriterWithFlexibleColumnName()
throws IOException,
InterruptedException,
ExecutionException,
@@ -1643,7 +1713,7 @@ public void testJsonStreamWriterWithFlexibleColumnName()
bigquery.create(tableInfo);
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
@@ -1704,12 +1774,12 @@ public void testJsonStreamWriterWithFlexibleColumnName()
assertEquals("bbb", iter.next().get(0).getStringValue());
assertEquals("ccc", iter.next().get(0).getStringValue());
assertEquals("ddd", iter.next().get(0).getStringValue());
- assertEquals(false, iter.hasNext());
+ assertFalse(iter.hasNext());
}
}
@Test
- public void testJsonStreamWriterWithNestedFlexibleColumnName()
+ void testJsonStreamWriterWithNestedFlexibleColumnName()
throws IOException,
InterruptedException,
ExecutionException,
@@ -1736,7 +1806,7 @@ public void testJsonStreamWriterWithNestedFlexibleColumnName()
bigquery.create(tableInfo);
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
@@ -1797,12 +1867,12 @@ public void testJsonStreamWriterWithNestedFlexibleColumnName()
FieldValueList lastRecord = lastRow.get(1).getRepeatedValue().get(0).getRecordValue();
assertEquals("nested-str2", lastRecord.get(0).getStringValue());
assertEquals("20", lastRecord.get(1).getStringValue());
- assertEquals(false, iter.hasNext());
+ assertFalse(iter.hasNext());
}
}
@Test
- public void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName()
+ void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName()
throws DescriptorValidationException, IOException, InterruptedException, ExecutionException {
String tableName = "SchemaUpdateFlexColumnTestTable";
TableId tableId = TableId.of(DATASET, tableName);
@@ -1813,14 +1883,14 @@ public void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName()
bigquery.create(tableInfo);
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(parent.toString())
.setWriteStream(
WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build())
.build());
try (JsonStreamWriter jsonStreamWriter =
- JsonStreamWriter.newBuilder(writeStream.getName(), client).build()) {
+ JsonStreamWriter.newBuilder(writeStream.getName(), writeClient).build()) {
// write the 1st row
JSONObject foo = new JSONObject();
foo.put("col1-列", "aaa");
@@ -1886,27 +1956,27 @@ public void testJsonStreamWriterSchemaUpdateWithFlexibleColumnName()
}
@Test
- public void testComplicateSchemaWithPendingStream()
+ void testComplicateSchemaWithPendingStream()
throws IOException, InterruptedException, ExecutionException {
LOG.info("Create a write stream");
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableId2)
.setWriteStream(WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build())
.build());
- FinalizeWriteStreamResponse finalizeResponse = FinalizeWriteStreamResponse.getDefaultInstance();
+ FinalizeWriteStreamResponse finalizeResponse;
try (StreamWriter streamWriter =
StreamWriter.newBuilder(writeStream.getName())
.setWriterSchema(ProtoSchemaConverter.convert(ComplicateType.getDescriptor()))
.build()) {
LOG.info("Sending two messages");
ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRowsComplex(new String[] {"aaa"}), 0L);
+ streamWriter.append(createProtoRowsComplex(new String[] {"aaa"}), 0L);
assertEquals(0, response.get().getAppendResult().getOffset().getValue());
ApiFuture<AppendRowsResponse> response2 =
- streamWriter.append(CreateProtoRowsComplex(new String[] {"bbb"}), 1L);
+ streamWriter.append(createProtoRowsComplex(new String[] {"bbb"}), 1L);
assertEquals(1, response2.get().getAppendResult().getOffset().getValue());
// Nothing showed up since rows are not committed.
@@ -1914,31 +1984,27 @@ public void testComplicateSchemaWithPendingStream()
bigquery.listTableData(
tableInfo2.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
Iterator<FieldValueList> iter = result.getValues().iterator();
- assertEquals(false, iter.hasNext());
+ assertFalse(iter.hasNext());
LOG.info("Finalize a write stream");
finalizeResponse =
- client.finalizeWriteStream(
+ writeClient.finalizeWriteStream(
FinalizeWriteStreamRequest.newBuilder().setName(writeStream.getName()).build());
ApiFuture<AppendRowsResponse> response3 =
- streamWriter.append(CreateProtoRows(new String[] {"ccc"}), 2L);
- try {
- response3.get();
- Assert.fail("Append to finalized stream should fail.");
- } catch (Exception expected) {
- LOG.info("Got exception: " + expected.toString());
- }
+ streamWriter.append(createProtoRows(new String[] {"ccc"}), 2L);
+ ExecutionException expected = assertThrows(ExecutionException.class, () -> response3.get());
+ LOG.info("Got exception: " + expected.toString());
}
assertEquals(2, finalizeResponse.getRowCount());
LOG.info("Commit a write stream");
BatchCommitWriteStreamsResponse batchCommitWriteStreamsResponse =
- client.batchCommitWriteStreams(
+ writeClient.batchCommitWriteStreams(
BatchCommitWriteStreamsRequest.newBuilder()
.setParent(tableId2)
.addWriteStreams(writeStream.getName())
.build());
- assertEquals(true, batchCommitWriteStreamsResponse.hasCommitTime());
+ assertTrue(batchCommitWriteStreamsResponse.hasCommitTime());
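+ // After the batch commit, rows written to the pending stream become visible to queries.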
TableResult queryResult =
bigquery.query(
QueryJobConfiguration.newBuilder("SELECT * from " + DATASET + '.' + TABLE2).build());
@@ -1958,9 +2024,9 @@ public void testComplicateSchemaWithPendingStream()
}
@Test
- public void testStreamError() throws IOException, InterruptedException, ExecutionException {
+ void testStreamError() throws IOException, InterruptedException, ExecutionException {
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableId)
.setWriteStream(
@@ -1971,30 +2037,26 @@ public void testStreamError() throws IOException, InterruptedException, Executio
.setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor()))
.build()) {
ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRows(new String[] {"aaa"}), -1L);
+ streamWriter.append(createProtoRows(new String[] {"aaa"}), -1L);
assertEquals(0L, response.get().getAppendResult().getOffset().getValue());
// Sending a bogus offset should cause an OUT_OF_RANGE error.
ApiFuture<AppendRowsResponse> response2 =
- streamWriter.append(CreateProtoRows(new String[] {"aaa"}), 100L);
- try {
- response2.get();
- Assert.fail("Should fail");
- } catch (ExecutionException e) {
- assertThat(e.getCause().getMessage())
- .contains("OUT_OF_RANGE: The offset is beyond stream, expected offset 1, received 100");
- }
+ streamWriter.append(createProtoRows(new String[] {"aaa"}), 100L);
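+ // assertThrows returns the thrown exception for inspection, replacing the
+ // previous try/fail/catch idiom.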
+ ExecutionException e = assertThrows(ExecutionException.class, () -> response2.get());
+ assertThat(e.getCause().getMessage())
+ .contains("OUT_OF_RANGE: The offset is beyond stream, expected offset 1, received 100");
// We can keep sending requests on the same stream.
ApiFuture<AppendRowsResponse> response3 =
- streamWriter.append(CreateProtoRows(new String[] {"aaa"}), -1L);
+ streamWriter.append(createProtoRows(new String[] {"aaa"}), -1L);
assertEquals(1L, response3.get().getAppendResult().getOffset().getValue());
} finally {
}
}
@Test
- public void testStreamSchemaMisMatchError() throws IOException, InterruptedException {
+ void testStreamSchemaMisMatchError() throws IOException, InterruptedException {
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableId)
.setWriteStream(
@@ -2008,25 +2070,20 @@ public void testStreamSchemaMisMatchError() throws IOException, InterruptedExcep
// Create a proto row that has more fields than the table schema defines, which should
// trigger the SCHEMA_MISMATCH_EXTRA_FIELDS error
ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
- try {
- response.get();
- Assert.fail("Should fail");
- } catch (ExecutionException e) {
- assertEquals(Exceptions.SchemaMismatchedException.class, e.getCause().getClass());
- Exceptions.SchemaMismatchedException actualError = (SchemaMismatchedException) e.getCause();
- assertNotNull(actualError.getStreamName());
- // This verifies that the Beam connector can consume this custom exception's grpc StatusCode
- assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode());
- }
+ streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
+ ExecutionException e = assertThrows(ExecutionException.class, () -> response.get());
+ assertEquals(Exceptions.SchemaMismatchedException.class, e.getCause().getClass());
+ Exceptions.SchemaMismatchedException actualError = (SchemaMismatchedException) e.getCause();
+ assertNotNull(actualError.getStreamName());
+ // This verifies that the Beam connector can consume this custom exception's grpc StatusCode
+ assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode());
}
}
@Test
- public void testStreamFinalizedError()
- throws IOException, InterruptedException, ExecutionException {
+ void testStreamFinalizedError() throws IOException, InterruptedException, ExecutionException {
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableId)
.setWriteStream(
@@ -2038,33 +2095,28 @@ public void testStreamFinalizedError()
.build()) {
// Append once before finalizing the stream
ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
+ streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
response.get();
// Finalize the stream in order to trigger STREAM_FINALIZED error
- client.finalizeWriteStream(
+ writeClient.finalizeWriteStream(
FinalizeWriteStreamRequest.newBuilder().setName(writeStream.getName()).build());
// Try to append to a finalized stream
ApiFuture<AppendRowsResponse> response2 =
- streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 1);
- try {
- response2.get();
- Assert.fail("Should fail");
- } catch (ExecutionException e) {
- assertEquals(Exceptions.StreamFinalizedException.class, e.getCause().getClass());
- Exceptions.StreamFinalizedException actualError = (StreamFinalizedException) e.getCause();
- assertNotNull(actualError.getStreamName());
- // This verifies that the Beam connector can consume this custom exception's grpc StatusCode
- assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode());
- assertThat(e.getCause().getMessage()).contains("Stream has been finalized");
- }
+ streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 1);
+ ExecutionException e = assertThrows(ExecutionException.class, () -> response2.get());
+ assertEquals(Exceptions.StreamFinalizedException.class, e.getCause().getClass());
+ Exceptions.StreamFinalizedException actualError = (StreamFinalizedException) e.getCause();
+ assertNotNull(actualError.getStreamName());
+ // This verifies that the Beam connector can consume this custom exception's grpc StatusCode
+ assertEquals(Code.INVALID_ARGUMENT, Status.fromThrowable(e.getCause()).getCode());
+ assertThat(e.getCause().getMessage()).contains("Stream has been finalized");
}
}
@Test
- public void testOffsetAlreadyExistsError()
- throws IOException, ExecutionException, InterruptedException {
+ void testOffsetAlreadyExistsError() throws IOException, ExecutionException, InterruptedException {
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableId)
.setWriteStream(
@@ -2076,31 +2128,27 @@ public void testOffsetAlreadyExistsError()
.build()) {
// Append once with correct offset
ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
+ streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
response.get();
// Append again with the same offset
ApiFuture<AppendRowsResponse> response2 =
- streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
- try {
- response2.get();
- Assert.fail("Should fail");
- } catch (ExecutionException e) {
- assertEquals(Exceptions.OffsetAlreadyExists.class, e.getCause().getClass());
- Exceptions.OffsetAlreadyExists actualError = (OffsetAlreadyExists) e.getCause();
- assertNotNull(actualError.getStreamName());
- assertEquals(1, actualError.getExpectedOffset());
- assertEquals(0, actualError.getActualOffset());
- assertEquals(Code.ALREADY_EXISTS, Status.fromThrowable(e.getCause()).getCode());
- assertThat(e.getCause().getMessage())
- .contains("The offset is within stream, expected offset 1, received 0");
- }
+ streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 0);
+ ExecutionException e = assertThrows(ExecutionException.class, () -> response2.get());
+ assertEquals(Exceptions.OffsetAlreadyExists.class, e.getCause().getClass());
+ Exceptions.OffsetAlreadyExists actualError = (OffsetAlreadyExists) e.getCause();
+ assertNotNull(actualError.getStreamName());
+ assertEquals(1, actualError.getExpectedOffset());
+ assertEquals(0, actualError.getActualOffset());
+ assertEquals(Code.ALREADY_EXISTS, Status.fromThrowable(e.getCause()).getCode());
+ assertThat(e.getCause().getMessage())
+ .contains("The offset is within stream, expected offset 1, received 0");
}
}
@Test
- public void testOffsetOutOfRangeError() throws IOException, InterruptedException {
+ void testOffsetOutOfRangeError() throws IOException, InterruptedException {
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableId)
.setWriteStream(
@@ -2112,27 +2160,23 @@ public void testOffsetOutOfRangeError() throws IOException, InterruptedException
.build()) {
// Append with an out of range offset
ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 10);
- try {
- response.get();
- Assert.fail("Should fail");
- } catch (ExecutionException e) {
- assertEquals(Exceptions.OffsetOutOfRange.class, e.getCause().getClass());
- Exceptions.OffsetOutOfRange actualError = (OffsetOutOfRange) e.getCause();
- assertNotNull(actualError.getStreamName());
- assertEquals(0, actualError.getExpectedOffset());
- assertEquals(10, actualError.getActualOffset());
- assertEquals(Code.OUT_OF_RANGE, Status.fromThrowable(e.getCause()).getCode());
- assertThat(e.getCause().getMessage())
- .contains("The offset is beyond stream, expected offset 0, received 10");
- }
+ streamWriter.append(createProtoRowsMultipleColumns(new String[] {"a"}), /* offset= */ 10);
+ ExecutionException e = assertThrows(ExecutionException.class, () -> response.get());
+ assertEquals(Exceptions.OffsetOutOfRange.class, e.getCause().getClass());
+ Exceptions.OffsetOutOfRange actualError = (OffsetOutOfRange) e.getCause();
+ assertNotNull(actualError.getStreamName());
+ assertEquals(0, actualError.getExpectedOffset());
+ assertEquals(10, actualError.getActualOffset());
+ assertEquals(Code.OUT_OF_RANGE, Status.fromThrowable(e.getCause()).getCode());
+ assertThat(e.getCause().getMessage())
+ .contains("The offset is beyond stream, expected offset 0, received 10");
}
}
@Test
- public void testStreamReconnect() throws IOException, InterruptedException, ExecutionException {
+ void testStreamReconnect() throws IOException, InterruptedException, ExecutionException {
WriteStream writeStream =
- client.createWriteStream(
+ writeClient.createWriteStream(
CreateWriteStreamRequest.newBuilder()
.setParent(tableId)
.setWriteStream(
@@ -2143,7 +2187,7 @@ public void testStreamReconnect() throws IOException, InterruptedException, Exec
.setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor()))
.build()) {
ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRows(new String[] {"aaa"}), 0L);
+ streamWriter.append(createProtoRows(new String[] {"aaa"}), 0L);
assertEquals(0L, response.get().getAppendResult().getOffset().getValue());
}
@@ -2154,13 +2198,13 @@ public void testStreamReconnect() throws IOException, InterruptedException, Exec
// Currently there is a bug that reconnection must wait 5 seconds to get the real row count.
Thread.sleep(5000L);
ApiFuture<AppendRowsResponse> response =
- streamWriter.append(CreateProtoRows(new String[] {"bbb"}), 1L);
+ streamWriter.append(createProtoRows(new String[] {"bbb"}), 1L);
assertEquals(1L, response.get().getAppendResult().getOffset().getValue());
}
}
@Test
- public void testMultiplexingMixedLocation()
+ void testMultiplexingMixedLocation()
throws IOException, InterruptedException, ExecutionException {
ConnectionWorkerPool.setOptions(
ConnectionWorkerPool.Settings.builder()
@@ -2199,11 +2243,11 @@ public void testMultiplexingMixedLocation()
.setTraceId(TEST_TRACE_ID)
.build();
ApiFuture<AppendRowsResponse> response1 =
- streamWriter1.append(CreateProtoRows(new String[] {"aaa"}));
+ streamWriter1.append(createProtoRows(new String[] {"aaa"}));
ApiFuture<AppendRowsResponse> response2 =
- streamWriter2.append(CreateProtoRowsComplex(new String[] {"aaa"}));
+ streamWriter2.append(createProtoRowsComplex(new String[] {"aaa"}));
ApiFuture<AppendRowsResponse> response3 =
- streamWriter3.append(CreateProtoRows(new String[] {"bbb"}));
+ streamWriter3.append(createProtoRows(new String[] {"bbb"}));
assertEquals(0L, response1.get().getAppendResult().getOffset().getValue());
assertEquals(0L, response2.get().getAppendResult().getOffset().getValue());
assertEquals(0L, response3.get().getAppendResult().getOffset().getValue());
@@ -2216,7 +2260,7 @@ public void testMultiplexingMixedLocation()
}
@Test
- public void testLargeRequest() throws IOException, InterruptedException, ExecutionException {
+ void testLargeRequest() throws IOException, InterruptedException, ExecutionException {
String tableName = "largeRequestTable";
TableId tableId = TableId.of(DATASET, tableName);
Field col1 = Field.newBuilder("col1", StandardSQLTypeName.STRING).build();
@@ -2227,7 +2271,7 @@ public void testLargeRequest() throws IOException, InterruptedException, Executi
TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
try (StreamWriter streamWriter =
StreamWriter.newBuilder(parent.toString() + "/_default")
- .setWriterSchema(CreateProtoSchemaWithColField())
+ .setWriterSchema(createProtoSchemaWithColField())
.build()) {
List<Integer> sizeSet = Arrays.asList(15 * 1024 * 1024, 1024);
List<ApiFuture<AppendRowsResponse>> responseList =
@@ -2238,7 +2282,7 @@ public void testLargeRequest() throws IOException, InterruptedException, Executi
LOG.info("Sending size: " + size);
responseList.add(
streamWriter.append(
- CreateProtoRows(
+ createProtoRows(
new String[] {
new String(new char[size]).replace('\u0000', (char) (r.nextInt(26) + 'a'))
})));
@@ -2255,4 +2299,289 @@ public void testLargeRequest() throws IOException, InterruptedException, Executi
assertEquals("50", queryIter.next().get(0).getStringValue());
}
}
+
+ // Tests that microsecond and picosecond timestamp inputs can be written to
+ // BQ through the Arrow write path
+ @Test
+ void timestamp_arrowWrite() throws IOException {
+ String tableName = "bqstorage_timestamp_write_arrow";
+ // Create a fresh table to write to instead of reusing one, so the test cannot
+ // fail due to issues with deleting data after a previous run. This increases
+ // the test duration but makes it more resilient to transient failures.
+ createTimestampTable(tableName);
+
+ // Define the fields as Arrow types that are compatible with BQ Schema types
+ List<org.apache.arrow.vector.types.pojo.Field> fields =
+ ImmutableList.of(
+ new org.apache.arrow.vector.types.pojo.Field(
+ TIMESTAMP_COLUMN_NAME,
+ FieldType.nullable(
+ new ArrowType.Timestamp(
+ org.apache.arrow.vector.types.TimeUnit.MICROSECOND, "UTC")),
+ null),
+ new org.apache.arrow.vector.types.pojo.Field(
+ TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME,
+ FieldType.nullable(
+ new ArrowType.Timestamp(
+ org.apache.arrow.vector.types.TimeUnit.NANOSECOND, "UTC")),
+ null));
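+ // Arrow has no picosecond TimeUnit, so NANOSECOND (its finest unit) carries
+ // the higher-precision column's values on the write path.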
+ org.apache.arrow.vector.types.pojo.Schema arrowSchema =
+ new org.apache.arrow.vector.types.pojo.Schema(fields, null);
+
+ int numRows = INPUT_ARROW_WRITE_TIMESTAMPS.length;
+ TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+ try (StreamWriter streamWriter =
+ StreamWriter.newBuilder(parent.toString() + "/_default")
+ .setWriterSchema(arrowSchema)
+ .build()) {
+ try (VectorSchemaRoot root = VectorSchemaRoot.create(arrowSchema, allocator)) {
+ TimeStampMicroTZVector timestampVector =
+ (TimeStampMicroTZVector) root.getVector(TIMESTAMP_COLUMN_NAME);
+ TimeStampNanoTZVector timestampHigherPrecisionVector =
+ (TimeStampNanoTZVector) root.getVector(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME);
+ timestampVector.allocateNew(numRows);
+ timestampHigherPrecisionVector.allocateNew(numRows);
+
+ for (int i = 0; i < numRows; i++) {
+ timestampVector.set(i, (Long) INPUT_ARROW_WRITE_TIMESTAMPS[i][0]);
+ timestampHigherPrecisionVector.set(i, (Long) INPUT_ARROW_WRITE_TIMESTAMPS[i][1]);
+ }
+ root.setRowCount(numRows);
+
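+ // Unload the populated vectors into an Arrow record batch; the StreamWriter
+ // below accepts the batch directly. Compression is disabled for this test.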
+ CompressionCodec codec =
+ NoCompressionCodec.Factory.INSTANCE.createCodec(
+ CompressionUtil.CodecType.NO_COMPRESSION);
+ VectorUnloader vectorUnloader =
+ new VectorUnloader(root, /* includeNullCount= */ true, codec, /* alignBuffers= */ true);
+ org.apache.arrow.vector.ipc.message.ArrowRecordBatch batch =
+ vectorUnloader.getRecordBatch();
+ // Asynchronous append.
+ ApiFuture<AppendRowsResponse> future = streamWriter.append(batch);
+ ApiFutures.addCallback(
+ future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor());
+ }
+ }
+ assertTimestamps(tableName, EXPECTED_ARROW_WRITE_TIMESTAMPS_ISO_OUTPUT);
+ }
+
+ // Tests that microsecond and picosecond timestamp inputs can be converted to
+ // protobuf and written to BQ
+ @Test
+ void timestamp_protobufWrite()
+ throws IOException, DescriptorValidationException, InterruptedException {
+ String tableName = "bqstorage_timestamp_write_protobuf_schema_aware";
+ // Create a fresh table to write to instead of reusing one, so the test cannot
+ // fail due to issues with deleting data after a previous run. This increases
+ // the test duration but makes it more resilient to transient failures.
+ createTimestampTable(tableName);
+
+ // Define the table schema so that the automatic converter can determine how
+ // to convert from JSON to Protobuf
+ TableFieldSchema testTimestamp =
+ TableFieldSchema.newBuilder()
+ .setName(TIMESTAMP_COLUMN_NAME)
+ .setType(TableFieldSchema.Type.TIMESTAMP)
+ .setMode(TableFieldSchema.Mode.NULLABLE)
+ .build();
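+ // Setting timestampPrecision to 12 marks the column below as a
+ // picosecond-precision TIMESTAMP.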
+ TableFieldSchema testTimestampHighPrecision =
+ TableFieldSchema.newBuilder()
+ .setName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME)
+ .setTimestampPrecision(
+ Int64Value.newBuilder().setValue(Helper.PICOSECOND_PRECISION).build())
+ .setType(TableFieldSchema.Type.TIMESTAMP)
+ .setMode(TableFieldSchema.Mode.NULLABLE)
+ .build();
+ TableSchema tableSchema =
+ TableSchema.newBuilder()
+ .addFields(testTimestamp)
+ .addFields(testTimestampHighPrecision)
+ .build();
+
+ TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+ try (JsonStreamWriter jsonStreamWriter =
+ JsonStreamWriter.newBuilder(parent.toString(), tableSchema).build()) {
+
+ // Creates a single payload to append (JsonArray with multiple JsonObjects)
+ // Each JsonObject contains a row (one micros, one picos)
+ JSONArray jsonArray = new JSONArray();
+ for (Object[] timestampData : Helper.INPUT_TIMESTAMPS) {
+ JSONObject row = new JSONObject();
+ row.put(TIMESTAMP_COLUMN_NAME, timestampData[0]);
+ row.put(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, timestampData[1]);
+ jsonArray.put(row);
+ }
+ ApiFuture<AppendRowsResponse> future = jsonStreamWriter.append(jsonArray);
+ ApiFutures.addCallback(
+ future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor());
+ }
+ assertTimestamps(tableName, EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT);
+ }
+
+ // Tests that users can write to BQ with a custom Protobuf message whose
+ // timestamp carries a seconds part and a fractional (picosecond) part
+ @Test
+ void timestamp_protobufWrite_customMessage_higherPrecision()
+ throws IOException, DescriptorValidationException {
+ String tableName = "bqstorage_timestamp_write_protobuf_custom_descriptor";
+ // Create a fresh table to write to instead of reusing one, so the test cannot
+ // fail due to issues with deleting data after a previous run. This increases
+ // the test duration but makes it more resilient to transient failures.
+ createTimestampTable(tableName);
+
+ /*
+ A sample protobuf format:
+ message Wrapper {
+ message TimestampPicos {
+ int64 seconds = 1;
+ int64 picoseconds = 2;
+ }
+ TimestampPicos timestampHigherPrecision = 3;
+ // ...
+ }
+ */
+ String wrapperProtoName = "Wrapper";
+ String timestampPicosProtoName = "TimestampPicos";
+ String secondsProtoName = "seconds";
+ String picosProtoName = "picoseconds";
+ DescriptorProto timestampPicosDescriptor =
+ DescriptorProto.newBuilder()
+ .setName(timestampPicosProtoName)
+ .addField(
+ DescriptorProtos.FieldDescriptorProto.newBuilder()
+ .setName(secondsProtoName)
+ .setNumber(1)
+ .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64)
+ .build())
+ .addField(
+ DescriptorProtos.FieldDescriptorProto.newBuilder()
+ .setName(picosProtoName)
+ .setNumber(2)
+ .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64)
+ .build())
+ .build();
+ DescriptorProto wrapperDescriptor =
+ DescriptorProto.newBuilder()
+ .setName(wrapperProtoName) // arbitrary message name
+ .addField(
+ DescriptorProtos.FieldDescriptorProto.newBuilder()
+ .setName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME)
+ .setNumber(3)
+ .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE)
+ .setTypeName(timestampPicosDescriptor.getName())
+ .build())
+ .addNestedType(timestampPicosDescriptor)
+ .build();
+ ProtoSchema protoSchema =
+ ProtoSchema.newBuilder().setProtoDescriptor(wrapperDescriptor).build();
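+ // The raw DescriptorProto above becomes the writer schema; the runtime
+ // Descriptor built below is only needed to construct DynamicMessages per row.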
+
+ TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+ try (StreamWriter streamWriter =
+ StreamWriter.newBuilder(parent.toString() + "/_default", writeClient)
+ .setWriterSchema(protoSchema)
+ .build()) {
+ DescriptorProtos.FileDescriptorProto fileProto =
+ DescriptorProtos.FileDescriptorProto.newBuilder()
+ .setName("test.proto") // dummy proto file
+ .addMessageType(wrapperDescriptor)
+ .build();
+
+ // Build the runtime descriptor (resolves types and names)
+ Descriptors.FileDescriptor file =
+ Descriptors.FileDescriptor.buildFrom(fileProto, new Descriptors.FileDescriptor[] {});
+
+ // Get the handle to the "wrapper" message type
+ Descriptors.Descriptor descriptor = file.findMessageTypeByName(wrapperProtoName);
+
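+ // Build each row as a DynamicMessage wrapping a nested TimestampPicos message
+ // (seconds + picoseconds), then serialize it into the ProtoRows payload.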
+ ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder();
+ for (Long[] timestampParts : INPUT_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS) {
+ Message message =
+ DynamicMessage.newBuilder(descriptor)
+ .setField(
+ descriptor.findFieldByName(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME),
+ DynamicMessage.newBuilder(
+ descriptor.findNestedTypeByName(timestampPicosProtoName))
+ .setField(
+ descriptor
+ .findNestedTypeByName(timestampPicosProtoName)
+ .findFieldByName(secondsProtoName),
+ timestampParts[0])
+ .setField(
+ descriptor
+ .findNestedTypeByName(timestampPicosProtoName)
+ .findFieldByName(picosProtoName),
+ timestampParts[1])
+ .build())
+ .build();
+ rowsBuilder.addSerializedRows(message.toByteString());
+ }
+ ApiFuture<AppendRowsResponse> future = streamWriter.append(rowsBuilder.build());
+ ApiFutures.addCallback(
+ future, new Helper.AppendCompleteCallback(), MoreExecutors.directExecutor());
+ }
+ String table =
+ BigQueryResource.formatTableResource(
+ ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+
+ // Read all the data as Avro GenericRecords
+ List<GenericData.Record> rows = Helper.readAllRows(readClient, parentProjectId, table, null);
+ List<String> timestampHigherPrecision =
+ rows.stream()
+ .map(x -> x.get(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME).toString())
+ .collect(Collectors.toList());
+ assertEquals(
+ EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT.length,
+ timestampHigherPrecision.size());
+ for (int i = 0;
+ i < EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT.length;
+ i++) {
+ assertEquals(
+ EXPECTED_PROTO_DESCRIPTOR_WRITE_TIMESTAMPS_HIGH_PRECISION_ISO_OUTPUT[i],
+ timestampHigherPrecision.get(i));
+ }
+ }
+
+ private void createTimestampTable(String tableName) {
+ Schema bqTableSchema =
+ Schema.of(
+ Field.newBuilder(TIMESTAMP_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP)
+ .setMode(Mode.NULLABLE)
+ .build(),
+ Field.newBuilder(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME, StandardSQLTypeName.TIMESTAMP)
+ .setMode(Mode.NULLABLE)
+ .setTimestampPrecision(Helper.PICOSECOND_PRECISION)
+ .build());
+
+ TableId testTableId = TableId.of(DATASET, tableName);
+ bigquery.create(
+ TableInfo.of(
+ testTableId, StandardTableDefinition.newBuilder().setSchema(bqTableSchema).build()));
+ }
+
+ private void assertTimestamps(String tableName, Object[][] expected) throws IOException {
+ String table =
+ BigQueryResource.formatTableResource(
+ ServiceOptions.getDefaultProjectId(), DATASET, tableName);
+
+ // Read all the data as Avro GenericRecords
+ List<GenericData.Record> rows = Helper.readAllRows(readClient, parentProjectId, table, null);
+
+ // Each row is expected to contain two fields:
+ // 1. the timestamp in microseconds as a Long, and 2. an ISO 8601 instant
+ // with picosecond precision
+ List<Long> timestamps =
+ rows.stream().map(x -> (Long) x.get(TIMESTAMP_COLUMN_NAME)).collect(Collectors.toList());
+ List<String> timestampHigherPrecision =
+ rows.stream()
+ .map(x -> x.get(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME).toString())
+ .collect(Collectors.toList());
+
+ assertEquals(expected.length, timestamps.size());
+ assertEquals(expected.length, timestampHigherPrecision.size());
+ for (int i = 0; i < timestampHigherPrecision.size(); i++) {
+ assertEquals(expected[i][0], timestamps.get(i));
+ assertEquals(expected[i][1], timestampHigherPrecision.get(i));
+ }
+ }
}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java
index a653143ed8..b3c84ec403 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryTimeEncoderTest.java
@@ -16,7 +16,8 @@
package com.google.cloud.bigquery.storage.v1.it;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import com.google.api.core.ApiFuture;
import com.google.cloud.ServiceOptions;
@@ -44,14 +45,14 @@
import java.time.LocalTime;
import java.util.Iterator;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
import org.json.JSONArray;
import org.json.JSONObject;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
-public class ITBigQueryTimeEncoderTest {
+class ITBigQueryTimeEncoderTest {
private static final String DATASET = RemoteBigQueryHelper.generateDatasetName();
private static final String TABLE = "testtable";
private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset";
@@ -60,8 +61,8 @@ public class ITBigQueryTimeEncoderTest {
private static TableInfo tableInfo;
private static BigQuery bigquery;
- @BeforeClass
- public static void beforeClass() throws IOException {
+ @BeforeAll
+ static void beforeAll() throws IOException {
client = BigQueryWriteClient.create();
RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create();
@@ -97,10 +98,11 @@ public static void beforeClass() throws IOException {
bigquery.create(tableInfo);
}
- @AfterClass
- public static void afterClass() {
+ @AfterAll
+ static void afterAll() throws InterruptedException {
if (client != null) {
client.close();
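+ // Give the client's background resources time to shut down before the
+ // dataset is deleted below.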
+ client.awaitTermination(10, TimeUnit.SECONDS);
}
if (bigquery != null) {
RemoteBigQueryHelper.forceDelete(bigquery, DATASET);
@@ -108,7 +110,7 @@ public static void afterClass() {
}
@Test
- public void TestTimeEncoding()
+ void TestTimeEncoding()
throws IOException,
InterruptedException,
ExecutionException,
@@ -187,7 +189,7 @@ public void TestTimeEncoding()
row.put("test_date", 300);
JSONArray jsonArr = new JSONArray(new JSONObject[] {row});
ApiFuture<AppendRowsResponse> response = jsonStreamWriter.append(jsonArr, -1);
- Assert.assertFalse(response.get().getAppendResult().hasOffset());
+ assertFalse(response.get().getAppendResult().hasOffset());
TableResult result =
bigquery.listTableData(
tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L));
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java
index 90adb81b1e..2d9378341d 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteNonQuotaRetryTest.java
@@ -17,7 +17,8 @@
package com.google.cloud.bigquery.storage.v1.it;
import static com.google.common.truth.Truth.assertThat;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import com.google.api.core.ApiFuture;
import com.google.cloud.bigquery.BigQuery;
@@ -38,6 +39,7 @@
import com.google.cloud.bigquery.storage.v1.StreamWriter;
import com.google.cloud.bigquery.storage.v1.TableName;
import com.google.cloud.bigquery.storage.v1.WriteStream;
+import com.google.cloud.bigquery.storage.v1.it.util.WriteRetryTestUtil;
import com.google.cloud.bigquery.testing.RemoteBigQueryHelper;
import com.google.protobuf.DescriptorProtos.DescriptorProto;
import com.google.protobuf.DescriptorProtos.FieldDescriptorProto;
@@ -45,14 +47,14 @@
import io.grpc.Status.Code;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/** Integration tests for BigQuery Write API. */
-public class ITBigQueryWriteNonQuotaRetryTest {
+class ITBigQueryWriteNonQuotaRetryTest {
private static final Logger LOG = Logger.getLogger(ITBigQueryWriteQuotaRetryTest.class.getName());
private static final String DATASET = RemoteBigQueryHelper.generateDatasetName();
private static final String TABLE = "testtable";
@@ -63,8 +65,8 @@ public class ITBigQueryWriteNonQuotaRetryTest {
private static BigQueryWriteClient client;
private static BigQuery bigquery;
- @BeforeClass
- public static void beforeClass() throws IOException {
+ @BeforeAll
+ static void beforeAll() throws IOException {
client = BigQueryWriteClient.create();
RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create();
@@ -85,10 +87,11 @@ public static void beforeClass() throws IOException {
bigquery.create(tableInfo);
}
- @AfterClass
- public static void afterClass() {
+ @AfterAll
+ static void afterAll() throws InterruptedException {
if (client != null) {
client.close();
+ client.awaitTermination(10, TimeUnit.SECONDS);
}
if (bigquery != null) {
@@ -107,7 +110,7 @@ ProtoRows CreateProtoRows(String[] messages) {
}
@Test
- public void testJsonStreamWriterCommittedStreamWithNonQuotaRetry()
+ void testJsonStreamWriterCommittedStreamWithNonQuotaRetry()
throws IOException, InterruptedException, DescriptorValidationException {
WriteRetryTestUtil.runExclusiveRetryTest(
bigquery,
@@ -120,7 +123,7 @@ public void testJsonStreamWriterCommittedStreamWithNonQuotaRetry()
}
@Test
- public void testJsonStreamWriterDefaultStreamWithNonQuotaRetry()
+ void testJsonStreamWriterDefaultStreamWithNonQuotaRetry()
throws IOException, InterruptedException, DescriptorValidationException {
WriteRetryTestUtil.runDefaultRetryTest(
bigquery,
@@ -131,11 +134,10 @@ public void testJsonStreamWriterDefaultStreamWithNonQuotaRetry()
/* rowBatchSize= */ 1);
}
- // Moved to ITBigQueryWriteNonQuotaRetryTest from ITBigQueryWriteManualClientTest, as it requires
+ // Moved to ITBigQueryWriteNonQuotaRetryTest from ITBigQueryWriteClientTest, as it requires
// usage of the project this file uses to inject errors (bq-write-api-java-retry-test).
@Test
- public void testDefaultRequestLimit()
- throws IOException, InterruptedException, ExecutionException {
+ void testDefaultRequestLimit() throws IOException, InterruptedException, ExecutionException {
DatasetId datasetId =
DatasetId.of(NON_QUOTA_RETRY_PROJECT_ID, RemoteBigQueryHelper.generateDatasetName());
DatasetInfo datasetInfo = DatasetInfo.newBuilder(datasetId).build();
@@ -170,44 +172,28 @@ public void testDefaultRequestLimit()
streamWriter.append(
CreateProtoRows(
new String[] {new String(new char[19 * 1024 * 1024]).replace("\0", "a")}));
- try {
- AppendRowsResponse resp = response.get();
- LOG.info(
- "Message succeded. Dataset info: "
- + datasetInfo.toString()
- + " tableinfo: "
- + tableInfo.toString()
- + " parent: "
- + parent
- + "streamWriter: "
- + streamWriter.toString()
- + "response: "
- + resp);
- Assert.fail("Large request should fail with InvalidArgumentError");
- } catch (ExecutionException ex) {
- LOG.info(
- "Message failed. Dataset info: "
- + datasetInfo.toString()
- + " tableinfo: "
- + tableInfo.toString()
- + " parent: "
- + parent
- + "streamWriter: "
- + streamWriter);
- assertEquals(io.grpc.StatusRuntimeException.class, ex.getCause().getClass());
- io.grpc.StatusRuntimeException actualError =
- (io.grpc.StatusRuntimeException) ex.getCause();
- // This verifies that the Beam connector can consume this custom exception's grpc
- // StatusCode
- // TODO(yiru): temp fix to unblock test, while final fix is being rolled out.
- if (actualError.getStatus().getCode() != Code.INTERNAL) {
- assertEquals(Code.INVALID_ARGUMENT, actualError.getStatus().getCode());
- assertThat(
- actualError
- .getStatus()
- .getDescription()
- .contains("AppendRows request too large: 19923131 limit 10485760"));
- }
+ ExecutionException ex = assertThrows(ExecutionException.class, () -> response.get());
+ LOG.info(
+ "Message failed. Dataset info: "
+ + datasetInfo.toString()
+ + " tableinfo: "
+ + tableInfo.toString()
+ + " parent: "
+ + parent
+ + "streamWriter: "
+ + streamWriter);
+ assertEquals(io.grpc.StatusRuntimeException.class, ex.getCause().getClass());
+ io.grpc.StatusRuntimeException actualError = (io.grpc.StatusRuntimeException) ex.getCause();
+ // This verifies that the Beam connector can consume this custom exception's grpc
+ // StatusCode
+ // TODO(yiru): temp fix to unblock test, while final fix is being rolled out.
+ if (actualError.getStatus().getCode() != Code.INTERNAL) {
+ assertEquals(Code.INVALID_ARGUMENT, actualError.getStatus().getCode());
+ // Assert on the description itself; wrapping the boolean in assertThat
+ // alone would not fail the test.
+ assertThat(
+ actualError
+ .getStatus()
+ .getDescription())
+ .contains("AppendRows request too large: 19923131 limit 10485760");
}
}
} finally {
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java
index 86207508fa..b4069a7aee 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteQuotaRetryTest.java
@@ -26,13 +26,15 @@
import com.google.cloud.bigquery.TableInfo;
import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.WriteStream;
+import com.google.cloud.bigquery.storage.v1.it.util.WriteRetryTestUtil;
import com.google.cloud.bigquery.testing.RemoteBigQueryHelper;
import com.google.protobuf.Descriptors.DescriptorValidationException;
import java.io.IOException;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/** Integration tests for BigQuery Write API. */
public class ITBigQueryWriteQuotaRetryTest {
@@ -46,8 +48,8 @@ public class ITBigQueryWriteQuotaRetryTest {
private static BigQueryWriteClient client;
private static BigQuery bigquery;
- @BeforeClass
- public static void beforeClass() throws IOException {
+ @BeforeAll
+ static void beforeAll() throws IOException {
client = BigQueryWriteClient.create();
RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create();
@@ -68,10 +70,11 @@ public static void beforeClass() throws IOException {
bigquery.create(tableInfo);
}
- @AfterClass
- public static void afterClass() {
+ @AfterAll
+ static void afterAll() throws InterruptedException {
if (client != null) {
client.close();
+ client.awaitTermination(10, TimeUnit.SECONDS);
}
if (bigquery != null) {
@@ -81,7 +84,7 @@ public static void afterClass() {
}
@Test
- public void testJsonStreamWriterCommittedStreamWithQuotaRetry()
+ void testJsonStreamWriterCommittedStreamWithQuotaRetry()
throws IOException, InterruptedException, DescriptorValidationException {
WriteRetryTestUtil.runExclusiveRetryTest(
bigquery,
@@ -94,7 +97,7 @@ public void testJsonStreamWriterCommittedStreamWithQuotaRetry()
}
@Test
- public void testJsonStreamWriterDefaultStreamWithQuotaRetry()
+ void testJsonStreamWriterDefaultStreamWithQuotaRetry()
throws IOException, InterruptedException, DescriptorValidationException {
WriteRetryTestUtil.runDefaultRetryTest(
bigquery,
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/BigQueryResource.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java
similarity index 90%
rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/BigQueryResource.java
rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java
index b42ff26e63..04daffb348 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/BigQueryResource.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/BigQueryResource.java
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package com.google.cloud.bigquery.storage.v1.it;
+package com.google.cloud.bigquery.storage.v1.it.util;
/** Test helper class to generate BigQuery resource paths. */
public class BigQueryResource {
@@ -28,7 +28,7 @@ public class BigQueryResource {
* @param tableId
* @return a path to a table resource.
*/
- public static String FormatTableResource(String projectId, String datasetId, String tableId) {
+ public static String formatTableResource(String projectId, String datasetId, String tableId) {
return String.format("projects/%s/datasets/%s/tables/%s", projectId, datasetId, tableId);
}
}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java
new file mode 100644
index 0000000000..1e1b0e2fb0
--- /dev/null
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/Helper.java
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.bigquery.storage.v1.it.util;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
+import com.google.api.core.ApiFutureCallback;
+import com.google.api.gax.rpc.ServerStream;
+import com.google.auth.oauth2.ServiceAccountCredentials;
+import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
+import com.google.cloud.bigquery.storage.v1.AvroSerializationOptions;
+import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
+import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
+import com.google.cloud.bigquery.storage.v1.DataFormat;
+import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
+import com.google.cloud.bigquery.storage.v1.ReadRowsResponse;
+import com.google.cloud.bigquery.storage.v1.ReadSession;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.util.Timestamps;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericRecordBuilder;
+
+public class Helper {
+
+ public static final long PICOSECOND_PRECISION = 12;
+ public static final String TIMESTAMP_COLUMN_NAME = "timestamp";
+ public static final String TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME = "timestampHigherPrecision";
+
+ // Sample test cases for timestamps. The first element is microseconds from the
+ // epoch and the second element is the ISO 8601 format with picosecond precision.
+ public static final Object[][] INPUT_TIMESTAMPS =
+ new Object[][] {
+ {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789123Z"},
+ {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789123Z"},
+ {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789123Z"},
+ {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789123Z"}
+ };
+
+ // Expected responses for the timestamps above. When ISO output is enabled, the
+ // read returns an ISO 8601 string for any picosecond-enabled column.
+ public static final Object[][] EXPECTED_TIMESTAMPS_HIGHER_PRECISION_ISO_OUTPUT =
+ new Object[][] {
+ {1735734896123456L /* 2025-01-01T12:34:56.123456Z */, "2025-01-01T12:34:56.123456789123Z"},
+ {1580646896123456L /* 2020-02-02T12:34:56.123456Z */, "2020-02-02T12:34:56.123456789123Z"},
+ {636467696123456L /* 1990-03-03T12:34:56.123456Z */, "1990-03-03T12:34:56.123456789123Z"},
+ {165846896123456L /* 1975-04-04T12:34:56.123456Z */, "1975-04-04T12:34:56.123456789123Z"}
+ };
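+ // Note that the expected values match the inputs exactly: both the micros
+ // column and the picosecond ISO strings are expected to round-trip unchanged.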
+
+ public static ServiceAccountCredentials loadCredentials(String credentialFile) {
+ try (InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes())) {
+ return ServiceAccountCredentials.fromStream(keyStream);
+ } catch (IOException e) {
+ fail("Couldn't create fake JSON credentials.");
+ }
+ return null;
+ }
+
+ public static class AppendCompleteCallback implements ApiFutureCallback<AppendRowsResponse> {
+ private final Object lock = new Object();
+ private int batchCount = 0;
+
+ public void onSuccess(AppendRowsResponse response) {
+ synchronized (lock) {
+ if (response.hasError()) {
+ System.out.format("Error: %s\n", response.getError());
+ } else {
+ ++batchCount;
+ System.out.format("Wrote batch %d\n", batchCount);
+ }
+ }
+ }
+
+ public void onFailure(Throwable throwable) {
+ System.out.format("Error: %s\n", throwable.toString());
+ }
+ }
+
+ /**
+ * Reads all the rows from the specified table.
+ *
+ * For every row, the consumer is called for processing.
+ *
+ * @param client Read client used to create the session and stream rows.
+ * @param parentProjectId Project that owns the read session.
+ * @param table Fully qualified table resource path to read from.
+ * @param snapshotInMillis Optional. If specified, all rows up to this timestamp will be returned.
+ * @param filter Optional. If specified, it will be used to restrict returned data.
+ * @param consumer Consumer invoked for every Avro row.
+ * @throws IOException if reading or decoding the rows fails.
+ */
+ public static void processRowsAtSnapshot(
+ BigQueryReadClient client,
+ String parentProjectId,
+ String table,
+ Long snapshotInMillis,
+ String filter,
+ SimpleRowReaderAvro.AvroRowConsumer consumer)
+ throws IOException {
+ Preconditions.checkNotNull(table);
+ Preconditions.checkNotNull(consumer);
+
+ CreateReadSessionRequest.Builder createSessionRequestBuilder =
+ CreateReadSessionRequest.newBuilder()
+ .setParent(parentProjectId)
+ .setMaxStreamCount(1)
+ .setReadSession(
+ ReadSession.newBuilder()
+ .setTable(table)
+ .setDataFormat(DataFormat.AVRO)
+ .setReadOptions(
+ ReadSession.TableReadOptions.newBuilder()
+ .setAvroSerializationOptions(
+ AvroSerializationOptions.newBuilder()
+ .setPicosTimestampPrecision(
+ // This serialization option only impacts columns of
+ // type `TIMESTAMP_PICOS` and has no impact on other
+ // column types.
+ AvroSerializationOptions.PicosTimestampPrecision
+ .TIMESTAMP_PRECISION_PICOS)
+ .build())
+ .build())
+ .build());
+
+ if (snapshotInMillis != null) {
+ createSessionRequestBuilder
+ .getReadSessionBuilder()
+ .setTableModifiers(
+ ReadSession.TableModifiers.newBuilder()
+ .setSnapshotTime(Timestamps.fromMillis(snapshotInMillis))
+ .build());
+ }
+
+ if (filter != null && !filter.isEmpty()) {
+ // Merge the restriction into the existing read options builder so the
+ // AvroSerializationOptions configured above are not overwritten.
+ createSessionRequestBuilder
+ .getReadSessionBuilder()
+ .getReadOptionsBuilder()
+ .setRowRestriction(filter);
+ }
+
+ ReadSession session = client.createReadSession(createSessionRequestBuilder.build());
+ assertEquals(
+ 1,
+ session.getStreamsCount(),
+ String.format(
+ "Did not receive expected number of streams for table '%s' CreateReadSession"
+ + " response:%n%s",
+ table, session.toString()));
+
+ ReadRowsRequest readRowsRequest =
+ ReadRowsRequest.newBuilder().setReadStream(session.getStreams(0).getName()).build();
+
+ SimpleRowReaderAvro reader =
+ new SimpleRowReaderAvro(new Schema.Parser().parse(session.getAvroSchema().getSchema()));
+
+ ServerStream<ReadRowsResponse> stream = client.readRowsCallable().call(readRowsRequest);
+ for (ReadRowsResponse response : stream) {
+ reader.processRows(response.getAvroRows(), consumer);
+ }
+ }
+
+ /**
+ * Reads all the rows from the specified table and returns a list as generic Avro records.
+ *
+ * @param table Fully qualified table resource path to read from.
+ * @param filter Optional. If specified, it will be used to restrict returned data.
+ * @return the table's rows as generic Avro records.
+ */
+ public static List<GenericData.Record> readAllRows(
+ BigQueryReadClient client, String parentProjectId, String table, String filter)
+ throws IOException {
+ final List<GenericData.Record> rows = new ArrayList<>();
+ processRowsAtSnapshot(
+ client,
+ parentProjectId,
+ /* table= */ table,
+ /* snapshotInMillis= */ null,
+ /* filter= */ filter,
+ (SimpleRowReaderAvro.AvroRowConsumer)
+ record -> {
+ // clone the record since that reference will be reused by the reader.
+ rows.add(new GenericRecordBuilder(record).build());
+ });
+ return rows;
+ }
+}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderArrow.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java
similarity index 79%
rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderArrow.java
rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java
index 685f72fbc9..ff5b423c2c 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderArrow.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderArrow.java
@@ -14,8 +14,10 @@
* limitations under the License.
*/
-package com.google.cloud.bigquery.storage.v1.it;
+package com.google.cloud.bigquery.storage.v1.it.util;
+import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_COLUMN_NAME;
+import static com.google.cloud.bigquery.storage.v1.it.util.Helper.TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME;
import static com.google.common.truth.Truth.assertThat;
import com.google.cloud.bigquery.FieldElementType;
@@ -23,7 +25,6 @@
import com.google.cloud.bigquery.storage.v1.ArrowRecordBatch;
import com.google.cloud.bigquery.storage.v1.ArrowSchema;
import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.time.LocalDateTime;
import java.util.ArrayList;
@@ -50,17 +51,44 @@ public interface ArrowBatchConsumer {
void accept(VectorSchemaRoot root);
}
+ public static class ArrowTimestampBatchConsumer implements ArrowBatchConsumer {
+ private final Object[][] expectedTimestampValues;
+
+ public ArrowTimestampBatchConsumer(Object[][] expectedTimestampValues) {
+ this.expectedTimestampValues = expectedTimestampValues;
+ }
+
+ @Override
+ public void accept(VectorSchemaRoot root) {
+ FieldVector timestampFieldVector = root.getVector(TIMESTAMP_COLUMN_NAME);
+ FieldVector timestampHigherPrecisionFieldVector =
+ root.getVector(TIMESTAMP_HIGHER_PRECISION_COLUMN_NAME);
+ assertThat(timestampFieldVector.getValueCount())
+ .isEqualTo(timestampHigherPrecisionFieldVector.getValueCount());
+ int count = timestampFieldVector.getValueCount();
+ for (int i = 0; i < count; i++) {
+ long timestampMicros = (Long) timestampFieldVector.getObject(i);
+ assertThat(timestampMicros).isEqualTo(expectedTimestampValues[i][0]);
+
+ // The Object comes back as `Text` which cannot be cast to String
+ // (use `toString()` instead)
+ String timestampHigherPrecisionISO =
+ timestampHigherPrecisionFieldVector.getObject(i).toString();
+ assertThat(timestampHigherPrecisionISO).isEqualTo(expectedTimestampValues[i][1]);
+ }
+ }
+ }
+
/** ArrowRangeBatchConsumer accepts batch Arrow data and validate the range values. */
public static class ArrowRangeBatchConsumer implements ArrowBatchConsumer {
-
- private final ImmutableMap expectedRangeDateValues;
- private final ImmutableMap expectedRangeDatetimeValues;
- private final ImmutableMap expectedRangeTimestampValues;
+ private final Map expectedRangeDateValues;
+ private final Map expectedRangeDatetimeValues;
+ private final Map expectedRangeTimestampValues;
public ArrowRangeBatchConsumer(
- ImmutableMap expectedRangeDateValues,
- ImmutableMap expectedRangeDatetimeValues,
- ImmutableMap expectedRangeTimestampValues) {
+ Map expectedRangeDateValues,
+ Map expectedRangeDatetimeValues,
+ Map expectedRangeTimestampValues) {
this.expectedRangeDateValues = expectedRangeDateValues;
this.expectedRangeDatetimeValues = expectedRangeDatetimeValues;
this.expectedRangeTimestampValues = expectedRangeTimestampValues;
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderAvro.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java
similarity index 97%
rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderAvro.java
rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java
index a23179c8c8..4914e93f5b 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/SimpleRowReaderAvro.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/SimpleRowReaderAvro.java
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package com.google.cloud.bigquery.storage.v1.it;
+package com.google.cloud.bigquery.storage.v1.it.util;
import com.google.cloud.bigquery.storage.v1.AvroRows;
import com.google.common.base.Preconditions;
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/WriteRetryTestUtil.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java
similarity index 95%
rename from google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/WriteRetryTestUtil.java
rename to google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java
index e11e0707df..d5518f790f 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/WriteRetryTestUtil.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/util/WriteRetryTestUtil.java
@@ -14,9 +14,11 @@
* limitations under the License.
*/
-package com.google.cloud.bigquery.storage.v1.it;
+package com.google.cloud.bigquery.storage.v1.it.util;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
import com.google.api.core.ApiFuture;
import com.google.api.gax.retrying.RetrySettings;
@@ -43,7 +45,6 @@
import java.util.logging.Logger;
import org.json.JSONArray;
import org.json.JSONObject;
-import org.junit.Assert;
public class WriteRetryTestUtil {
private static final Logger LOG =
@@ -100,10 +101,10 @@ public static void runExclusiveRetryTest(
for (int i = 0; i < requestCount; i++) {
LOG.info("Waiting for request " + i);
try {
- Assert.assertEquals(
+ assertEquals(
allResponses.get(i).get().getAppendResult().getOffset().getValue(), i * rowBatchSize);
} catch (ExecutionException ex) {
- Assert.fail("Unexpected error " + ex);
+ fail("Unexpected error " + ex);
}
}
}
@@ -151,7 +152,7 @@ private static void runDefaultRetryTestInternal(
try {
assertFalse(allResponses.get(i).get().hasError());
} catch (Exception ex) {
- Assert.fail("Unexpected error " + ex);
+ fail("Unexpected error " + ex);
}
}
}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java
index ab6a8d1298..1befb2a949 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/EnhancedBigQueryReadStubSettingsTest.java
@@ -32,16 +32,13 @@
import com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse;
import java.time.Duration;
import java.util.Set;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
-@RunWith(JUnit4.class)
public class EnhancedBigQueryReadStubSettingsTest {
@Test
- public void testSettingsArePreserved() {
+ void testSettingsArePreserved() {
String endpoint = "some.other.host:123";
CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class);
Duration watchdogInterval = Duration.ofSeconds(12);
@@ -100,14 +97,14 @@ private void verifySettings(
}
@Test
- public void testCreateReadSessionSettings() {
+ void testCreateReadSessionSettings() {
UnaryCallSettings.Builder<CreateReadSessionRequest, ReadSession> builder =
EnhancedBigQueryReadStubSettings.newBuilder().createReadSessionSettings();
verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings());
}
@Test
- public void testReadRowsSettings() {
+ void testReadRowsSettings() {
ServerStreamingCallSettings.Builder<ReadRowsRequest, ReadRowsResponse> builder =
EnhancedBigQueryReadStubSettings.newBuilder().readRowsSettings();
assertThat(builder.getRetryableCodes()).contains(Code.UNAVAILABLE);
@@ -123,7 +120,7 @@ public void testReadRowsSettings() {
}
@Test
- public void testSplitReadStreamSettings() {
+ void testSplitReadStreamSettings() {
UnaryCallSettings.Builder<SplitReadStreamRequest, SplitReadStreamResponse> builder =
EnhancedBigQueryReadStubSettings.newBuilder().splitReadStreamSettings();
verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings());
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java
index a68f6e3ae5..90f0c395f3 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/ResourceHeaderTest.java
@@ -28,16 +28,14 @@
import com.google.cloud.bigquery.storage.v1.ReadRowsRequest;
import com.google.cloud.bigquery.storage.v1.ReadSession;
import com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest;
+import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-@RunWith(JUnit4.class)
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
public class ResourceHeaderTest {
private static final String TEST_TABLE_REFERENCE =
@@ -45,7 +43,7 @@ public class ResourceHeaderTest {
private static final String TEST_STREAM_NAME = "streamName";
- private static final String NAME = "resource-header-test:123";
+ private static final String NAME = "resource-header-test:123-v1";
private static final String HEADER_NAME = "x-goog-request-params";
@@ -68,14 +66,14 @@ public class ResourceHeaderTest {
private LocalChannelProvider channelProvider;
private BigQueryReadClient client;
- @BeforeClass
+ @BeforeAll
public static void setUpClass() throws Exception {
server = new InProcessServer<>(new BigQueryReadImplBase() {}, NAME);
server.start();
}
- @Before
- public void setUp() throws Exception {
+ @BeforeEach
+ void setUp() throws Exception {
channelProvider = LocalChannelProvider.create(NAME);
BigQueryReadSettings.Builder settingsBuilder =
BigQueryReadSettings.newBuilder()
@@ -85,19 +83,20 @@ public void setUp() throws Exception {
client = BigQueryReadClient.create(settingsBuilder.build());
}
- @After
- public void tearDown() throws Exception {
+ @AfterEach
+ void tearDown() throws Exception {
client.close();
+ client.awaitTermination(10, TimeUnit.SECONDS);
}
- @AfterClass
- public static void tearDownClass() throws Exception {
+ @AfterAll
+ static void tearDownClass() throws Exception {
server.stop();
server.blockUntilShutdown();
}
@Test
- public void createReadSessionTest() {
+ void createReadSessionTest() {
try {
client.createReadSession(
"parents/project", ReadSession.newBuilder().setTable(TEST_TABLE_REFERENCE).build(), 1);
@@ -108,7 +107,7 @@ public void createReadSessionTest() {
}
@Test
- public void readRowsTest() {
+ void readRowsTest() {
try {
ReadRowsRequest request =
ReadRowsRequest.newBuilder().setReadStream(TEST_STREAM_NAME).setOffset(125).build();
@@ -121,7 +120,7 @@ public void readRowsTest() {
}
@Test
- public void splitReadStreamTest() {
+ void splitReadStreamTest() {
try {
client.splitReadStream(SplitReadStreamRequest.newBuilder().setName(TEST_STREAM_NAME).build());
} catch (UnimplementedException e) {
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java
index cc009d9796..638414b5e2 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/WriteHeaderTest.java
@@ -30,15 +30,12 @@
import com.google.cloud.bigquery.storage.v1.*;
import com.google.cloud.bigquery.storage.v1.BigQueryWriteGrpc.BigQueryWriteImplBase;
import java.util.regex.Pattern;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-@RunWith(JUnit4.class)
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
public class WriteHeaderTest {
private static final String TEST_TABLE_REFERENCE =
@@ -61,14 +58,14 @@ public class WriteHeaderTest {
private LocalChannelProvider channelProvider;
private BigQueryWriteClient client;
- @BeforeClass
+ @BeforeAll
public static void setUpClass() throws Exception {
server = new InProcessServer<>(new BigQueryWriteImplBase() {}, NAME);
server.start();
}
- @Before
- public void setUp() throws Exception {
+ @BeforeEach
+ void setUp() throws Exception {
channelProvider = LocalChannelProvider.create(NAME);
BigQueryWriteSettings.Builder settingsBuilder =
BigQueryWriteSettings.newBuilder()
@@ -83,19 +80,19 @@ public void setUp() throws Exception {
client = BigQueryWriteClient.create(settingsBuilder.build());
}
- @After
- public void tearDown() throws Exception {
+ @AfterEach
+ void tearDown() throws Exception {
client.close();
}
- @AfterClass
- public static void tearDownClass() throws Exception {
+ @AfterAll
+ static void tearDownClass() throws Exception {
server.stop();
server.blockUntilShutdown();
}
@Test
- public void createWriteStreamTest() {
+ void createWriteStreamTest() {
CreateWriteStreamRequest request =
CreateWriteStreamRequest.newBuilder()
.setParent(TEST_TABLE_REFERENCE)
@@ -110,7 +107,7 @@ public void createWriteStreamTest() {
}
@Test
- public void writeRowsTest() {
+ void writeRowsTest() {
BidiStreamingCallable<AppendRowsRequest, AppendRowsResponse> callable =
client.appendRowsCallable();
ApiCallContext apiCallContext = null;
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java
index 2a2e513bec..711049d0c0 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/stub/readrows/ReadRowsRetryTest.java
@@ -15,9 +15,12 @@
*/
package com.google.cloud.bigquery.storage.v1.stub.readrows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
import com.google.api.gax.core.NoCredentialsProvider;
-import com.google.api.gax.grpc.GrpcTransportChannel;
-import com.google.api.gax.rpc.FixedTransportChannelProvider;
+import com.google.api.gax.grpc.testing.InProcessServer;
+import com.google.api.gax.grpc.testing.LocalChannelProvider;
import com.google.api.gax.rpc.ServerStream;
import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
import com.google.cloud.bigquery.storage.v1.BigQueryReadGrpc.BigQueryReadImplBase;
@@ -27,50 +30,45 @@
import com.google.common.collect.Queues;
import io.grpc.Status.Code;
import io.grpc.stub.StreamObserver;
-import io.grpc.testing.GrpcServerRule;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.runners.MockitoJUnitRunner;
-
-@RunWith(MockitoJUnitRunner.class)
-public class ReadRowsRetryTest {
+import java.util.UUID;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
- @Rule public GrpcServerRule serverRule = new GrpcServerRule();
+class ReadRowsRetryTest {
private TestBigQueryStorageService service;
private BigQueryReadClient client;
+ private InProcessServer<TestBigQueryStorageService> server;
- @Before
- public void setUp() throws IOException {
+ @BeforeEach
+ void setUp() throws Exception {
service = new TestBigQueryStorageService();
- serverRule.getServiceRegistry().addService(service);
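+ // A uniquely named in-process server per test replaces JUnit 4's
+ // GrpcServerRule; the LocalChannelProvider below connects the client to it by name.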
+ String serverName = UUID.randomUUID().toString();
+ server = new InProcessServer<>(service, serverName);
+ server.start();
BigQueryReadSettings settings =
BigQueryReadSettings.newBuilder()
.setCredentialsProvider(NoCredentialsProvider.create())
- .setTransportChannelProvider(
- FixedTransportChannelProvider.create(
- GrpcTransportChannel.create(serverRule.getChannel())))
+ .setTransportChannelProvider(LocalChannelProvider.create(serverName))
.build();
client = BigQueryReadClient.create(settings);
}
- @After
- public void tearDown() throws Exception {
+ @AfterEach
+ void tearDown() throws Exception {
client.close();
+ server.stop();
+ server.blockUntilShutdown();
}
@Test
- public void happyPathTest() {
+ void happyPathTest() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
service.expectations.add(
RpcExpectation.create()
@@ -78,11 +76,11 @@ public void happyPathTest() {
.respondWithNumberOfRows(10)
.respondWithNumberOfRows(7));
- Assert.assertEquals(17, getRowCount(request));
+ assertEquals(17, getRowCount(request));
}
@Test
- public void immediateRetryTest() {
+ void immediateRetryTest() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
service.expectations.add(
RpcExpectation.create()
@@ -95,11 +93,11 @@ public void immediateRetryTest() {
.respondWithNumberOfRows(10)
.respondWithNumberOfRows(7));
- Assert.assertEquals(17, getRowCount(request));
+ assertEquals(17, getRowCount(request));
}
@Test
- public void multipleRetryTestWithZeroInitialOffset() {
+ void multipleRetryTestWithZeroInitialOffset() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
service.expectations.add(
RpcExpectation.create()
@@ -117,11 +115,11 @@ public void multipleRetryTestWithZeroInitialOffset() {
service.expectations.add(
RpcExpectation.create().expectRequest("fake-stream", 22).respondWithNumberOfRows(6));
- Assert.assertEquals(28, getRowCount(request));
+ assertEquals(28, getRowCount(request));
}
@Test
- public void multipleRetryTestWithNonZeroInitialOffset() {
+ void multipleRetryTestWithNonZeroInitialOffset() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 17);
service.expectations.add(
RpcExpectation.create()
@@ -139,11 +137,11 @@ public void multipleRetryTestWithNonZeroInitialOffset() {
service.expectations.add(
RpcExpectation.create().expectRequest("fake-stream", 39).respondWithNumberOfRows(3));
- Assert.assertEquals(25, getRowCount(request));
+ assertEquals(25, getRowCount(request));
}
@Test
- public void errorAtTheVeryEndTest() {
+ void errorAtTheVeryEndTest() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
service.expectations.add(
RpcExpectation.create()
@@ -155,7 +153,7 @@ public void errorAtTheVeryEndTest() {
service.expectations.add(
RpcExpectation.create().expectRequest("fake-stream", 17).respondWithNumberOfRows(0));
- Assert.assertEquals(17, getRowCount(request));
+ assertEquals(17, getRowCount(request));
}
private int getRowCount(ReadRowsRequest request) {
@@ -179,17 +177,15 @@ public void readRows(
RpcExpectation expectedRpc = expectations.poll();
currentRequestIndex++;
- Assert.assertNotNull(
- "Unexpected request #" + currentRequestIndex + ": " + request.toString(), expectedRpc);
-
- Assert.assertEquals(
+ assertNotNull(
+ expectedRpc, "Unexpected request #" + currentRequestIndex + ": " + request.toString());
+ assertEquals(
+ expectedRpc.expectedRequest,
+ request,
"Expected request #"
+ currentRequestIndex
+ " does not match actual request: "
- + request.toString(),
- expectedRpc.expectedRequest,
- request);
-
+ + request.toString());
for (ReadRowsResponse response : expectedRpc.responses) {
responseObserver.onNext(response);
}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java
index 1488d84014..87547df18c 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MetastorePartitionServiceClientTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java
index 9435f45ded..7881256590 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionService.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java
index b2086c079d..d0e6ae8c8a 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha/MockMetastorePartitionServiceImpl.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java
index d932d89d26..420fc9d5c8 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MetastorePartitionServiceClientTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java
index 9e8c74228b..ce8b9667a5 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionService.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java
index a7968167a9..c342ddfd11 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta/MockMetastorePartitionServiceImpl.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java
index 9a171fa23f..cc018cedda 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java
index d12166157b..417cbed9fc 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java
@@ -15,6 +15,10 @@
*/
package com.google.cloud.bigquery.storage.v1beta1;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
import com.google.api.gax.core.NoCredentialsProvider;
import com.google.api.gax.grpc.GaxGrpcProperties;
import com.google.api.gax.grpc.GrpcStatusCode;
@@ -54,14 +58,16 @@
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class BigQueryStorageClientTest {
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.parallel.Execution;
+import org.junit.jupiter.api.parallel.ExecutionMode;
+
+@Execution(ExecutionMode.SAME_THREAD)
+class BigQueryStorageClientTest {
private static MockBigQueryStorage mockBigQueryStorage;
private static MockServiceHelper serviceHelper;
private BigQueryStorageClient client;
@@ -69,21 +75,21 @@ public class BigQueryStorageClientTest {
private int retryCount;
private Code lastRetryStatusCode;
- @BeforeClass
- public static void startStaticServer() {
+ @BeforeAll
+ static void startStaticServer() {
mockBigQueryStorage = new MockBigQueryStorage();
serviceHelper =
new MockServiceHelper("in-process-1", Arrays.asList(mockBigQueryStorage));
serviceHelper.start();
}
- @AfterClass
- public static void stopServer() {
+ @AfterAll
+ static void stopServer() {
serviceHelper.stop();
}
- @Before
- public void setUp() throws IOException {
+ @BeforeEach
+ void setUp() throws IOException {
serviceHelper.reset();
channelProvider = serviceHelper.createChannelProvider();
retryCount = 0;
@@ -106,14 +112,14 @@ public void onRetryAttempt(Status prevStatus, Metadata prevMetadata) {
client = BigQueryStorageClient.create(settings);
}
- @After
- public void tearDown() throws Exception {
+ @AfterEach
+ void tearDown() throws Exception {
client.close();
}
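
The fixture conversion above follows the standard JUnit 5 mapping: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll (still static), @Before/@After become @BeforeEach/@AfterEach, and public visibility is no longer required. A minimal skeleton of the migrated shape, with a hypothetical FakeService standing in for the mock server:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleSketch {
      private static FakeService service; // shared across the whole class

      @BeforeAll
      static void startStaticServer() { service = FakeService.start(); }

      @AfterAll
      static void stopServer() { service.stop(); }

      @BeforeEach
      void setUp() { service.reset(); } // runs before every @Test

      @AfterEach
      void tearDown() { /* per-test cleanup */ }

      @Test
      void exampleTest() { /* exercises service */ }

      // Hypothetical stand-in so the sketch is self-contained.
      static class FakeService {
        static FakeService start() { return new FakeService(); }
        void stop() {}
        void reset() {}
      }
    }
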
@Test
@SuppressWarnings("all")
- public void createReadSessionTest() {
+ void createReadSessionTest() {
String name = "name3373707";
ReadSession expectedResponse = ReadSession.newBuilder().setName(name).build();
mockBigQueryStorage.addResponse(expectedResponse);
@@ -123,16 +129,16 @@ public void createReadSessionTest() {
int requestedStreams = 1017221410;
ReadSession actualResponse = client.createReadSession(tableReference, parent, requestedStreams);
- Assert.assertEquals(expectedResponse, actualResponse);
+ assertEquals(expectedResponse, actualResponse);
List<AbstractMessage> actualRequests = mockBigQueryStorage.getRequests();
- Assert.assertEquals(1, actualRequests.size());
+ assertEquals(1, actualRequests.size());
CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0);
- Assert.assertEquals(tableReference, actualRequest.getTableReference());
- Assert.assertEquals(parent, actualRequest.getParent());
- Assert.assertEquals(requestedStreams, actualRequest.getRequestedStreams());
- Assert.assertTrue(
+ assertEquals(tableReference, actualRequest.getTableReference());
+ assertEquals(parent, actualRequest.getParent());
+ assertEquals(requestedStreams, actualRequest.getRequestedStreams());
+ assertTrue(
channelProvider.isHeaderSent(
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
@@ -140,25 +146,22 @@ public void createReadSessionTest() {
@Test
@SuppressWarnings("all")
- public void createReadSessionExceptionTest() throws Exception {
+ void createReadSessionExceptionTest() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
mockBigQueryStorage.addException(exception);
- try {
- TableReference tableReference = TableReference.newBuilder().build();
- String parent = "parent-995424086";
- int requestedStreams = 1017221410;
+ TableReference tableReference = TableReference.newBuilder().build();
+ String parent = "parent-995424086";
+ int requestedStreams = 1017221410;
- client.createReadSession(tableReference, parent, requestedStreams);
- Assert.fail("No exception raised");
- } catch (InvalidArgumentException e) {
- // Expected exception
- }
+ assertThrows(
+ InvalidArgumentException.class,
+ () -> client.createReadSession(tableReference, parent, requestedStreams));
}
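
This is the standard JUnit 5 replacement for the try/fail/catch idiom: assertThrows runs the lambda, fails the test if nothing is thrown, and returns the typed exception so follow-up assertions can inspect it, which the readRowsExceptionTest conversion below also relies on. A minimal sketch with a hypothetical doCall() helper:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    class AssertThrowsSketch {
      void run() {
        // Fails automatically if doCall() completes without throwing.
        IllegalStateException e = assertThrows(IllegalStateException.class, () -> doCall());
        // The returned exception can be inspected directly.
        assertEquals("boom", e.getMessage());
      }

      private void doCall() { throw new IllegalStateException("boom"); } // hypothetical
    }
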
@Test
@SuppressWarnings("all")
- public void readRowsTest() throws Exception {
+ void readRowsTest() throws Exception {
ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().build();
mockBigQueryStorage.addResponse(expectedResponse);
StreamPosition readPosition = StreamPosition.newBuilder().build();
@@ -170,16 +173,16 @@ public void readRowsTest() throws Exception {
callable.serverStreamingCall(request, responseObserver);
List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.assertEquals(1, actualResponses.size());
- Assert.assertEquals(expectedResponse, actualResponses.get(0));
+ assertEquals(1, actualResponses.size());
+ assertEquals(expectedResponse, actualResponses.get(0));
- Assert.assertEquals(retryCount, 0);
- Assert.assertEquals(lastRetryStatusCode, Code.OK);
+ assertEquals(0, retryCount);
+ assertEquals(Code.OK, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsExceptionTest() throws Exception {
+ void readRowsExceptionTest() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
mockBigQueryStorage.addException(exception);
StreamPosition readPosition = StreamPosition.newBuilder().build();
@@ -190,22 +193,19 @@ public void readRowsExceptionTest() throws Exception {
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
- try {
- List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.fail("No exception thrown");
- } catch (ExecutionException e) {
- Assert.assertTrue(e.getCause() instanceof InvalidArgumentException);
- InvalidArgumentException apiException = (InvalidArgumentException) e.getCause();
- Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
- }
-
- Assert.assertEquals(retryCount, 0);
- Assert.assertEquals(lastRetryStatusCode, Code.OK);
+ ExecutionException e =
+ assertThrows(ExecutionException.class, () -> responseObserver.future().get());
+ assertTrue(e.getCause() instanceof InvalidArgumentException);
+ InvalidArgumentException apiException = (InvalidArgumentException) e.getCause();
+ assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
+
+ assertEquals(0, retryCount);
+ assertEquals(Code.OK, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void batchCreateReadSessionStreamsTest() {
+ void batchCreateReadSessionStreamsTest() {
BatchCreateReadSessionStreamsResponse expectedResponse =
BatchCreateReadSessionStreamsResponse.newBuilder().build();
mockBigQueryStorage.addResponse(expectedResponse);
@@ -215,16 +215,16 @@ public void batchCreateReadSessionStreamsTest() {
BatchCreateReadSessionStreamsResponse actualResponse =
client.batchCreateReadSessionStreams(session, requestedStreams);
- Assert.assertEquals(expectedResponse, actualResponse);
+ assertEquals(expectedResponse, actualResponse);
List<AbstractMessage> actualRequests = mockBigQueryStorage.getRequests();
- Assert.assertEquals(1, actualRequests.size());
+ assertEquals(1, actualRequests.size());
BatchCreateReadSessionStreamsRequest actualRequest =
(BatchCreateReadSessionStreamsRequest) actualRequests.get(0);
- Assert.assertEquals(session, actualRequest.getSession());
- Assert.assertEquals(requestedStreams, actualRequest.getRequestedStreams());
- Assert.assertTrue(
+ assertEquals(session, actualRequest.getSession());
+ assertEquals(requestedStreams, actualRequest.getRequestedStreams());
+ assertTrue(
channelProvider.isHeaderSent(
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
@@ -232,24 +232,21 @@ public void batchCreateReadSessionStreamsTest() {
@Test
@SuppressWarnings("all")
- public void batchCreateReadSessionStreamsExceptionTest() throws Exception {
+ void batchCreateReadSessionStreamsExceptionTest() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
mockBigQueryStorage.addException(exception);
- try {
- ReadSession session = ReadSession.newBuilder().build();
- int requestedStreams = 1017221410;
+ ReadSession session = ReadSession.newBuilder().build();
+ int requestedStreams = 1017221410;
- client.batchCreateReadSessionStreams(session, requestedStreams);
- Assert.fail("No exception raised");
- } catch (InvalidArgumentException e) {
- // Expected exception
- }
+ assertThrows(
+ InvalidArgumentException.class,
+ () -> client.batchCreateReadSessionStreams(session, requestedStreams));
}
@Test
@SuppressWarnings("all")
- public void finalizeStreamTest() {
+ void finalizeStreamTest() {
Empty expectedResponse = Empty.newBuilder().build();
mockBigQueryStorage.addResponse(expectedResponse);
@@ -258,11 +255,11 @@ public void finalizeStreamTest() {
client.finalizeStream(stream);
List<AbstractMessage> actualRequests = mockBigQueryStorage.getRequests();
- Assert.assertEquals(1, actualRequests.size());
+ assertEquals(1, actualRequests.size());
FinalizeStreamRequest actualRequest = (FinalizeStreamRequest) actualRequests.get(0);
- Assert.assertEquals(stream, actualRequest.getStream());
- Assert.assertTrue(
+ assertEquals(stream, actualRequest.getStream());
+ assertTrue(
channelProvider.isHeaderSent(
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
@@ -270,37 +267,32 @@ public void finalizeStreamTest() {
@Test
@SuppressWarnings("all")
- public void finalizeStreamExceptionTest() throws Exception {
+ void finalizeStreamExceptionTest() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
mockBigQueryStorage.addException(exception);
- try {
- Stream stream = Stream.newBuilder().build();
+ Stream stream = Stream.newBuilder().build();
- client.finalizeStream(stream);
- Assert.fail("No exception raised");
- } catch (InvalidArgumentException e) {
- // Expected exception
- }
+ assertThrows(InvalidArgumentException.class, () -> client.finalizeStream(stream));
}
@Test
@SuppressWarnings("all")
- public void splitReadStreamTest() {
+ void splitReadStreamTest() {
SplitReadStreamResponse expectedResponse = SplitReadStreamResponse.newBuilder().build();
mockBigQueryStorage.addResponse(expectedResponse);
Stream originalStream = Stream.newBuilder().build();
SplitReadStreamResponse actualResponse = client.splitReadStream(originalStream);
- Assert.assertEquals(expectedResponse, actualResponse);
+ assertEquals(expectedResponse, actualResponse);
List<AbstractMessage> actualRequests = mockBigQueryStorage.getRequests();
- Assert.assertEquals(1, actualRequests.size());
+ assertEquals(1, actualRequests.size());
SplitReadStreamRequest actualRequest = (SplitReadStreamRequest) actualRequests.get(0);
- Assert.assertEquals(originalStream, actualRequest.getOriginalStream());
- Assert.assertTrue(
+ assertEquals(originalStream, actualRequest.getOriginalStream());
+ assertTrue(
channelProvider.isHeaderSent(
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
@@ -308,23 +300,18 @@ public void splitReadStreamTest() {
@Test
@SuppressWarnings("all")
- public void splitReadStreamExceptionTest() throws Exception {
+ void splitReadStreamExceptionTest() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
mockBigQueryStorage.addException(exception);
- try {
- Stream originalStream = Stream.newBuilder().build();
+ Stream originalStream = Stream.newBuilder().build();
- client.splitReadStream(originalStream);
- Assert.fail("No exception raised");
- } catch (InvalidArgumentException e) {
- // Expected exception
- }
+ assertThrows(InvalidArgumentException.class, () -> client.splitReadStream(originalStream));
}
@Test
@SuppressWarnings("all")
- public void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException {
+ void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException {
ApiException exception =
new InternalException(
new StatusRuntimeException(
@@ -343,15 +330,15 @@ public void readRowsRetryingEOSExceptionTest() throws ExecutionException, Interr
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.assertEquals(1, actualResponses.size());
+ assertEquals(1, actualResponses.size());
- Assert.assertEquals(retryCount, 1);
- Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL);
+ assertEquals(1, retryCount);
+ assertEquals(Code.INTERNAL, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException {
+ void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException {
ApiException exception =
new InternalException(
new StatusRuntimeException(
@@ -370,15 +357,15 @@ public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, Inte
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.assertEquals(1, actualResponses.size());
+ assertEquals(1, actualResponses.size());
- Assert.assertEquals(retryCount, 1);
- Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL);
+ assertEquals(1, retryCount);
+ assertEquals(Code.INTERNAL, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo()
+ void readRowsNoRetryForResourceExhaustedWithoutRetryInfo()
throws ExecutionException, InterruptedException {
ApiException exception =
new ResourceExhaustedException(
@@ -397,23 +384,19 @@ public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo()
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
- try {
- List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.fail("No exception thrown");
- } catch (ExecutionException e) {
- Assert.assertTrue(e.getCause() instanceof ResourceExhaustedException);
- ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause();
- Assert.assertEquals(
- StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode());
- }
-
- Assert.assertEquals(retryCount, 0);
- Assert.assertEquals(lastRetryStatusCode, Code.OK);
+ ExecutionException e =
+ assertThrows(ExecutionException.class, () -> responseObserver.future().get());
+ assertTrue(e.getCause() instanceof ResourceExhaustedException);
+ ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause();
+ assertEquals(StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode());
+
+ assertEquals(0, retryCount);
+ assertEquals(Code.OK, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsNoRetryForResourceExhaustedWithRetryInfo()
+ void readRowsNoRetryForResourceExhaustedWithRetryInfo()
throws ExecutionException, InterruptedException {
RetryInfo retryInfo =
RetryInfo.newBuilder()
@@ -459,9 +442,9 @@ public RetryInfo parseBytes(byte[] serialized) {
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.assertEquals(1, actualResponses.size());
+ assertEquals(1, actualResponses.size());
- Assert.assertEquals(retryCount, 1);
- Assert.assertEquals(lastRetryStatusCode, Code.RESOURCE_EXHAUSTED);
+ assertEquals(1, retryCount);
+ assertEquals(Code.RESOURCE_EXHAUSTED, lastRetryStatusCode);
}
}
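
The one non-mechanical addition in this file is the class-level @Execution(ExecutionMode.SAME_THREAD) annotation: when JUnit 5 parallel execution is enabled for the suite, it keeps this class's tests on a single thread, which matters here because they share mutable state (retryCount, lastRetryStatusCode, and the mock service). A minimal sketch, assuming parallelism is switched on via junit-platform.properties:

    // junit-platform.properties (assumed suite-wide setting):
    //   junit.jupiter.execution.parallel.enabled = true

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.parallel.Execution;
    import org.junit.jupiter.api.parallel.ExecutionMode;

    @Execution(ExecutionMode.SAME_THREAD) // opt this class out of parallel scheduling
    class SameThreadSketch {
      private static int sharedCounter; // would race if tests ran concurrently

      @Test
      void first() { sharedCounter++; }

      @Test
      void second() { sharedCounter++; }
    }
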
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java
index 0757e8fb59..f33d9b5b85 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java
index a03f78bd39..6ebe39bbfa 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java
index df4635effb..f9e7c03610 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java
@@ -16,7 +16,7 @@
package com.google.cloud.bigquery.storage.v1beta1.it;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import com.google.api.gax.rpc.ServerStream;
import com.google.cloud.ServiceOptions;
@@ -36,17 +36,18 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* Integration tests for BigQuery Storage API which target long running sessions. These tests can be
* enabled by setting the system property 'bigquery.storage.enable_long_running_tests' to true.
*/
-public class ITBigQueryStorageLongRunningTest {
+class ITBigQueryStorageLongRunningTest {
private static final Logger LOG =
Logger.getLogger(ITBigQueryStorageLongRunningTest.class.getName());
@@ -63,9 +64,10 @@ public class ITBigQueryStorageLongRunningTest {
private static BigQueryStorageClient client;
private static String parentProjectId;
- @BeforeClass
- public static void beforeClass() throws IOException {
- Assume.assumeTrue(LONG_TESTS_DISABLED_MESSAGE, Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY));
+ @BeforeAll
+ static void beforeAll() throws IOException {
+ Assumptions.assumeTrue(
+ Boolean.getBoolean(LONG_TESTS_ENABLED_PROPERTY), LONG_TESTS_DISABLED_MESSAGE);
client = BigQueryStorageClient.create();
parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId());
@@ -75,15 +77,16 @@ public static void beforeClass() throws IOException {
ITBigQueryStorageLongRunningTest.class.getSimpleName(), parentProjectId));
}
- @AfterClass
- public static void afterClass() {
+ @AfterAll
+ static void afterAll() throws InterruptedException {
if (client != null) {
client.close();
+ client.awaitTermination(10, TimeUnit.SECONDS);
}
}
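
Besides the annotation swap, afterAll() now bounds the teardown: close() only initiates shutdown of the client's background resources, and awaitTermination gives them up to ten seconds to finish before the JVM moves on. A sketch of the sequence against a hypothetical minimal client interface (the awaitTermination(long, TimeUnit) signature itself is the one used in the diff):

    import java.util.concurrent.TimeUnit;

    class ShutdownSketch {
      // Hypothetical slice of the client surface, enough for the sketch.
      interface ClosableClient extends AutoCloseable {
        boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException;
      }

      static void shutdown(ClosableClient client) throws Exception {
        if (client != null) {
          client.close();                                // begin shutdown
          client.awaitTermination(10, TimeUnit.SECONDS); // bounded wait
        }
      }
    }
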
@Test
- public void testLongRunningReadSession() throws InterruptedException, ExecutionException {
+ void testLongRunningReadSession() throws InterruptedException, ExecutionException {
// This test reads a larger table with the goal of doing a simple validation of timeout settings
// for a longer running session.
@@ -100,26 +103,21 @@ public void testLongRunningReadSession() throws InterruptedException, ExecutionE
/* parent= */ parentProjectId,
/* requestedStreams= */ 5);
assertEquals(
+ 5,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 5,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
List<Callable<Long>> tasks = new ArrayList<>(session.getStreamsCount());
for (final Stream stream : session.getStreamsList()) {
- tasks.add(
- new Callable<Long>() {
- @Override
- public Long call() throws Exception {
- return readAllRowsFromStream(stream);
- }
- });
+ tasks.add(() -> readAllRowsFromStream(stream));
}
ExecutorService executor = Executors.newFixedThreadPool(tasks.size());
List<Future<Long>> results = executor.invokeAll(tasks);
+ executor.shutdown();
long rowCount = 0;
for (Future<Long> result : results) {
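
Two things changed in the fan-out above: the anonymous Callable collapses into a lambda, and the executor is shut down as soon as invokeAll returns. That ordering is safe because invokeAll blocks until every task has completed, so shutdown() only releases the idle worker threads. A self-contained sketch of the same pattern, with a hypothetical readStream in place of readAllRowsFromStream:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    class FanOutSketch {
      long totalRows(List<String> streams) throws Exception {
        List<Callable<Long>> tasks = new ArrayList<>(streams.size());
        for (String stream : streams) {
          tasks.add(() -> readStream(stream)); // lambda instead of anonymous Callable
        }
        ExecutorService executor = Executors.newFixedThreadPool(tasks.size());
        List<Future<Long>> results = executor.invokeAll(tasks); // blocks until all finish
        executor.shutdown(); // workers are idle now; let them exit
        long rowCount = 0;
        for (Future<Long> result : results) {
          rowCount += result.get();
        }
        return rowCount;
      }

      private long readStream(String stream) { return 0L; } // hypothetical stand-in
    }
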
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java
index bc772f0119..04191a38a4 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java
@@ -18,11 +18,12 @@
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.fail;
import com.google.api.gax.core.FixedCredentialsProvider;
import com.google.api.gax.rpc.ServerStream;
@@ -78,6 +79,8 @@
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import org.apache.avro.Conversions;
import org.apache.avro.LogicalTypes;
@@ -85,12 +88,12 @@
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.avro.util.Utf8;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/** Integration tests for BigQuery Storage API. */
-public class ITBigQueryStorageTest {
+class ITBigQueryStorageTest {
private static final Logger LOG = Logger.getLogger(ITBigQueryStorageTest.class.getName());
private static final String DATASET = RemoteBigQueryHelper.generateDatasetName();
@@ -178,8 +181,8 @@ public class ITBigQueryStorageTest {
+ " \"universe_domain\": \"fake.domain\"\n"
+ "}";
- @BeforeClass
- public static void beforeClass() throws IOException {
+ @BeforeAll
+ static void beforeAll() throws IOException {
client = BigQueryStorageClient.create();
parentProjectId = String.format("projects/%s", ServiceOptions.getDefaultProjectId());
@@ -196,10 +199,11 @@ public static void beforeClass() throws IOException {
LOG.info("Created test dataset: " + DATASET);
}
- @AfterClass
- public static void afterClass() {
+ @AfterAll
+ static void afterAll() throws InterruptedException {
if (client != null) {
client.close();
+ client.awaitTermination(10, TimeUnit.SECONDS);
}
if (bigquery != null) {
@@ -209,7 +213,7 @@ public static void afterClass() {
}
@Test
- public void testSimpleRead() {
+ void testSimpleRead() {
TableReference tableReference =
TableReference.newBuilder()
.setProjectId("bigquery-public-data")
@@ -223,12 +227,12 @@ public void testSimpleRead() {
/* parent= */ parentProjectId,
/* requestedStreams= */ 1);
assertEquals(
+ 1,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 1,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
StreamPosition readPosition =
StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
@@ -246,7 +250,7 @@ public void testSimpleRead() {
}
@Test
- public void testSimpleReadArrow() {
+ void testSimpleReadArrow() {
TableReference tableReference =
TableReference.newBuilder()
.setProjectId("bigquery-public-data")
@@ -263,12 +267,12 @@ public void testSimpleReadArrow() {
.build();
ReadSession session = client.createReadSession(request);
assertEquals(
+ 1,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 1,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
StreamPosition readPosition =
StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
@@ -287,9 +291,9 @@ public void testSimpleReadArrow() {
}
@Test
- public void testRangeType() throws InterruptedException {
+ void testRangeType() throws InterruptedException {
// Create table with Range values.
- String tableName = "test_range_type";
+ String tableName = "test_range_type" + UUID.randomUUID().toString().substring(0, 8);
QueryJobConfiguration createTable =
QueryJobConfiguration.newBuilder(
String.format(
@@ -321,12 +325,12 @@ public void testRangeType() throws InterruptedException {
.build();
ReadSession session = client.createReadSession(createReadSessionRequest);
assertEquals(
+ 1,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 1,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
StreamPosition readPosition =
StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
@@ -345,7 +349,7 @@ public void testRangeType() throws InterruptedException {
}
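
testRangeType, like the other DDL-backed tests below, now appends a slice of a random UUID to its table name so that concurrent or repeated runs of the suite do not collide on the same table. The naming scheme reduces to a one-line helper; eight characters of a UUID are plenty to keep parallel runs apart:

    import java.util.UUID;

    class TableNameSketch {
      static String uniqueName(String base) {
        return base + UUID.randomUUID().toString().substring(0, 8);
      }
    }

One consequence of the scheme is that cleanup has to happen per run (or via a dataset-level default table expiration), since every invocation creates a table under a fresh name.
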
@Test
- public void testSimpleReadAndResume() {
+ void testSimpleReadAndResume() {
TableReference tableReference =
TableReference.newBuilder()
.setProjectId("bigquery-public-data")
@@ -359,12 +363,12 @@ public void testSimpleReadAndResume() {
/* parent= */ parentProjectId,
/* requestedStreams= */ 1);
assertEquals(
+ 1,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 1,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
// We have to read some number of rows in order to be able to resume. More details:
// https://cloud.google.com/bigquery/docs/reference/storage/rpc/google.cloud.bigquery.storage.v1beta1#google.cloud.bigquery.storage.v1beta1.ReadRowsRequest
@@ -389,7 +393,7 @@ public void testSimpleReadAndResume() {
}
@Test
- public void testFilter() throws IOException {
+ void testFilter() throws IOException {
TableReference tableReference =
TableReference.newBuilder()
.setProjectId("bigquery-public-data")
@@ -411,12 +415,12 @@ public void testFilter() throws IOException {
ReadSession session = client.createReadSession(request);
assertEquals(
+ 1,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 1,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
StreamPosition readPosition =
StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
@@ -449,7 +453,7 @@ public void accept(GenericData.Record record) {
}
@Test
- public void testColumnSelection() throws IOException {
+ void testColumnSelection() throws IOException {
TableReference tableReference =
TableReference.newBuilder()
.setProjectId("bigquery-public-data")
@@ -475,12 +479,12 @@ public void testColumnSelection() throws IOException {
ReadSession session = client.createReadSession(request);
assertEquals(
+ 1,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 1,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
StreamPosition readPosition =
StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
@@ -493,16 +497,18 @@ public void testColumnSelection() throws IOException {
String actualSchemaMessage =
String.format(
"Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
- assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
- assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
+ assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+ assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
- assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size());
+ assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage);
assertEquals(
- actualSchemaMessage, Schema.Type.STRING, avroSchema.getField("word").schema().getType());
+ Schema.Type.STRING, avroSchema.getField("word").schema().getType(), actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.LONG,
- avroSchema.getField("word_count").schema().getType());
+ avroSchema.getField("word_count").schema().getType(),
+ actualSchemaMessage);
SimpleRowReader reader = new SimpleRowReader(avroSchema);
@@ -531,7 +537,7 @@ public void accept(GenericData.Record record) {
}
@Test
- public void testReadAtSnapshot() throws InterruptedException, IOException {
+ void testReadAtSnapshot() throws InterruptedException, IOException {
Field intFieldSchema =
Field.newBuilder("col", LegacySQLTypeName.INTEGER)
.setMode(Mode.REQUIRED)
@@ -587,8 +593,9 @@ public void accept(GenericData.Record record) {
}
@Test
- public void testColumnPartitionedTableByDateField() throws InterruptedException, IOException {
- String partitionedTableName = "test_column_partition_table_by_date";
+ void testColumnPartitionedTableByDateField() throws InterruptedException, IOException {
+ String partitionedTableName =
+ "test_column_partition_table_by_date" + UUID.randomUUID().toString().substring(0, 8);
String createTableStatement =
String.format(
" CREATE TABLE %s.%s (num_field INT64, date_field DATE) "
@@ -615,19 +622,19 @@ public void testColumnPartitionedTableByDateField() throws InterruptedException,
List<GenericData.Record> unfilteredRows =
ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null);
- assertEquals("Actual rows read: " + unfilteredRows.toString(), 3, unfilteredRows.size());
+ assertEquals(3, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString());
List<GenericData.Record> partitionFilteredRows =
ReadAllRows(
/* tableReference= */ tableReference,
/* filter= */ "date_field = CAST(\"2019-01-02\" AS DATE)");
assertEquals(
- "Actual rows read: " + partitionFilteredRows.toString(), 1, partitionFilteredRows.size());
+ 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString());
assertEquals(2L, partitionFilteredRows.get(0).get("num_field"));
}
@Test
- public void testIngestionTimePartitionedTable() throws InterruptedException, IOException {
+ void testIngestionTimePartitionedTable() throws InterruptedException, IOException {
Field intFieldSchema =
Field.newBuilder("num_field", LegacySQLTypeName.INTEGER)
.setMode(Mode.REQUIRED)
@@ -667,19 +674,19 @@ public void testIngestionTimePartitionedTable() throws InterruptedException, IOE
List<GenericData.Record> unfilteredRows =
ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null);
- assertEquals("Actual rows read: " + unfilteredRows.toString(), 2, unfilteredRows.size());
+ assertEquals(2, unfilteredRows.size(), "Actual rows read: " + unfilteredRows.toString());
List<GenericData.Record> partitionFilteredRows =
ReadAllRows(
/* tableReference= */ tableReference, /* filter= */ "_PARTITIONDATE > \"2019-01-01\"");
assertEquals(
- "Actual rows read: " + partitionFilteredRows.toString(), 1, partitionFilteredRows.size());
+ 1, partitionFilteredRows.size(), "Actual rows read: " + partitionFilteredRows.toString());
assertEquals(2L, partitionFilteredRows.get(0).get("num_field"));
}
@Test
- public void testBasicSqlTypes() throws InterruptedException, IOException {
- String table_name = "test_basic_sql_types";
+ void testBasicSqlTypes() throws InterruptedException, IOException {
+ String tableName = "test_basic_sql_types" + UUID.randomUUID().toString().substring(0, 8);
String createTableStatement =
String.format(
" CREATE TABLE %s.%s "
@@ -700,20 +707,20 @@ public void testBasicSqlTypes() throws InterruptedException, IOException {
+ " TRUE,"
+ " \"String field value\","
+ " b\"абвгд\"",
- DATASET, table_name);
+ DATASET, tableName);
RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
TableReference tableReference =
TableReference.newBuilder()
- .setTableId(table_name)
+ .setTableId(tableName)
.setDatasetId(DATASET)
.setProjectId(ServiceOptions.getDefaultProjectId())
.build();
List<GenericData.Record> rows =
ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null);
- assertEquals("Actual rows read: " + rows.toString(), 1, rows.size());
+ assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
GenericData.Record record = rows.get(0);
Schema avroSchema = record.getSchema();
@@ -723,22 +730,22 @@ public void testBasicSqlTypes() throws InterruptedException, IOException {
"Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
- assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
- assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
- assertEquals(actualSchemaMessage, 6, avroSchema.getFields().size());
+ assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+ assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+ assertEquals(6, avroSchema.getFields().size(), actualSchemaMessage);
assertEquals(
- actualSchemaMessage, Schema.Type.LONG, avroSchema.getField("int_field").schema().getType());
- assertEquals(rowAssertMessage, 17L, (long) record.get("int_field"));
+ Schema.Type.LONG, avroSchema.getField("int_field").schema().getType(), actualSchemaMessage);
+ assertEquals(17L, (long) record.get("int_field"), rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.BYTES,
- avroSchema.getField("num_field").schema().getType());
+ avroSchema.getField("num_field").schema().getType(),
+ actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
LogicalTypes.decimal(/* precision= */ 38, /* scale= */ 9),
- avroSchema.getField("num_field").schema().getLogicalType());
+ avroSchema.getField("num_field").schema().getLogicalType(),
+ actualSchemaMessage);
BigDecimal actual_num_field =
new Conversions.DecimalConversion()
.fromBytes(
@@ -746,45 +753,46 @@ public void testBasicSqlTypes() throws InterruptedException, IOException {
avroSchema,
avroSchema.getField("num_field").schema().getLogicalType());
assertEquals(
- rowAssertMessage,
BigDecimal.valueOf(/* unscaledVal= */ 1_234_560_000_000L, /* scale= */ 9),
- actual_num_field);
+ actual_num_field,
+ rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.DOUBLE,
- avroSchema.getField("float_field").schema().getType());
+ avroSchema.getField("float_field").schema().getType(),
+ actualSchemaMessage);
assertEquals(
- rowAssertMessage,
/* expected= */ 6.547678d,
/* actual= */ (double) record.get("float_field"),
- /* delta= */ 0.0001);
+ /* delta= */ 0.0001,
+ rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.BOOLEAN,
- avroSchema.getField("bool_field").schema().getType());
- assertEquals(rowAssertMessage, true, record.get("bool_field"));
+ avroSchema.getField("bool_field").schema().getType(),
+ actualSchemaMessage);
+ assertEquals(true, record.get("bool_field"), rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.STRING,
- avroSchema.getField("str_field").schema().getType());
- assertEquals(rowAssertMessage, new Utf8("String field value"), record.get("str_field"));
+ avroSchema.getField("str_field").schema().getType(),
+ actualSchemaMessage);
+ assertEquals(new Utf8("String field value"), record.get("str_field"), rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.BYTES,
- avroSchema.getField("bytes_field").schema().getType());
+ avroSchema.getField("bytes_field").schema().getType(),
+ actualSchemaMessage);
assertArrayEquals(
- rowAssertMessage,
Utf8.getBytesFor("абвгд"),
- ((ByteBuffer) (record.get("bytes_field"))).array());
+ ((ByteBuffer) (record.get("bytes_field"))).array(),
+ rowAssertMessage);
}
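
The num_field assertions above lean on Avro's decimal logical type: BigQuery NUMERIC arrives as BYTES annotated with decimal(38, 9), and Conversions.DecimalConversion recovers the BigDecimal from the raw buffer. A sketch of just that decode step, assuming a record with a num_field column as in the test:

    import java.math.BigDecimal;
    import java.nio.ByteBuffer;
    import org.apache.avro.Conversions;
    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericData;

    class DecimalDecodeSketch {
      static BigDecimal decodeNumeric(GenericData.Record record, Schema rowSchema) {
        Schema fieldSchema = rowSchema.getField("num_field").schema();
        // fromBytes applies the field's decimal(precision, scale) logical type.
        return new Conversions.DecimalConversion()
            .fromBytes(
                (ByteBuffer) record.get("num_field"), fieldSchema, fieldSchema.getLogicalType());
      }
    }
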
@Test
- public void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
- String table_name = "test_date_and_time_sql_types";
+ void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
+ String tableName =
+ "test_date_and_time_sql_types" + UUID.randomUUID().toString().substring(0, 8);
String createTableStatement =
String.format(
" CREATE TABLE %s.%s "
@@ -801,20 +809,20 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
+ " CAST(\"2019-04-30 21:47:59.999999\" AS DATETIME),"
+ " CAST(\"21:47:59.999999\" AS TIME),"
+ " CAST(\"2019-04-30 19:24:19.123456 UTC\" AS TIMESTAMP)",
- DATASET, table_name);
+ DATASET, tableName);
RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
TableReference tableReference =
TableReference.newBuilder()
- .setTableId(table_name)
+ .setTableId(tableName)
.setDatasetId(DATASET)
.setProjectId(ServiceOptions.getDefaultProjectId())
.build();
List<GenericData.Record> rows =
ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null);
- assertEquals("Actual rows read: " + rows.toString(), 1, rows.size());
+ assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
GenericData.Record record = rows.get(0);
Schema avroSchema = record.getSchema();
@@ -824,56 +832,56 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
"Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
- assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
- assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
- assertEquals(actualSchemaMessage, 4, avroSchema.getFields().size());
+ assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+ assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+ assertEquals(4, avroSchema.getFields().size(), actualSchemaMessage);
assertEquals(
- actualSchemaMessage, Schema.Type.INT, avroSchema.getField("date_field").schema().getType());
+ Schema.Type.INT, avroSchema.getField("date_field").schema().getType(), actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
LogicalTypes.date(),
- avroSchema.getField("date_field").schema().getLogicalType());
+ avroSchema.getField("date_field").schema().getLogicalType(),
+ actualSchemaMessage);
assertEquals(
- rowAssertMessage,
LocalDate.of(/* year= */ 2019, /* month= */ 5, /* dayOfMonth= */ 31),
- LocalDate.ofEpochDay((int) record.get("date_field")));
+ LocalDate.ofEpochDay((int) record.get("date_field")),
+ rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.STRING,
- avroSchema.getField("datetime_field").schema().getType());
+ avroSchema.getField("datetime_field").schema().getType(),
+ actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
"datetime",
- avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"));
+ avroSchema.getField("datetime_field").schema().getObjectProp("logicalType"),
+ actualSchemaMessage);
assertEquals(
- rowAssertMessage,
new Utf8("2019-04-30T21:47:59.999999"),
- (Utf8) record.get("datetime_field"));
+ (Utf8) record.get("datetime_field"),
+ rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.LONG,
- avroSchema.getField("time_field").schema().getType());
+ avroSchema.getField("time_field").schema().getType(),
+ actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
LogicalTypes.timeMicros(),
- avroSchema.getField("time_field").schema().getLogicalType());
+ avroSchema.getField("time_field").schema().getLogicalType(),
+ actualSchemaMessage);
assertEquals(
- rowAssertMessage,
LocalTime.of(
/* hour= */ 21, /* minute= */ 47, /* second= */ 59, /* nanoOfSecond= */ 999_999_000),
- LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")));
+ LocalTime.ofNanoOfDay(1_000L * (long) record.get("time_field")),
+ rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.LONG,
- avroSchema.getField("timestamp_field").schema().getType());
+ avroSchema.getField("timestamp_field").schema().getType(),
+ actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
LogicalTypes.timestampMicros(),
- avroSchema.getField("timestamp_field").schema().getLogicalType());
+ avroSchema.getField("timestamp_field").schema().getLogicalType(),
+ actualSchemaMessage);
ZonedDateTime expected_timestamp =
ZonedDateTime.parse(
"2019-04-30T19:24:19Z", DateTimeFormatter.ISO_INSTANT.withZone(ZoneOffset.UTC))
@@ -885,12 +893,12 @@ public void testDateAndTimeSqlTypes() throws InterruptedException, IOException {
/* epochSecond= */ actual_timestamp_micros / 1_000_000,
(actual_timestamp_micros % 1_000_000) * 1_000),
ZoneOffset.UTC);
- assertEquals(rowAssertMessage, expected_timestamp, actual_timestamp);
+ assertEquals(expected_timestamp, actual_timestamp, rowAssertMessage);
}
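
The time and timestamp assertions above decode microsecond counts: time-micros is microseconds since midnight and timestamp-micros is microseconds since the epoch, so both are multiplied out to nanoseconds (or split into seconds plus nanos) before handing off to java.time. A minimal sketch of the two conversions used:

    import java.time.Instant;
    import java.time.LocalTime;
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    class MicrosConversionSketch {
      static LocalTime timeFromMicros(long timeMicros) {
        return LocalTime.ofNanoOfDay(1_000L * timeMicros); // micros -> nanos since midnight
      }

      static ZonedDateTime timestampFromMicros(long timestampMicros) {
        return ZonedDateTime.ofInstant(
            Instant.ofEpochSecond(
                timestampMicros / 1_000_000, (timestampMicros % 1_000_000) * 1_000),
            ZoneOffset.UTC);
      }
    }
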
@Test
- public void testGeographySqlType() throws InterruptedException, IOException {
- String table_name = "test_geography_sql_type";
+ void testGeographySqlType() throws InterruptedException, IOException {
+ String tableName = "test_geography_sql_type" + UUID.randomUUID().toString().substring(0, 8);
String createTableStatement =
String.format(
" CREATE TABLE %s.%s "
@@ -900,20 +908,20 @@ public void testGeographySqlType() throws InterruptedException, IOException {
+ " ) "
+ "AS "
+ " SELECT ST_GEOGPOINT(1.1, 2.2)",
- DATASET, table_name);
+ DATASET, tableName);
RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
TableReference tableReference =
TableReference.newBuilder()
- .setTableId(table_name)
+ .setTableId(tableName)
.setDatasetId(DATASET)
.setProjectId(ServiceOptions.getDefaultProjectId())
.build();
List<GenericData.Record> rows =
ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null);
- assertEquals("Actual rows read: " + rows.toString(), 1, rows.size());
+ assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
GenericData.Record record = rows.get(0);
Schema avroSchema = record.getSchema();
@@ -923,44 +931,45 @@ public void testGeographySqlType() throws InterruptedException, IOException {
"Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
- assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
- assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
- assertEquals(actualSchemaMessage, 1, avroSchema.getFields().size());
+ assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+ assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+ assertEquals(1, avroSchema.getFields().size(), actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.STRING,
- avroSchema.getField("geo_field").schema().getType());
+ avroSchema.getField("geo_field").schema().getType(),
+ actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
"GEOGRAPHY",
- avroSchema.getField("geo_field").schema().getObjectProp("sqlType"));
- assertEquals(rowAssertMessage, new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field"));
+ avroSchema.getField("geo_field").schema().getObjectProp("sqlType"),
+ actualSchemaMessage);
+ assertEquals(new Utf8("POINT(1.1 2.2)"), (Utf8) record.get("geo_field"), rowAssertMessage);
}
@Test
- public void testStructAndArraySqlTypes() throws InterruptedException, IOException {
- String table_name = "test_struct_and_array_sql_types";
+ void testStructAndArraySqlTypes() throws InterruptedException, IOException {
+ String tableName =
+ "test_struct_and_array_sql_types" + UUID.randomUUID().toString().substring(0, 8);
String createTableStatement =
String.format(
" CREATE TABLE %s.%s (array_field ARRAY, struct_field STRUCT NOT NULL) OPTIONS( description=\"a"
+ " table with array and time column types\" ) AS SELECT [1, 2, 3], "
+ " (10, 'abc')",
- DATASET, table_name);
+ DATASET, tableName);
RunQueryJobAndExpectSuccess(QueryJobConfiguration.newBuilder(createTableStatement).build());
TableReference tableReference =
TableReference.newBuilder()
- .setTableId(table_name)
+ .setTableId(tableName)
.setDatasetId(DATASET)
.setProjectId(ServiceOptions.getDefaultProjectId())
.build();
List<GenericData.Record> rows =
ReadAllRows(/* tableReference= */ tableReference, /* filter= */ null);
- assertEquals("Actual rows read: " + rows.toString(), 1, rows.size());
+ assertEquals(1, rows.size(), "Actual rows read: " + rows.toString());
GenericData.Record record = rows.get(0);
Schema avroSchema = record.getSchema();
@@ -970,48 +979,47 @@ public void testStructAndArraySqlTypes() throws InterruptedException, IOExceptio
"Unexpected schema. Actual schema:%n%s", avroSchema.toString(/* pretty= */ true));
String rowAssertMessage = String.format("Row not matching expectations: %s", record.toString());
- assertEquals(actualSchemaMessage, Schema.Type.RECORD, avroSchema.getType());
- assertEquals(actualSchemaMessage, "__root__", avroSchema.getName());
- assertEquals(actualSchemaMessage, 2, avroSchema.getFields().size());
+ assertEquals(Schema.Type.RECORD, avroSchema.getType(), actualSchemaMessage);
+ assertEquals("__root__", avroSchema.getName(), actualSchemaMessage);
+ assertEquals(2, avroSchema.getFields().size(), actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.ARRAY,
- avroSchema.getField("array_field").schema().getType());
+ avroSchema.getField("array_field").schema().getType(),
+ actualSchemaMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.LONG,
- avroSchema.getField("array_field").schema().getElementType().getType());
+ avroSchema.getField("array_field").schema().getElementType().getType(),
+ actualSchemaMessage);
assertArrayEquals(
- rowAssertMessage,
new Long[] {1L, 2L, 3L},
- ((GenericData.Array<Long>) record.get("array_field")).toArray(new Long[0]));
+ ((GenericData.Array<Long>) record.get("array_field")).toArray(new Long[0]),
+ rowAssertMessage);
// Validate the STRUCT field and its members.
Schema structSchema = avroSchema.getField("struct_field").schema();
- assertEquals(actualSchemaMessage, Schema.Type.RECORD, structSchema.getType());
+ assertEquals(Schema.Type.RECORD, structSchema.getType(), actualSchemaMessage);
GenericData.Record structRecord = (GenericData.Record) record.get("struct_field");
assertEquals(
- actualSchemaMessage,
Schema.Type.LONG,
- structSchema.getField("int_field").schema().getType());
- assertEquals(rowAssertMessage, 10L, (long) structRecord.get("int_field"));
+ structSchema.getField("int_field").schema().getType(),
+ actualSchemaMessage);
+ assertEquals(10L, (long) structRecord.get("int_field"), rowAssertMessage);
assertEquals(
- actualSchemaMessage,
Schema.Type.STRING,
- structSchema.getField("str_field").schema().getType());
- assertEquals(rowAssertMessage, new Utf8("abc"), structRecord.get("str_field"));
+ structSchema.getField("str_field").schema().getType(),
+ actualSchemaMessage);
+ assertEquals(new Utf8("abc"), structRecord.get("str_field"), rowAssertMessage);
}
@Test
- public void testUniverseDomainWithInvalidUniverseDomain() throws IOException {
+ void testUniverseDomainWithInvalidUniverseDomain() throws IOException {
BigQueryStorageSettings bigQueryStorageSettings =
BigQueryStorageSettings.newBuilder()
.setCredentialsProvider(
- FixedCredentialsProvider.create(
- loadCredentials(FAKE_JSON_CRED_WITH_INVALID_DOMAIN)))
+ FixedCredentialsProvider.create(loadCredentials(FAKE_JSON_CRED_WITH_GOOGLE_DOMAIN)))
.setUniverseDomain("invalid.domain")
.build();
@@ -1024,23 +1032,23 @@ public void testUniverseDomainWithInvalidUniverseDomain() throws IOException {
.setTableId("shakespeare")
.build();
- try {
- localClient.createReadSession(
- /* tableReference= */ tableReference,
- /* parent= */ parentProjectId,
- /* requestedStreams= */ 1);
- fail("RPCs to invalid universe domain should fail");
- } catch (UnauthenticatedException e) {
- assertThat(
- (e.getMessage()
- .contains("does not match the universe domain found in the credentials")))
- .isTrue();
- }
+ UnauthenticatedException e =
+ assertThrows(
+ UnauthenticatedException.class,
+ () ->
+ localClient.createReadSession(
+ /* tableReference= */ tableReference,
+ /* parent= */ parentProjectId,
+ /* requestedStreams= */ 1));
+ assertThat(
+ (e.getMessage()
+ .contains("does not match the universe domain found in the credentials")))
+ .isTrue();
localClient.close();
}
@Test
- public void testInvalidUniverseDomainWithMismatchCredentials() throws IOException {
+ void testInvalidUniverseDomainWithMismatchCredentials() throws IOException {
BigQueryStorageSettings bigQueryStorageSettings =
BigQueryStorageSettings.newBuilder()
.setCredentialsProvider(
@@ -1058,23 +1066,23 @@ public void testInvalidUniverseDomainWithMismatchCredentials() throws IOExceptio
.setTableId("shakespeare")
.build();
- try {
- localClient.createReadSession(
- /* tableReference= */ tableReference,
- /* parent= */ parentProjectId,
- /* requestedStreams= */ 1);
- fail("RPCs to invalid universe domain should fail");
- } catch (UnauthenticatedException e) {
- assertThat(
- (e.getMessage()
- .contains("does not match the universe domain found in the credentials")))
- .isTrue();
- }
+ UnauthenticatedException e =
+ assertThrows(
+ UnauthenticatedException.class,
+ () ->
+ localClient.createReadSession(
+ /* tableReference= */ tableReference,
+ /* parent= */ parentProjectId,
+ /* requestedStreams= */ 1));
+ assertThat(
+ (e.getMessage()
+ .contains("does not match the universe domain found in the credentials")))
+ .isTrue();
localClient.close();
}
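Both universe-domain tests are converted from the try/fail/catch idiom to assertThrows, which fails automatically when nothing is thrown and returns the exception so its message can still be inspected. A minimal sketch of the idiom, using a hypothetical failing call:

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

class AssertThrowsSketch {
  @Test
  void returnsTheThrownException() {
    IllegalStateException e = assertThrows(IllegalStateException.class, this::failingRpc);
    assertTrue(e.getMessage().contains("universe domain"));
  }

  private void failingRpc() { // stand-in for the real RPC under test
    throw new IllegalStateException("universe domain mismatch");
  }
}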
@Test
- public void testUniverseDomainWithMatchingDomain() throws IOException {
+ void testUniverseDomainWithMatchingDomain() throws IOException {
// Test a valid domain using the default credentials and Google default universe domain.
BigQueryStorageSettings bigQueryStorageSettings =
BigQueryStorageSettings.newBuilder().setUniverseDomain("googleapis.com").build();
@@ -1094,12 +1102,12 @@ public void testUniverseDomainWithMatchingDomain() throws IOException {
/* requestedStreams= */ 1);
assertEquals(
+ 1,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 1,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
StreamPosition readPosition =
StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
@@ -1117,10 +1125,10 @@ public void testUniverseDomainWithMatchingDomain() throws IOException {
localClient.close();
}
- public void testUniverseDomain() throws IOException {
+ void testUniverseDomain() throws IOException {
// This test is not yet part of the presubmit integration tests, as it requires apis-tpclp.goog
// universe domain credentials.
- // Test a valid read session in the universe domain gdutst.
+ // Test a valid read session against the apis-tpclp.goog universe domain.
BigQueryStorageSettings bigQueryStorageSettings =
BigQueryStorageSettings.newBuilder().setUniverseDomain("apis-tpclp.goog").build();
BigQueryStorageClient localClient = BigQueryStorageClient.create(bigQueryStorageSettings);
@@ -1224,12 +1232,12 @@ private void ProcessRowsAtSnapshot(
ReadSession session = client.createReadSession(createSessionRequestBuilder.build());
assertEquals(
+ 1,
+ session.getStreamsCount(),
String.format(
"Did not receive expected number of streams for table reference '%s' CreateReadSession"
+ " response:%n%s",
- TextFormat.printer().shortDebugString(tableReference), session.toString()),
- 1,
- session.getStreamsCount());
+ TextFormat.printer().shortDebugString(tableReference), session.toString()));
StreamPosition readPosition =
StreamPosition.newBuilder().setStream(session.getStreams(0)).build();
@@ -1308,16 +1316,15 @@ private Job RunQueryJobAndExpectSuccess(QueryJobConfiguration configuration)
assertNotNull(completedJob);
assertNull(
+ /* object= */ completedJob.getStatus().getError(),
/* message= */ "Received a job status that is not a success: "
- + completedJob.getStatus().toString(),
- /* object= */ completedJob.getStatus().getError());
+ + completedJob.getStatus().toString());
return completedJob;
}
static ServiceAccountCredentials loadCredentials(String credentialFile) {
- try {
- InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes());
+ try (InputStream keyStream = new ByteArrayInputStream(credentialFile.getBytes())) {
return ServiceAccountCredentials.fromStream(keyStream);
} catch (IOException e) {
fail("Couldn't create fake JSON credentials.");
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java
index 2520933863..5b685fddcd 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java
@@ -62,7 +62,7 @@ public SimpleRowReader(Schema schema) {
* @param avroRows object returned from the ReadRowsResponse.
* @param rowConsumer consumer that accepts GenericRecord.
*/
- public void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException {
+ void processRows(AvroRows avroRows, AvroRowConsumer rowConsumer) throws IOException {
Preconditions.checkNotNull(avroRows);
Preconditions.checkNotNull(rowConsumer);
decoder =
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java
index a81a1da255..ac42094776 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java
@@ -36,16 +36,13 @@
import com.google.protobuf.Empty;
import java.time.Duration;
import java.util.Set;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
-@RunWith(JUnit4.class)
public class EnhancedBigQueryStorageStubSettingsTest {
@Test
- public void testSettingsArePreserved() {
+ void testSettingsArePreserved() {
String endpoint = "some.other.host:123";
CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class);
Duration watchdogInterval = Duration.ofSeconds(12);
@@ -104,14 +101,14 @@ private void verifySettings(
}
@Test
- public void testCreateReadSessionSettings() {
+ void testCreateReadSessionSettings() {
UnaryCallSettings.Builder<CreateReadSessionRequest, ReadSession> builder =
EnhancedBigQueryStorageStubSettings.newBuilder().createReadSessionSettings();
verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings());
}
@Test
- public void testReadRowsSettings() {
+ void testReadRowsSettings() {
ServerStreamingCallSettings.Builder<ReadRowsRequest, ReadRowsResponse> builder =
EnhancedBigQueryStorageStubSettings.newBuilder().readRowsSettings();
assertThat(builder.getRetryableCodes()).contains(Code.UNAVAILABLE);
@@ -127,7 +124,7 @@ public void testReadRowsSettings() {
}
@Test
- public void testBatchCreateReadSessionStreamsSettings() {
+ void testBatchCreateReadSessionStreamsSettings() {
UnaryCallSettings.Builder<
BatchCreateReadSessionStreamsRequest, BatchCreateReadSessionStreamsResponse>
builder =
@@ -137,14 +134,14 @@ public void testBatchCreateReadSessionStreamsSettings() {
}
@Test
- public void testFinalizeStreamSettings() {
+ void testFinalizeStreamSettings() {
UnaryCallSettings.Builder<FinalizeStreamRequest, Empty> builder =
EnhancedBigQueryStorageStubSettings.newBuilder().finalizeStreamSettings();
verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings());
}
@Test
- public void testSplitReadStreamSettings() {
+ void testSplitReadStreamSettings() {
UnaryCallSettings.Builder<SplitReadStreamRequest, SplitReadStreamResponse> builder =
EnhancedBigQueryStorageStubSettings.newBuilder().splitReadStreamSettings();
verifyRetrySettings(builder.getRetryableCodes(), builder.getRetrySettings());
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java
index c2e2df1ebb..b319042b59 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java
@@ -31,15 +31,12 @@
import com.google.cloud.bigquery.storage.v1beta1.Storage.StreamPosition;
import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto.TableReference;
import java.util.regex.Pattern;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-@RunWith(JUnit4.class)
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
public class ResourceHeaderTest {
private static final TableReference TEST_TABLE_REFERENCE =
@@ -54,7 +51,7 @@ public class ResourceHeaderTest {
private static final Stream TEST_STREAM = Stream.newBuilder().setName("streamName").build();
- private static final String NAME = "resource-header-test:123";
+ private static final String NAME = "resource-header-test:123-v1beta1";
private static final String HEADER_NAME = "x-goog-request-params";
@@ -80,14 +77,14 @@ public class ResourceHeaderTest {
private LocalChannelProvider channelProvider;
private BigQueryStorageClient client;
- @BeforeClass
+ @BeforeAll
public static void setUpClass() throws Exception {
server = new InProcessServer<>(new BigQueryStorageImplBase() {}, NAME);
server.start();
}
- @Before
- public void setUp() throws Exception {
+ @BeforeEach
+ void setUp() throws Exception {
channelProvider = LocalChannelProvider.create(NAME);
BigQueryStorageSettings.Builder settingsBuilder =
BigQueryStorageSettings.newBuilder()
@@ -97,19 +94,19 @@ public void setUp() throws Exception {
client = BigQueryStorageClient.create(settingsBuilder.build());
}
- @After
- public void tearDown() throws Exception {
+ @AfterEach
+ void tearDown() throws Exception {
client.close();
}
- @AfterClass
- public static void tearDownClass() throws Exception {
+ @AfterAll
+ static void tearDownClass() throws Exception {
server.stop();
server.blockUntilShutdown();
}
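ResourceHeaderTest exercises the full lifecycle mapping: @BeforeClass/@AfterClass become the static @BeforeAll/@AfterAll, and @Before/@After become @BeforeEach/@AfterEach; only the annotations and visibility change, not the ordering guarantees. In sketch form:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class LifecycleSketch {
  @BeforeAll
  static void setUpClass() {} // was @BeforeClass, must stay static

  @BeforeEach
  void setUp() {} // was @Before

  @Test
  void test() {}

  @AfterEach
  void tearDown() {} // was @After

  @AfterAll
  static void tearDownClass() {} // was @AfterClass, must stay static
}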
@Test
- public void createReadSessionTest() {
+ void createReadSessionTest() {
try {
client.createReadSession(TEST_TABLE_REFERENCE, "parents/project", 1);
} catch (UnimplementedException e) {
@@ -119,7 +116,7 @@ public void createReadSessionTest() {
}
@Test
- public void readRowsTest() {
+ void readRowsTest() {
try {
ReadRowsRequest request =
ReadRowsRequest.newBuilder()
@@ -134,7 +131,7 @@ public void readRowsTest() {
}
@Test
- public void batchCreateReadStreamsForSessionTest() {
+ void batchCreateReadStreamsForSessionTest() {
try {
client.batchCreateReadSessionStreams(TEST_SESSION, 1);
} catch (UnimplementedException e) {
@@ -145,7 +142,7 @@ public void batchCreateReadStreamsForSessionTest() {
}
@Test
- public void finalizeStreamTest() {
+ void finalizeStreamTest() {
try {
client.finalizeStream(TEST_STREAM);
} catch (UnimplementedException e) {
@@ -156,7 +153,7 @@ public void finalizeStreamTest() {
}
@Test
- public void splitReadStreamTest() {
+ void splitReadStreamTest() {
try {
client.splitReadStream(TEST_STREAM);
} catch (UnimplementedException e) {
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java
index 714e30b390..5f734ae746 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java
@@ -15,9 +15,12 @@
*/
package com.google.cloud.bigquery.storage.v1beta1.stub.readrows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
import com.google.api.gax.core.NoCredentialsProvider;
-import com.google.api.gax.grpc.GrpcTransportChannel;
-import com.google.api.gax.rpc.FixedTransportChannelProvider;
+import com.google.api.gax.grpc.testing.InProcessServer;
+import com.google.api.gax.grpc.testing.LocalChannelProvider;
import com.google.api.gax.rpc.ServerStream;
import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageClient;
import com.google.cloud.bigquery.storage.v1beta1.BigQueryStorageGrpc.BigQueryStorageImplBase;
@@ -29,52 +32,49 @@
import com.google.common.collect.Queues;
import io.grpc.Status.Code;
import io.grpc.stub.StreamObserver;
-import io.grpc.testing.GrpcServerRule;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.runners.MockitoJUnitRunner;
+import java.util.UUID;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
-@RunWith(MockitoJUnitRunner.class)
-public class ReadRowsRetryTest {
+class ReadRowsRetryTest {
private static final Stream DEFAULT_STREAM = Stream.newBuilder().setName("defaultStream").build();
- @Rule public GrpcServerRule serverRule = new GrpcServerRule();
-
private TestBigQueryStorageService service;
private BigQueryStorageClient client;
+ private InProcessServer<TestBigQueryStorageService> server;
+ private LocalChannelProvider channelProvider;
- @Before
- public void setUp() throws IOException {
+ @BeforeEach
+ void setUp() throws Exception {
service = new TestBigQueryStorageService();
- serverRule.getServiceRegistry().addService(service);
+ String serverName = UUID.randomUUID().toString();
+ server = new InProcessServer<>(service, serverName);
+ server.start();
+ channelProvider = LocalChannelProvider.create(serverName);
BigQueryStorageSettings settings =
BigQueryStorageSettings.newBuilder()
.setCredentialsProvider(NoCredentialsProvider.create())
- .setTransportChannelProvider(
- FixedTransportChannelProvider.create(
- GrpcTransportChannel.create(serverRule.getChannel())))
+ .setTransportChannelProvider(channelProvider)
.build();
client = BigQueryStorageClient.create(settings);
}
- @After
- public void tearDown() throws Exception {
+ @AfterEach
+ void tearDown() throws Exception {
client.close();
+ server.stop();
+ server.blockUntilShutdown();
}
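Because JUnit 5 has no @Rule, GrpcServerRule is replaced with an explicitly managed InProcessServer plus a LocalChannelProvider bound to a random server name, which also keeps concurrently running test classes from colliding. A sketch of that lifecycle, generic over any gRPC service implementation:

import com.google.api.gax.grpc.testing.InProcessServer;
import com.google.api.gax.grpc.testing.LocalChannelProvider;
import io.grpc.BindableService;
import java.util.UUID;

class InProcessServerSketch<T extends BindableService> {
  private InProcessServer<T> server;
  private LocalChannelProvider channelProvider;

  void start(T service) throws Exception {
    String serverName = UUID.randomUUID().toString(); // unique per test instance
    server = new InProcessServer<>(service, serverName);
    server.start();
    channelProvider = LocalChannelProvider.create(serverName); // wire clients to it
  }

  void stop() throws Exception {
    server.stop();
    server.blockUntilShutdown(); // let in-flight calls drain before the next test
  }
}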
@Test
- public void happyPathTest() {
+ void happyPathTest() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
service.expectations.add(
RpcExpectation.create()
@@ -82,11 +82,11 @@ public void happyPathTest() {
.respondWithNumberOfRows(10)
.respondWithNumberOfRows(7));
- Assert.assertEquals(17, getRowCount(request));
+ assertEquals(17, getRowCount(request));
}
@Test
- public void immediateRetryTest() {
+ void immediateRetryTest() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
service.expectations.add(
RpcExpectation.create()
@@ -99,11 +99,11 @@ public void immediateRetryTest() {
.respondWithNumberOfRows(10)
.respondWithNumberOfRows(7));
- Assert.assertEquals(17, getRowCount(request));
+ assertEquals(17, getRowCount(request));
}
@Test
- public void multipleRetryTestWithZeroInitialOffset() {
+ void multipleRetryTestWithZeroInitialOffset() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
service.expectations.add(
RpcExpectation.create()
@@ -121,11 +121,11 @@ public void multipleRetryTestWithZeroInitialOffset() {
service.expectations.add(
RpcExpectation.create().expectRequest("fake-stream", 22).respondWithNumberOfRows(6));
- Assert.assertEquals(28, getRowCount(request));
+ assertEquals(28, getRowCount(request));
}
@Test
- public void multipleRetryTestWithNonZeroInitialOffset() {
+ void multipleRetryTestWithNonZeroInitialOffset() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 17);
service.expectations.add(
RpcExpectation.create()
@@ -143,11 +143,11 @@ public void multipleRetryTestWithNonZeroInitialOffset() {
service.expectations.add(
RpcExpectation.create().expectRequest("fake-stream", 39).respondWithNumberOfRows(3));
- Assert.assertEquals(25, getRowCount(request));
+ assertEquals(25, getRowCount(request));
}
@Test
- public void errorAtTheVeryEndTest() {
+ void errorAtTheVeryEndTest() {
ReadRowsRequest request = RpcExpectation.createRequest("fake-stream", 0);
service.expectations.add(
RpcExpectation.create()
@@ -159,7 +159,7 @@ public void errorAtTheVeryEndTest() {
service.expectations.add(
RpcExpectation.create().expectRequest("fake-stream", 17).respondWithNumberOfRows(0));
- Assert.assertEquals(17, getRowCount(request));
+ assertEquals(17, getRowCount(request));
}
private int getRowCount(ReadRowsRequest request) {
@@ -183,17 +183,15 @@ public void readRows(
RpcExpectation expectedRpc = expectations.poll();
currentRequestIndex++;
- Assert.assertNotNull(
- "Unexpected request #" + currentRequestIndex + ": " + request.toString(), expectedRpc);
-
- Assert.assertEquals(
+ assertNotNull(
+ expectedRpc, "Unexpected request #" + currentRequestIndex + ": " + request.toString());
+ assertEquals(
+ expectedRpc.expectedRequest,
+ request,
"Expected request #"
+ currentRequestIndex
+ " does not match actual request: "
- + request.toString(),
- expectedRpc.expectedRequest,
- request);
-
+ + request.toString());
for (ReadRowsResponse response : expectedRpc.responses) {
responseObserver.onNext(response);
}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptorTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptorTest.java
deleted file mode 100644
index 8e08418237..0000000000
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BQTableSchemaToProtoDescriptorTest.java
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * Copyright 2020 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.google.cloud.bigquery.storage.v1beta2;
-
-import static org.junit.Assert.*;
-
-import com.google.cloud.bigquery.storage.test.JsonTest.*;
-import com.google.cloud.bigquery.storage.test.SchemaTest.*;
-import com.google.common.collect.ImmutableMap;
-import com.google.protobuf.Descriptors.Descriptor;
-import com.google.protobuf.Descriptors.FieldDescriptor;
-import java.util.HashMap;
-import java.util.Map;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
-
-@RunWith(JUnit4.class)
-public class BQTableSchemaToProtoDescriptorTest {
- // This is a map between the TableFieldSchema.Type and the descriptor it is supposed to
- // produce. The produced descriptor will be used to check against the entry values here.
- private static ImmutableMap<TableFieldSchema.Type, Descriptor>
- BQTableTypeToCorrectProtoDescriptorTest =
- new ImmutableMap.Builder<TableFieldSchema.Type, Descriptor>()
- .put(TableFieldSchema.Type.BOOL, BoolType.getDescriptor())
- .put(TableFieldSchema.Type.BYTES, BytesType.getDescriptor())
- .put(TableFieldSchema.Type.DATE, Int32Type.getDescriptor())
- .put(TableFieldSchema.Type.DATETIME, Int64Type.getDescriptor())
- .put(TableFieldSchema.Type.DOUBLE, DoubleType.getDescriptor())
- .put(TableFieldSchema.Type.GEOGRAPHY, StringType.getDescriptor())
- .put(TableFieldSchema.Type.INT64, Int64Type.getDescriptor())
- .put(TableFieldSchema.Type.NUMERIC, BytesType.getDescriptor())
- .put(TableFieldSchema.Type.STRING, StringType.getDescriptor())
- .put(TableFieldSchema.Type.TIME, Int64Type.getDescriptor())
- .put(TableFieldSchema.Type.TIMESTAMP, Int64Type.getDescriptor())
- .build();
-
- // Creates mapping from descriptor to how many times it was reused.
- private void mapDescriptorToCount(Descriptor descriptor, HashMap<String, Integer> map) {
- for (FieldDescriptor field : descriptor.getFields()) {
- if (field.getType() == FieldDescriptor.Type.MESSAGE) {
- Descriptor subDescriptor = field.getMessageType();
- String messageName = subDescriptor.getName();
- if (map.containsKey(messageName)) {
- map.put(messageName, map.get(messageName) + 1);
- } else {
- map.put(messageName, 1);
- }
- mapDescriptorToCount(subDescriptor, map);
- }
- }
- }
-
- private void isDescriptorEqual(Descriptor convertedProto, Descriptor originalProto) {
- // Check same number of fields
- assertEquals(convertedProto.getFields().size(), originalProto.getFields().size());
- for (FieldDescriptor convertedField : convertedProto.getFields()) {
- // Check field name
- FieldDescriptor originalField = originalProto.findFieldByName(convertedField.getName());
- assertNotNull(originalField);
- // Check type
- FieldDescriptor.Type convertedType = convertedField.getType();
- FieldDescriptor.Type originalType = originalField.getType();
- assertEquals(convertedField.getName(), convertedType, originalType);
- // Check mode
- assertTrue(
- (originalField.isRepeated() == convertedField.isRepeated())
- && (originalField.isRequired() == convertedField.isRequired())
- && (originalField.isOptional() == convertedField.isOptional()));
- // Recursively check nested messages
- if (convertedType == FieldDescriptor.Type.MESSAGE) {
- isDescriptorEqual(convertedField.getMessageType(), originalField.getMessageType());
- }
- }
- }
-
- @Test
- public void testSimpleTypes() throws Exception {
- for (Map.Entry<TableFieldSchema.Type, Descriptor> entry :
- BQTableTypeToCorrectProtoDescriptorTest.entrySet()) {
- final TableFieldSchema tableFieldSchema =
- TableFieldSchema.newBuilder()
- .setType(entry.getKey())
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_field_type")
- .build();
- final TableSchema tableSchema =
- TableSchema.newBuilder().addFields(0, tableFieldSchema).build();
- final Descriptor descriptor =
- BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema);
- isDescriptorEqual(descriptor, entry.getValue());
- }
- }
-
- @Test
- public void testStructSimple() throws Exception {
- final TableFieldSchema StringType =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRING)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_field_type")
- .build();
- final TableFieldSchema tableFieldSchema =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRUCT)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_field_type")
- .addFields(0, StringType)
- .build();
- final TableSchema tableSchema = TableSchema.newBuilder().addFields(0, tableFieldSchema).build();
- final Descriptor descriptor =
- BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema);
- isDescriptorEqual(descriptor, MessageType.getDescriptor());
- }
-
- @Test
- public void testStructComplex() throws Exception {
- final TableFieldSchema test_int =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_int")
- .build();
- final TableFieldSchema test_string =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRING)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("test_string")
- .build();
- final TableFieldSchema test_bytes =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.BYTES)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .setName("test_bytes")
- .build();
- final TableFieldSchema test_bool =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.BOOL)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_bool")
- .build();
- final TableFieldSchema test_double =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.DOUBLE)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("test_double")
- .build();
- final TableFieldSchema test_date =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.DATE)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .setName("test_date")
- .build();
- final TableFieldSchema test_datetime =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.DATETIME)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_datetime")
- .build();
- final TableFieldSchema test_datetime_str =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.DATETIME)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("test_datetime_str")
- .build();
- final TableFieldSchema ComplexLvl2 =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRUCT)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .addFields(0, test_int)
- .setName("complex_lvl2")
- .build();
- final TableFieldSchema ComplexLvl1 =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRUCT)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .addFields(0, test_int)
- .addFields(1, ComplexLvl2)
- .setName("complex_lvl1")
- .build();
- final TableFieldSchema TEST_NUMERIC =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_numeric")
- .build();
- final TableFieldSchema TEST_GEO =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.GEOGRAPHY)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_geo")
- .build();
- final TableFieldSchema TEST_TIMESTAMP =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.TIMESTAMP)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_timestamp")
- .build();
- final TableFieldSchema TEST_TIME =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.TIME)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_time")
- .build();
- final TableFieldSchema TEST_TIME_STR =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.TIME)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_time_str")
- .build();
- final TableFieldSchema TEST_NUMERIC_REPEATED =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("test_numeric_repeated")
- .build();
- final TableFieldSchema TEST_NUMERIC_STR =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_numeric_str")
- .build();
- final TableFieldSchema TEST_NUMERIC_SHORT =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_numeric_short")
- .build();
- final TableFieldSchema TEST_NUMERIC_INT =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_numeric_int")
- .build();
- final TableFieldSchema TEST_NUMERIC_LONG =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_numeric_long")
- .build();
- final TableFieldSchema TEST_NUMERIC_FLOAT =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_numeric_float")
- .build();
- final TableFieldSchema TEST_NUMERIC_DOUBLE =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_numeric_double")
- .build();
- final TableFieldSchema TEST_BIGNUMERIC =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_bignumeric")
- .build();
- final TableFieldSchema TEST_BIGNUMERIC_STR =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("test_bignumeric_str")
- .build();
-
- final TableFieldSchema TEST_BIGNUMERIC_SHORT =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_bignumeric_short")
- .build();
- final TableFieldSchema TEST_BIGNUMERIC_INT =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_bignumeric_int")
- .build();
- final TableFieldSchema TEST_BIGNUMERIC_LONG =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_bignumeric_long")
- .build();
- final TableFieldSchema TEST_BIGNUMERIC_FLOAT =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_bignumeric_float")
- .build();
- final TableFieldSchema TEST_BIGNUMERIC_DOUBLE =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.NUMERIC)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_bignumeric_double")
- .build();
- final TableFieldSchema TEST_INTERVAL =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INTERVAL)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_interval")
- .build();
- final TableFieldSchema TEST_JSON =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.JSON)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("test_json")
- .build();
- final TableSchema tableSchema =
- TableSchema.newBuilder()
- .addFields(0, test_int)
- .addFields(1, test_string)
- .addFields(2, test_bytes)
- .addFields(3, test_bool)
- .addFields(4, test_double)
- .addFields(5, test_date)
- .addFields(6, test_datetime)
- .addFields(7, test_datetime_str)
- .addFields(8, ComplexLvl1)
- .addFields(9, ComplexLvl2)
- .addFields(10, TEST_NUMERIC)
- .addFields(11, TEST_GEO)
- .addFields(12, TEST_TIMESTAMP)
- .addFields(13, TEST_TIME)
- .addFields(14, TEST_TIME_STR)
- .addFields(15, TEST_NUMERIC_REPEATED)
- .addFields(16, TEST_NUMERIC_STR)
- .addFields(17, TEST_NUMERIC_SHORT)
- .addFields(18, TEST_NUMERIC_INT)
- .addFields(19, TEST_NUMERIC_LONG)
- .addFields(20, TEST_NUMERIC_FLOAT)
- .addFields(21, TEST_NUMERIC_DOUBLE)
- .addFields(22, TEST_BIGNUMERIC)
- .addFields(23, TEST_BIGNUMERIC_STR)
- .addFields(24, TEST_BIGNUMERIC_SHORT)
- .addFields(25, TEST_BIGNUMERIC_INT)
- .addFields(26, TEST_BIGNUMERIC_FLOAT)
- .addFields(27, TEST_BIGNUMERIC_DOUBLE)
- .addFields(28, TEST_BIGNUMERIC_LONG)
- .addFields(29, TEST_INTERVAL)
- .addFields(30, TEST_JSON)
- .build();
- final Descriptor descriptor =
- BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema);
- isDescriptorEqual(descriptor, ComplexRoot.getDescriptor());
- }
-
- @Test
- public void testCasingComplexStruct() throws Exception {
- final TableFieldSchema required =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .setName("tEsT_ReQuIrEd")
- .build();
- final TableFieldSchema repeated =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("tESt_repEATed")
- .build();
- final TableFieldSchema optional =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_opTIONal")
- .build();
- final TableFieldSchema test_int =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("TEST_INT")
- .build();
- final TableFieldSchema test_string =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRING)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("TEST_STRING")
- .build();
- final TableFieldSchema test_bytes =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.BYTES)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .setName("TEST_BYTES")
- .build();
- final TableFieldSchema test_bool =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.BOOL)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("TEST_BOOL")
- .build();
- final TableFieldSchema test_double =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.DOUBLE)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("TEST_DOUBLE")
- .build();
- final TableFieldSchema test_date =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.DATE)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .setName("TEST_DATE")
- .build();
- final TableFieldSchema option_test =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRUCT)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .addFields(0, required)
- .addFields(1, repeated)
- .addFields(2, optional)
- .setName("option_test")
- .build();
- final TableSchema tableSchema =
- TableSchema.newBuilder()
- .addFields(0, test_int)
- .addFields(1, test_string)
- .addFields(2, test_bytes)
- .addFields(3, test_bool)
- .addFields(4, test_double)
- .addFields(5, test_date)
- .addFields(6, option_test)
- .build();
- final Descriptor descriptor =
- BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema);
- isDescriptorEqual(descriptor, CasingComplex.getDescriptor());
- }
-
- @Test
- public void testOptions() throws Exception {
- final TableFieldSchema required =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.REQUIRED)
- .setName("test_required")
- .build();
- final TableFieldSchema repeated =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.REPEATED)
- .setName("test_repeated")
- .build();
- final TableFieldSchema optional =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_optional")
- .build();
- final TableSchema tableSchema =
- TableSchema.newBuilder()
- .addFields(0, required)
- .addFields(1, repeated)
- .addFields(2, optional)
- .build();
- final Descriptor descriptor =
- BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema);
- isDescriptorEqual(descriptor, OptionTest.getDescriptor());
- }
-
- @Test
- public void testDescriptorReuseDuringCreation() throws Exception {
- final TableFieldSchema test_int =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.INT64)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("test_int")
- .build();
- final TableFieldSchema reuse_lvl2 =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRUCT)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("reuse_lvl2")
- .addFields(0, test_int)
- .build();
- final TableFieldSchema reuse_lvl1 =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRUCT)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("reuse_lvl1")
- .addFields(0, test_int)
- .addFields(0, reuse_lvl2)
- .build();
- final TableFieldSchema reuse_lvl1_1 =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRUCT)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("reuse_lvl1_1")
- .addFields(0, test_int)
- .addFields(0, reuse_lvl2)
- .build();
- final TableFieldSchema reuse_lvl1_2 =
- TableFieldSchema.newBuilder()
- .setType(TableFieldSchema.Type.STRUCT)
- .setMode(TableFieldSchema.Mode.NULLABLE)
- .setName("reuse_lvl1_2")
- .addFields(0, test_int)
- .addFields(0, reuse_lvl2)
- .build();
- final TableSchema tableSchema =
- TableSchema.newBuilder()
- .addFields(0, reuse_lvl1)
- .addFields(1, reuse_lvl1_1)
- .addFields(2, reuse_lvl1_2)
- .build();
- final Descriptor descriptor =
- BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema);
- HashMap<String, Integer> descriptorToCount = new HashMap<>();
- mapDescriptorToCount(descriptor, descriptorToCount);
- assertEquals(descriptorToCount.size(), 2);
- assertTrue(descriptorToCount.containsKey("root__reuse_lvl1"));
- assertEquals(descriptorToCount.get("root__reuse_lvl1").intValue(), 3);
- assertTrue(descriptorToCount.containsKey("root__reuse_lvl1__reuse_lvl2"));
- assertEquals(descriptorToCount.get("root__reuse_lvl1__reuse_lvl2").intValue(), 3);
- isDescriptorEqual(descriptor, ReuseRoot.getDescriptor());
- }
-}
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java
index d3c9d7ac92..abf8927eb3 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2025 Google LLC
+ * Copyright 2026 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java
index f73a0e1549..5891641986 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigDecimalByteStringEncoderTest.java
@@ -16,47 +16,40 @@
package com.google.cloud.bigquery.storage.v1beta2;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
import com.google.protobuf.ByteString;
import java.math.BigDecimal;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.JUnit4;
+import org.junit.jupiter.api.Test;
-@RunWith(JUnit4.class)
-public class BigDecimalByteStringEncoderTest {
+class BigDecimalByteStringEncoderTest {
@Test
- public void testEncodeBigDecimalandEncodeByteString() {
+ void testEncodeBigDecimalandEncodeByteString() {
BigDecimal testBD = new BigDecimal("0"); // expected result bd
ByteString testBS =
BigDecimalByteStringEncoder.encodeToNumericByteString(testBD); // convert expected to bs
BigDecimal resultBD =
BigDecimalByteStringEncoder.decodeNumericByteString(testBS); // convert bs to bd
- Assert.assertEquals(
- 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
+ assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
testBD = new BigDecimal("1.2");
testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD);
resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS);
- Assert.assertEquals(
- 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
+ assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
testBD = new BigDecimal("-1.2");
testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD);
resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS);
- Assert.assertEquals(
- 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
+ assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
testBD = new BigDecimal("99999999999999999999999999999.999999999");
testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD);
resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS);
- Assert.assertEquals(
- 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
+ assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
testBD = new BigDecimal("-99999999999999999999999999999.999999999");
testBS = BigDecimalByteStringEncoder.encodeToNumericByteString(testBD);
resultBD = BigDecimalByteStringEncoder.decodeNumericByteString(testBS);
- Assert.assertEquals(
- 0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
+ assertEquals(0, resultBD.compareTo(testBD)); // ensure converted bd is equal to expected bd
}
}
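The five hand-rolled round trips above migrate one-for-one, but JUnit 5 would also permit collapsing them into a single @ParameterizedTest. An optional follow-up sketch, not part of this change, assuming the junit-jupiter-params artifact is on the test classpath:

import static org.junit.jupiter.api.Assertions.assertEquals;

import com.google.cloud.bigquery.storage.v1beta2.BigDecimalByteStringEncoder;
import com.google.protobuf.ByteString;
import java.math.BigDecimal;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

class BigDecimalRoundTripSketch {
  @ParameterizedTest
  @ValueSource(strings = {"0", "1.2", "-1.2",
      "99999999999999999999999999999.999999999",
      "-99999999999999999999999999999.999999999"})
  void encodeThenDecodeRoundTrips(String value) {
    BigDecimal expected = new BigDecimal(value);
    ByteString encoded = BigDecimalByteStringEncoder.encodeToNumericByteString(expected);
    BigDecimal decoded = BigDecimalByteStringEncoder.decodeNumericByteString(encoded);
    assertEquals(0, decoded.compareTo(expected)); // compareTo ignores scale differences
  }
}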
diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java
index 6485d6ab55..b274569b95 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadClientTest.java
@@ -15,6 +15,10 @@
*/
package com.google.cloud.bigquery.storage.v1beta2;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
import com.google.api.gax.core.NoCredentialsProvider;
import com.google.api.gax.grpc.GaxGrpcProperties;
import com.google.api.gax.grpc.GrpcStatusCode;
@@ -42,14 +46,17 @@
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class BigQueryReadClientTest {
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.parallel.Execution;
+import org.junit.jupiter.api.parallel.ExecutionMode;
+
+@Execution(ExecutionMode.SAME_THREAD)
+class BigQueryReadClientTest {
private static MockBigQueryRead mockBigQueryRead;
private static MockServiceHelper serviceHelper;
private BigQueryReadClient client;
@@ -57,8 +64,8 @@ public class BigQueryReadClientTest {
private int retryCount;
private Code lastRetryStatusCode;
- @BeforeClass
- public static void startStaticServer() {
+ @BeforeAll
+ static void startStaticServer() {
mockBigQueryRead = new MockBigQueryRead();
serviceHelper =
new MockServiceHelper(
@@ -66,13 +73,13 @@ public static void startStaticServer() {
serviceHelper.start();
}
- @AfterClass
- public static void stopServer() {
+ @AfterAll
+ static void stopServer() {
serviceHelper.stop();
}
- @Before
- public void setUp() throws IOException {
+ @BeforeEach
+ void setUp() throws IOException {
serviceHelper.reset();
channelProvider = serviceHelper.createChannelProvider();
retryCount = 0;
@@ -95,14 +102,15 @@ public void onRetryAttempt(Status prevStatus, Metadata prevMetadata) {
client = BigQueryReadClient.create(settings);
}
- @After
- public void tearDown() throws Exception {
+ @AfterEach
+ void tearDown() throws Exception {
client.close();
+ client.awaitTermination(10, TimeUnit.SECONDS);
}
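Two behavioral changes ride along here: the class is pinned to @Execution(ExecutionMode.SAME_THREAD) because it shares one static mock server across tests, and tearDown now blocks until the client's channels have actually terminated rather than merely requesting shutdown. The shutdown half in isolation:

import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadClient;
import java.util.concurrent.TimeUnit;

class ClientShutdownSketch {
  static void shutDown(BigQueryReadClient client) throws Exception {
    client.close(); // begins an orderly shutdown of background resources
    // Wait (up to 10s) for termination so the next test's mock server
    // never observes connections left over from this one.
    client.awaitTermination(10, TimeUnit.SECONDS);
  }
}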
@Test
@SuppressWarnings("all")
- public void createReadSessionTest() {
+ void createReadSessionTest() {
String name = "name3373707";
String table = "table110115790";
ReadSession expectedResponse = ReadSession.newBuilder().setName(name).setTable(table).build();
@@ -113,16 +121,16 @@ public void createReadSessionTest() {
int maxStreamCount = 940837515;
ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount);
- Assert.assertEquals(expectedResponse, actualResponse);
+ assertEquals(expectedResponse, actualResponse);
List<AbstractMessage> actualRequests = mockBigQueryRead.getRequests();
- Assert.assertEquals(1, actualRequests.size());
+ assertEquals(1, actualRequests.size());
CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0);
- Assert.assertEquals(parent, actualRequest.getParent());
- Assert.assertEquals(readSession, actualRequest.getReadSession());
- Assert.assertEquals(maxStreamCount, actualRequest.getMaxStreamCount());
- Assert.assertTrue(
+ assertEquals(parent, actualRequest.getParent());
+ assertEquals(readSession, actualRequest.getReadSession());
+ assertEquals(maxStreamCount, actualRequest.getMaxStreamCount());
+ assertTrue(
channelProvider.isHeaderSent(
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
@@ -130,25 +138,22 @@ public void createReadSessionTest() {
@Test
@SuppressWarnings("all")
- public void createReadSessionExceptionTest() throws Exception {
+ void createReadSessionExceptionTest() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
mockBigQueryRead.addException(exception);
- try {
- String parent = "parent-995424086";
- ReadSession readSession = ReadSession.newBuilder().build();
- int maxStreamCount = 940837515;
+ String parent = "parent-995424086";
+ ReadSession readSession = ReadSession.newBuilder().build();
+ int maxStreamCount = 940837515;
- client.createReadSession(parent, readSession, maxStreamCount);
- Assert.fail("No exception raised");
- } catch (InvalidArgumentException e) {
- // Expected exception
- }
+ assertThrows(
+ InvalidArgumentException.class,
+ () -> client.createReadSession(parent, readSession, maxStreamCount));
}
@Test
@SuppressWarnings("all")
- public void readRowsTest() throws Exception {
+ void readRowsTest() throws Exception {
long rowCount = 1340416618L;
ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build();
mockBigQueryRead.addResponse(expectedResponse);
@@ -160,16 +165,16 @@ public void readRowsTest() throws Exception {
callable.serverStreamingCall(request, responseObserver);
List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.assertEquals(1, actualResponses.size());
- Assert.assertEquals(expectedResponse, actualResponses.get(0));
+ assertEquals(1, actualResponses.size());
+ assertEquals(expectedResponse, actualResponses.get(0));
- Assert.assertEquals(retryCount, 0);
- Assert.assertEquals(lastRetryStatusCode, Code.OK);
+ assertEquals(0, retryCount);
+ assertEquals(Code.OK, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsExceptionTest() throws Exception {
+ void readRowsExceptionTest() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
mockBigQueryRead.addException(exception);
ReadRowsRequest request = ReadRowsRequest.newBuilder().build();
@@ -179,22 +184,19 @@ public void readRowsExceptionTest() throws Exception {
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
- try {
- List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.fail("No exception thrown");
- } catch (ExecutionException e) {
- Assert.assertTrue(e.getCause() instanceof InvalidArgumentException);
- InvalidArgumentException apiException = (InvalidArgumentException) e.getCause();
- Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
- }
-
- Assert.assertEquals(retryCount, 0);
- Assert.assertEquals(lastRetryStatusCode, Code.OK);
+ ExecutionException e =
+ assertThrows(ExecutionException.class, () -> responseObserver.future().get());
+ assertTrue(e.getCause() instanceof InvalidArgumentException);
+ InvalidArgumentException apiException = (InvalidArgumentException) e.getCause();
+ assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
+
+ assertEquals(0, retryCount);
+ assertEquals(Code.OK, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException {
+ void readRowsRetryingEOSExceptionTest() throws ExecutionException, InterruptedException {
ApiException exception =
new InternalException(
new StatusRuntimeException(
@@ -213,15 +215,15 @@ public void readRowsRetryingEOSExceptionTest() throws ExecutionException, Interr
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.assertEquals(1, actualResponses.size());
+ assertEquals(1, actualResponses.size());
- Assert.assertEquals(retryCount, 1);
- Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL);
+ assertEquals(1, retryCount);
+ assertEquals(Code.INTERNAL, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException {
+ void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, InterruptedException {
ApiException exception =
new InternalException(
new StatusRuntimeException(
@@ -240,15 +242,15 @@ public void readRowsRetryingHttp2StreamRstTest() throws ExecutionException, Inte
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.assertEquals(1, actualResponses.size());
+ assertEquals(1, actualResponses.size());
- Assert.assertEquals(retryCount, 1);
- Assert.assertEquals(lastRetryStatusCode, Code.INTERNAL);
+ assertEquals(1, retryCount);
+ assertEquals(Code.INTERNAL, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo()
+ void readRowsNoRetryForResourceExhaustedWithoutRetryInfo()
throws ExecutionException, InterruptedException {
ApiException exception =
new ResourceExhaustedException(
@@ -267,23 +269,19 @@ public void readRowsNoRetryForResourceExhaustedWithoutRetryInfo()
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
- try {
- List<ReadRowsResponse> actualResponses = responseObserver.future().get();
- Assert.fail("No exception thrown");
- } catch (ExecutionException e) {
- Assert.assertTrue(e.getCause() instanceof ResourceExhaustedException);
- ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause();
- Assert.assertEquals(
- StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode());
- }
-
- Assert.assertEquals(retryCount, 0);
- Assert.assertEquals(lastRetryStatusCode, Code.OK);
+ ExecutionException e =
+ assertThrows(ExecutionException.class, () -> responseObserver.future().get());
+ assertTrue(e.getCause() instanceof ResourceExhaustedException);
+ ResourceExhaustedException apiException = (ResourceExhaustedException) e.getCause();
+ assertEquals(StatusCode.Code.RESOURCE_EXHAUSTED, apiException.getStatusCode().getCode());
+
+ assertEquals(0, retryCount);
+ assertEquals(Code.OK, lastRetryStatusCode);
}
@Test
@SuppressWarnings("all")
- public void readRowsNoRetryForResourceExhaustedWithRetryInfo()
+ void readRowsNoRetryForResourceExhaustedWithRetryInfo()
throws ExecutionException, InterruptedException {
RetryInfo retryInfo =
RetryInfo.newBuilder()
@@ -329,9 +327,9 @@ public RetryInfo parseBytes(byte[] serialized) {
ServerStreamingCallable<ReadRowsRequest, ReadRowsResponse> callable = client.readRowsCallable();
callable.serverStreamingCall(request, responseObserver);
List