diff --git a/dev/deps/spark-deps-hadoop-3-hive-2.3 b/dev/deps/spark-deps-hadoop-3-hive-2.3
index c38658a1ebf10..c4a5535936c6d 100644
--- a/dev/deps/spark-deps-hadoop-3-hive-2.3
+++ b/dev/deps/spark-deps-hadoop-3-hive-2.3
@@ -15,12 +15,12 @@ antlr4-runtime/4.13.1//antlr4-runtime-4.13.1.jar
aopalliance-repackaged/3.0.6//aopalliance-repackaged-3.0.6.jar
arpack/3.1.1//arpack-3.1.1.jar
arpack_combined_all/0.1//arpack_combined_all-0.1.jar
-arrow-compression/18.3.0//arrow-compression-18.3.0.jar
-arrow-format/18.3.0//arrow-format-18.3.0.jar
-arrow-memory-core/18.3.0//arrow-memory-core-18.3.0.jar
-arrow-memory-netty-buffer-patch/18.3.0//arrow-memory-netty-buffer-patch-18.3.0.jar
-arrow-memory-netty/18.3.0//arrow-memory-netty-18.3.0.jar
-arrow-vector/18.3.0//arrow-vector-18.3.0.jar
+arrow-compression/19.0.0//arrow-compression-19.0.0.jar
+arrow-format/19.0.0//arrow-format-19.0.0.jar
+arrow-memory-core/19.0.0//arrow-memory-core-19.0.0.jar
+arrow-memory-netty-buffer-patch/19.0.0//arrow-memory-netty-buffer-patch-19.0.0.jar
+arrow-memory-netty/19.0.0//arrow-memory-netty-19.0.0.jar
+arrow-vector/19.0.0//arrow-vector-19.0.0.jar
audience-annotations/0.12.0//audience-annotations-0.12.0.jar
avro-ipc/1.12.1//avro-ipc-1.12.1.jar
avro-mapred/1.12.1//avro-mapred-1.12.1.jar
diff --git a/pom.xml b/pom.xml
index 13abe5e58a73c..2a1861570955f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -231,7 +231,7 @@
./python/pyspark/sql/pandas/utils.py, ./python/packaging/classic/setup.py,
./python/packaging/client/setup.py, and ./python/packaging/connect/setup.py too.
-->
-    <arrow.version>18.3.0</arrow.version>
+    <arrow.version>19.0.0</arrow.version>
3.0.8
0.13.0
diff --git a/sql/connect/common/src/main/scala/org/apache/spark/sql/connect/client/SparkResult.scala b/sql/connect/common/src/main/scala/org/apache/spark/sql/connect/client/SparkResult.scala
index 2ab250673b76a..375d18514fbf3 100644
--- a/sql/connect/common/src/main/scala/org/apache/spark/sql/connect/client/SparkResult.scala
+++ b/sql/connect/common/src/main/scala/org/apache/spark/sql/connect/client/SparkResult.scala
@@ -239,13 +239,16 @@ private[sql] class SparkResult[T](
throw new IllegalStateException(
s"Expected $expectedNumRows rows in arrow batch but got $numRecordsInBatch.")
}
+ val messagesInBatch = messages.result()
// Skip the entire result if it is empty.
if (numRecordsInBatch > 0) {
numRecords += numRecordsInBatch
- resultMap.put(nextResultIndex, (reader.bytesRead, messages.result()))
+ resultMap.put(nextResultIndex, (reader.bytesRead, messagesInBatch))
nextResultIndex += 1
nonEmpty |= true
stop |= stopOnFirstNonEmptyResponse
+ } else {
+ messagesInBatch.foreach(_.close())
}
}
}
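
Note (not part of the patch): the SparkResult.scala hunk above appears to address a resource leak. Before this change, when an Arrow batch contained zero rows, the ArrowMessage objects already read from the stream were never stored in resultMap and therefore never closed. The change materializes messages.result() once and, in the empty-batch case, closes each message explicitly. Below is a minimal, self-contained Scala sketch of that close-on-empty ownership pattern; the Message class, handleBatch method, and CloseOnEmptyBatch object are hypothetical stand-ins for the real ArrowMessage handling, not Spark code.

```scala
import scala.collection.mutable

// Hypothetical stand-in for Spark Connect's ArrowMessage: an object that owns
// off-heap Arrow buffers and must be closed if it is never handed off.
final class Message(val numRecords: Int) extends AutoCloseable {
  override def close(): Unit = println(s"released buffers for $numRecords records")
}

object CloseOnEmptyBatch {
  private val resultMap = mutable.Map.empty[Int, Seq[Message]]
  private var nextResultIndex = 0

  // Mirrors the patched logic: materialize the batch once, retain it only when
  // it contains rows, and close every message otherwise so nothing leaks.
  def handleBatch(batch: Seq[Message]): Unit = {
    val numRecordsInBatch = batch.map(_.numRecords).sum
    if (numRecordsInBatch > 0) {
      resultMap.put(nextResultIndex, batch)
      nextResultIndex += 1
    } else {
      batch.foreach(_.close())
    }
  }

  def main(args: Array[String]): Unit = {
    handleBatch(Seq(new Message(3), new Message(2))) // kept in resultMap
    handleBatch(Seq(new Message(0)))                 // closed immediately
  }
}
```

The point of the pattern is that ownership of the batch is decided exactly once: either the result map takes the messages, or the code that built the batch closes them.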