Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
264 changes: 178 additions & 86 deletions dev/diffs/3.4.3.diff

Large diffs are not rendered by default.

98 changes: 95 additions & 3 deletions dev/diffs/3.5.8.diff
Original file line number Diff line number Diff line change
Expand Up @@ -472,7 +472,17 @@ diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala
index a206e97c353..fea1149b67d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala
@@ -467,7 +467,8 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite
@@ -267,7 +267,8 @@ trait ExplainSuiteHelper extends QueryTest with SharedSparkSession {
}
}

- test("SPARK-33853: explain codegen - check presence of subquery") {
+ test("SPARK-33853: explain codegen - check presence of subquery",
+ IgnoreComet("Comet replaces WholeStageCodegen subtrees with native operators")) {
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
withTempView("df") {
val df1 = spark.range(1, 100)
@@ -467,7 +468,8 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite
}
}

Expand Down Expand Up @@ -1470,7 +1480,7 @@ index 2f8e401e743..a4f94417dcc 100644
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListenerJobStart}
import org.apache.spark.shuffle.sort.SortShuffleManager
-import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy}
+import org.apache.spark.sql.{Dataset, IgnoreComet, QueryTest, Row, SparkSession, Strategy}
+import org.apache.spark.sql.{Dataset, IgnoreComet, IgnoreCometNativeDataFusion, QueryTest, Row, SparkSession, Strategy}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan}
+import org.apache.spark.sql.comet._
Expand Down Expand Up @@ -1889,6 +1899,46 @@ index 2f8e401e743..a4f94417dcc 100644
plan.inputPlan.output.zip(plan.finalPhysicalPlan.output).foreach { case (o1, o2) =>
assert(o1.semanticEquals(o2), "Different output column order after AQE optimization")
}
@@ -1551,7 +1553,8 @@ class AdaptiveQueryExecSuite
}
}

- test("SPARK-35585: Support propagate empty relation through project/filter") {
+ test("SPARK-35585: Support propagate empty relation through project/filter",
+ IgnoreCometNativeDataFusion("CometHashAggregate prevents AQE empty relation propagation")) {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val (plan1, adaptivePlan1) = runAdaptiveAndVerifyResult(
@@ -1567,7 +1570,8 @@ class AdaptiveQueryExecSuite
}
}

- test("SPARK-35442: Support propagate empty relation through aggregate") {
+ test("SPARK-35442: Support propagate empty relation through aggregate",
+ IgnoreCometNativeDataFusion("CometHashAggregate prevents AQE empty relation propagation")) {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val (plan1, adaptivePlan1) = runAdaptiveAndVerifyResult(
"SELECT key, count(*) FROM testData WHERE value = 'no_match' GROUP BY key")
@@ -1586,7 +1590,8 @@ class AdaptiveQueryExecSuite
}
}

- test("SPARK-35442: Support propagate empty relation through union") {
+ test("SPARK-35442: Support propagate empty relation through union",
+ IgnoreCometNativeDataFusion("CometHashAggregate prevents AQE empty relation propagation")) {
def checkNumUnion(plan: SparkPlan, numUnion: Int): Unit = {
assert(
collect(plan) {
@@ -1918,7 +1923,8 @@ class AdaptiveQueryExecSuite
}
}

- test("SPARK-34980: Support coalesce partition through union") {
+ test("SPARK-34980: Support coalesce partition through union",
+ IgnoreCometNativeDataFusion("CometHashAggregate prevents AQE partition coalescing")) {
def checkResultPartition(
df: Dataset[Row],
numUnion: Int,
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
index fd52d038ca6..154c800be67 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
Expand Down Expand Up @@ -2355,6 +2405,28 @@ index 3f47c5e506f..f1ce3194279 100644
import testImplicits._

withTempPath { dir =>
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala
index placeholder..placeholder 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala
@@ -38,6 +38,7 @@ import org.apache.spark.sql.{DataFrame, SparkSession}
+import org.apache.spark.sql.IgnoreComet
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.catalyst.types.DataTypeUtils.toAttributes
import org.apache.spark.sql.catalyst.util.quietly
import org.apache.spark.sql.connector.{CSVDataWriter, CSVDataWriterFactory, RangeInputPartition, SimpleScanBuilder, SimpleWritableDataSource, TestLocalScanTable}
@@ -701,7 +702,8 @@ abstract class SQLAppStatusListenerSuite extends SharedSparkSession with JsonTest
assert(statusStore.execution(2) === None)
}

- test("SPARK-29894 test Codegen Stage Id in SparkPlanInfo",
+ test("SPARK-29894 test Codegen Stage Id in SparkPlanInfo",
+ IgnoreComet("Comet replaces WholeStageCodegen with native operators"),
DisableAdaptiveExecution("WSCG rule is applied later in AQE")) {
// with AQE on, the WholeStageCodegen rule is applied when running QueryStageExec.
val df = createTestDataFrame.select(count("*"))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/debug/DebuggingSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/debug/DebuggingSuite.scala
index b8f3ea3c6f3..bbd44221288 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/debug/DebuggingSuite.scala
Expand All @@ -2367,7 +2439,27 @@ index b8f3ea3c6f3..bbd44221288 100644
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext
@@ -125,7 +126,8 @@ class DebuggingSuite extends DebuggingSuiteBase with DisableAdaptiveExecutionSui
@@ -46,7 +47,8 @@ abstract class DebuggingSuiteBase extends SharedSparkSession {
testData.as[TestData].debug()
}

- test("debugCodegen") {
+ test("debugCodegen",
+ IgnoreComet("Comet replaces WholeStageCodegen subtrees with native operators")) {
val df = spark.range(10).groupBy(col("id") * 2).count()
df.collect()
val res = codegenString(df.queryExecution.executedPlan)
@@ -53,7 +55,8 @@ abstract class DebuggingSuiteBase extends SharedSparkSession {
assert(res.contains("Object[]"))
}

- test("debugCodegenStringSeq") {
+ test("debugCodegenStringSeq",
+ IgnoreComet("Comet replaces WholeStageCodegen subtrees with native operators")) {
val df = spark.range(10).groupBy(col("id") * 2).count()
df.collect()
val res = codegenStringSeq(df.queryExecution.executedPlan)
@@ -125,7 +130,8 @@ class DebuggingSuite extends DebuggingSuiteBase with DisableAdaptiveExecutionSui
| id LongType: {}""".stripMargin))
}

Expand Down
Loading
Loading