5 changes: 0 additions & 5 deletions .baseline/checkstyle/checkstyle-suppressions.xml
@@ -53,11 +53,6 @@
   <suppress files="org.apache.iceberg.flink.maintenance.api.ZkLockFactory" id="BanShadedClasses"/>
   <suppress files="org.apache.iceberg.flink.maintenance.api.TestZkLockFactory" id="BanShadedClasses"/>

-  <!-- Suppress checks for CometColumnReader -->
-  <suppress files="org.apache.iceberg.spark.data.vectorized.CometColumnReader" checks="IllegalImport"/>
-  <!-- Suppress checks for CometDeletedColumnVector -->
-  <suppress files="org.apache.iceberg.spark.data.vectorized.CometDeletedColumnVector" checks="IllegalImport"/>
-
   <!-- Suppress TestClassNamingConvention for main source files -->
   <suppress files=".*[/\\]src[/\\]main[/\\].*" id="TestClassNamingConvention" />
 </suppressions>
1 change: 0 additions & 1 deletion docs/docs/spark-configuration.md
@@ -174,7 +174,6 @@ val spark = SparkSession.builder()
 | Spark option                                      | Default       | Description                                                                                                        |
 |---------------------------------------------------|---------------|--------------------------------------------------------------------------------------------------------------------|
 | spark.sql.iceberg.vectorization.enabled           | Table default | Enables vectorized reads of data files                                                                             |
-| spark.sql.iceberg.parquet.reader-type             | ICEBERG       | Sets Parquet reader implementation (`ICEBERG`, `COMET`)                                                            |
 | spark.sql.iceberg.check-nullability               | true          | Validates that the write schema's nullability matches the table's nullability                                      |
 | spark.sql.iceberg.check-ordering                  | true          | Validates that the write schema's column order matches the table schema's order                                    |
 | spark.sql.iceberg.planning.preserve-data-grouping | false         | When true, co-locates scan tasks for the same partition in the same read split; used in Storage Partitioned Joins  |
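For context, every option in this table is an ordinary Spark session property, so removing the `reader-type` row changes nothing about how the remaining options are applied. A minimal sketch of setting the surviving options at session build time, assuming a standard Spark Java setup (the app name, master, and values are illustrative):

```java
import org.apache.spark.sql.SparkSession;

public class IcebergSessionSketch {
  public static void main(String[] args) {
    // Iceberg SQL options are plain session configs; the keys come from the table above.
    SparkSession spark =
        SparkSession.builder()
            .appName("iceberg-read-sketch") // illustrative name
            .master("local[*]")             // illustrative; omit when submitting to a cluster
            .config("spark.sql.iceberg.vectorization.enabled", "true")
            .config("spark.sql.iceberg.check-ordering", "true")
            .getOrCreate();

    spark.stop();
  }
}
```

The same keys can also be set per session in Spark SQL, e.g. `SET spark.sql.iceberg.vectorization.enabled = true`.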
1 change: 0 additions & 1 deletion gradle/libs.versions.toml
@@ -39,7 +39,6 @@ awssdk-s3accessgrants = "2.4.1"
 bson-ver = "4.11.5"
 caffeine = "2.9.3"
 calcite = "1.41.0"
-comet = "0.12.0"
 datasketches = "6.2.0"
 delta-standalone = "3.3.2"
 delta-spark = "3.3.2"
3 changes: 0 additions & 3 deletions spark/v3.4/build.gradle
@@ -75,8 +75,6 @@ project(":iceberg-spark:iceberg-spark-${sparkMajorVersion}_${scalaVersion}") {
     exclude group: 'org.roaringbitmap'
   }

-  compileOnly "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:${libs.versions.comet.get()}"
-
   implementation libs.parquet.column
   implementation libs.parquet.hadoop

@@ -184,7 +182,6 @@ project(":iceberg-spark:iceberg-spark-extensions-${sparkMajorVersion}_${scalaVersion}") {
   testImplementation libs.avro.avro
   testImplementation libs.parquet.hadoop
   testImplementation libs.awaitility
-  testImplementation "org.apache.datafusion:comet-spark-spark${sparkMajorVersion}_${scalaVersion}:${libs.versions.comet.get()}"

   // Required because we remove antlr plugin dependencies from the compile configuration, see note above
   runtimeOnly libs.antlr.runtime
@@ -24,6 +24,4 @@
 @Value.Immutable
 public interface ParquetBatchReadConf extends Serializable {
   int batchSize();
-
-  ParquetReaderType readerType();
 }
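With `readerType()` gone, `batchSize()` is the interface's only remaining property. For reference, `@Value.Immutable` comes from the Immutables library, which generates an implementation and builder at compile time. A minimal sketch of constructing the slimmed-down conf, assuming the generated class follows the library's default `Immutable*` naming (the class name and batch size here are assumptions, not taken from the diff):

```java
// Sketch: building the conf through the Immutables-generated builder.
// ImmutableParquetBatchReadConf is assumed from the library's default
// naming convention; 4096 is an illustrative batch size.
ParquetBatchReadConf conf =
    ImmutableParquetBatchReadConf.builder()
        .batchSize(4096)
        .build();
```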

This file was deleted.

@@ -374,12 +374,4 @@ public boolean reportColumnStats() {
         .defaultValue(SparkSQLProperties.REPORT_COLUMN_STATS_DEFAULT)
         .parse();
   }
-
-  public ParquetReaderType parquetReaderType() {
-    return confParser
-        .enumConf(ParquetReaderType::fromString)
-        .sessionConf(SparkSQLProperties.PARQUET_READER_TYPE)
-        .defaultValue(SparkSQLProperties.PARQUET_READER_TYPE_DEFAULT)
-        .parse();
-  }
 }
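The surviving `reportColumnStats()` shows the class's standard shape: each accessor walks `confParser` through a session property and then a default before parsing. A hedged sketch of that same ladder for a hypothetical boolean property, just to make the pattern explicit (`someFlag`, `SOME_FLAG`, and `SOME_FLAG_DEFAULT` are invented names, and `booleanConf()` is assumed by analogy with the `enumConf(...)` call above):

```java
// Sketch of the SparkReadConf accessor pattern; all names below are hypothetical.
public boolean someFlag() {
  return confParser
      .booleanConf() // assumed parser entry point, by analogy with enumConf(...)
      .sessionConf(SparkSQLProperties.SOME_FLAG)
      .defaultValue(SparkSQLProperties.SOME_FLAG_DEFAULT)
      .parse();
}
```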
@@ -27,10 +27,6 @@ private SparkSQLProperties() {}
   // Controls whether vectorized reads are enabled
   public static final String VECTORIZATION_ENABLED = "spark.sql.iceberg.vectorization.enabled";

-  // Controls which Parquet reader implementation to use
-  public static final String PARQUET_READER_TYPE = "spark.sql.iceberg.parquet.reader-type";
-  public static final ParquetReaderType PARQUET_READER_TYPE_DEFAULT = ParquetReaderType.ICEBERG;
-
   // Controls whether reading/writing timestamps without timezones is allowed
   @Deprecated
   public static final String HANDLE_TIMESTAMP_WITHOUT_TIMEZONE =
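With `PARQUET_READER_TYPE` removed, `VECTORIZATION_ENABLED` is the only reader-facing key left in this block. A minimal sketch of flipping such a key on a live session through the standard Spark runtime-conf API (the `spark` variable is assumed to be an existing `SparkSession`):

```java
// Sketch: toggling an Iceberg SQL property at runtime; equivalent to
// running "SET spark.sql.iceberg.vectorization.enabled = true" in Spark SQL.
spark.conf().set(SparkSQLProperties.VECTORIZATION_ENABLED, "true");
```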

This file was deleted.
