diff --git a/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java b/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
index fc92446783381..f9087e059385e 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java
@@ -221,7 +221,8 @@ private static void runJsonDatasetExample(SparkSession spark) {
     // an RDD[String] storing one JSON object per string.
     List<String> jsonData = Arrays.asList(
       "{\"name\":\"Yin\",\"address\":{\"city\":\"Columbus\",\"state\":\"Ohio\"}}");
-    JavaRDD<String> anotherPeopleRDD = new JavaSparkContext(spark.sparkContext()).parallelize(jsonData);
+    JavaRDD<String> anotherPeopleRDD =
+      new JavaSparkContext(spark.sparkContext()).parallelize(jsonData);
     Dataset<Row> anotherPeople = spark.read().json(anotherPeopleRDD);
     anotherPeople.show();
     // +---------------+----+
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/FixedLengthRowBasedKeyValueBatch.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/FixedLengthRowBasedKeyValueBatch.java
index b6130d1f332b9..85529f6a0aa1e 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/FixedLengthRowBasedKeyValueBatch.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/FixedLengthRowBasedKeyValueBatch.java
@@ -33,7 +33,7 @@ public final class FixedLengthRowBasedKeyValueBatch extends RowBasedKeyValueBatc
   private final int vlen;
   private final int recordLength;
 
-  private final long getKeyOffsetForFixedLengthRecords(int rowId) {
+  private long getKeyOffsetForFixedLengthRecords(int rowId) {
     return recordStartOffset + rowId * (long) recordLength;
   }
 
@@ -43,7 +43,7 @@ private final long getKeyOffsetForFixedLengthRecords(int rowId) {
    * Returns an UnsafeRow pointing to the value if succeeds, otherwise returns null.
    */
   @Override
-  public final UnsafeRow appendRow(Object kbase, long koff, int klen,
+  public UnsafeRow appendRow(Object kbase, long koff, int klen,
       Object vbase, long voff, int vlen) {
     // if run out of max supported rows or page size, return null
     if (numRows >= capacity || page == null || page.size() - pageCursor < recordLength) {
@@ -71,7 +71,7 @@ public final UnsafeRow appendRow(Object kbase, long koff, int klen,
    * Returns the key row in this batch at `rowId`. Returned key row is reused across calls.
    */
   @Override
-  public final UnsafeRow getKeyRow(int rowId) {
+  public UnsafeRow getKeyRow(int rowId) {
     assert(rowId >= 0);
     assert(rowId < numRows);
     if (keyRowId != rowId) { // if keyRowId == rowId, desired keyRow is already cached
@@ -90,7 +90,7 @@ public final UnsafeRow getKeyRow(int rowId) {
    * In most times, 1) is skipped because `getKeyRow(id)` is often called before `getValueRow(id)`.
    */
   @Override
-  protected final UnsafeRow getValueFromKey(int rowId) {
+  protected UnsafeRow getValueFromKey(int rowId) {
     if (keyRowId != rowId) {
       getKeyRow(rowId);
     }
@@ -103,7 +103,7 @@ protected final UnsafeRow getValueFromKey(int rowId) {
    * Returns an iterator to go through all rows
    */
   @Override
-  public final org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow> rowIterator() {
+  public org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow> rowIterator() {
     return new org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow>() {
       private final UnsafeRow key = new UnsafeRow(keySchema.length());
       private final UnsafeRow value = new UnsafeRow(valueSchema.length());
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java
index cea9d5d5bc3a5..4899f856c8756 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatch.java
@@ -123,7 +123,7 @@ public final void close() {
     }
   }
 
-  private final boolean acquirePage(long requiredSize) {
+  private boolean acquirePage(long requiredSize) {
     try {
       page = allocatePage(requiredSize);
     } catch (OutOfMemoryError e) {
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/VariableLengthRowBasedKeyValueBatch.java b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/VariableLengthRowBasedKeyValueBatch.java
index f4002ee0d50de..ea4f984be24e5 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/VariableLengthRowBasedKeyValueBatch.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/catalyst/expressions/VariableLengthRowBasedKeyValueBatch.java
@@ -39,7 +39,7 @@ public final class VariableLengthRowBasedKeyValueBatch extends RowBasedKeyValueB
    * Returns an UnsafeRow pointing to the value if succeeds, otherwise returns null.
    */
   @Override
-  public final UnsafeRow appendRow(Object kbase, long koff, int klen,
+  public UnsafeRow appendRow(Object kbase, long koff, int klen,
       Object vbase, long voff, int vlen) {
     final long recordLength = 8 + klen + vlen + 8;
     // if run out of max supported rows or page size, return null
@@ -94,7 +94,7 @@ public UnsafeRow getKeyRow(int rowId) {
    * In most times, 1) is skipped because `getKeyRow(id)` is often called before `getValueRow(id)`.
    */
   @Override
-  public final UnsafeRow getValueFromKey(int rowId) {
+  public UnsafeRow getValueFromKey(int rowId) {
     if (keyRowId != rowId) {
       getKeyRow(rowId);
     }
@@ -110,7 +110,7 @@ public final UnsafeRow getValueFromKey(int rowId) {
    * Returns an iterator to go through all rows
    */
   @Override
-  public final org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow> rowIterator() {
+  public org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow> rowIterator() {
     return new org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow>() {
       private final UnsafeRow key = new UnsafeRow(keySchema.length());
      private final UnsafeRow value = new UnsafeRow(valueSchema.length());
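
Every `final` removed in this diff is redundant by Java's language rules: a `private` method is not inherited and therefore can never be overridden, and no method of a `final` class can be overridden because the class cannot be subclassed at all. The sketch below is a minimal, self-contained illustration of both cases (hypothetical names, not code from this patch); it also shows the int-overflow reason for the widening cast in `rowId * (long) recordLength` above.

// FinalDemo.java -- hypothetical standalone example, not part of the patch.
public final class FinalDemo {  // `final` on the class already forbids subclassing,
                                // so no method in it can ever be overridden.

  // Writing `private final long keyOffset(...)` would be legal but pointless:
  // private methods are not inherited, hence never overridable.
  private long keyOffset(int rowId, int recordLength) {
    // Cast one operand to long so the multiply happens in 64-bit arithmetic,
    // mirroring the patch's `rowId * (long) recordLength`.
    return rowId * (long) recordLength;
  }

  // `final` here would be equally redundant: the enclosing class is final.
  public long offsetOf(int rowId) {
    return keyOffset(rowId, 24);
  }

  public static void main(String[] args) {
    // 100_000_000 * 24 = 2_400_000_000, which overflows int (max ~2.1e9)
    // but is exact in long thanks to the widening cast above.
    System.out.println(new FinalDemo().offsetOf(100_000_000));  // prints 2400000000
  }
}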