Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions java/lance-jni/src/blocking_dataset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1578,6 +1578,21 @@ fn inner_delete(env: &mut JNIEnv, java_dataset: JObject, predicate: JString) ->
Ok(())
}

#[no_mangle]
pub extern "system" fn Java_org_lance_Dataset_nativeTruncateTable(
    mut env: JNIEnv,
    java_dataset: JObject,
) {
    // JNI entry point for Dataset#nativeTruncateTable.
    // Delegates to the inner helper; on error, the macro throws a Java
    // exception instead of returning a value (the native method is void).
    ok_or_throw_without_return!(env, inner_truncate_table(&mut env, java_dataset))
}

/// Delete every row from the dataset held by `java_dataset`.
///
/// Blocks the calling JNI thread on the shared runtime until the
/// truncation commits a new (empty) dataset version.
fn inner_truncate_table(env: &mut JNIEnv, java_dataset: JObject) -> Result<()> {
    // SAFETY: NATIVE_DATASET names the field where the BlockingDataset was
    // stored on the Java side — same pattern as the sibling JNI helpers in
    // this file (e.g. inner_delete above).
    let mut dataset =
        unsafe { env.get_rust_field::<_, _, BlockingDataset>(java_dataset, NATIVE_DATASET) }?;
    RT.block_on(dataset.inner.truncate_table())?;
    Ok(())
}

//////////////////////////////
// Schema evolution Methods //
//////////////////////////////
Expand Down
13 changes: 13 additions & 0 deletions java/src/main/java/org/lance/Dataset.java
Original file line number Diff line number Diff line change
Expand Up @@ -654,6 +654,19 @@ public void delete(String predicate) {

private native void nativeDelete(String predicate);

/**
 * Removes every row from the dataset, committing a new (empty) version.
 * The dataset schema is left intact.
 */
public void truncateTable() {
  try (LockManager.WriteLock lock = lockManager.acquireWriteLock()) {
    // Checked under the write lock so the handle cannot be closed
    // between the check and the native call.
    Preconditions.checkArgument(nativeDatasetHandle != 0, "Dataset is closed");
    nativeTruncateTable();
  }
}

/** Native implementation backing {@link #truncateTable()}; defined in blocking_dataset.rs. */
private native void nativeTruncateTable();

/**
* Gets the URI of the dataset.
*
Expand Down
65 changes: 65 additions & 0 deletions java/src/test/java/org/lance/operation/TruncateTest.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.lance.operation;

import org.lance.Dataset;
import org.lance.FragmentMetadata;
import org.lance.TestUtils;
import org.lance.Transaction;

import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.nio.file.Path;

import static org.junit.jupiter.api.Assertions.assertEquals;

/** Verifies that truncating a dataset removes all rows while keeping the schema. */
public class TruncateTest extends OperationTestBase {

  @Test
  void testTruncateTable(@TempDir Path tempDir) throws Exception {
    String datasetPath = tempDir.resolve("testTruncate").toString();
    try (RootAllocator allocator = new RootAllocator(Long.MAX_VALUE)) {
      TestUtils.SimpleTestDataset testDataset =
          new TestUtils.SimpleTestDataset(allocator, datasetPath);
      dataset = testDataset.createEmptyDataset();

      // Stage a fragment with some rows and commit it with an Append transaction.
      int expectedRows = 20;
      FragmentMetadata fragment = testDataset.createNewFragment(expectedRows);
      Transaction appendTxn =
          dataset
              .newTransactionBuilder()
              .operation(
                  Append.builder()
                      .fragments(java.util.Collections.singletonList(fragment))
                      .build())
              .build();
      try (Dataset appended = appendTxn.commit()) {
        assertEquals(expectedRows, appended.countRows());

        // Truncation empties the dataset in place.
        appended.truncateTable();
        assertEquals(0, appended.countRows());

        // The schema must survive the truncation.
        try (org.lance.ipc.LanceScanner scanner = appended.newScan()) {
          Schema actualSchema = scanner.schema();
          assertEquals(testDataset.getSchema(), actualSchema);
        }
      }
    }
  }
}
8 changes: 8 additions & 0 deletions python/python/lance/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -1996,6 +1996,14 @@ def delete(
predicate = str(predicate)
self._ds.delete(predicate, conflict_retries, retry_timeout)

def truncate_table(self) -> None:
    """Remove every row from the dataset.

    A new dataset version is created; the schema is preserved.
    """
    self._ds.truncate_table()
    # Drop the cached index listing so the next lookup re-reads it from
    # the truncated dataset (presumably row deletion can invalidate
    # index state — mirrors how other mutating paths reset this cache).
    self._list_indices_res = None

def insert(
self,
data: ReaderLike,
Expand Down
18 changes: 18 additions & 0 deletions python/python/tests/test_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,24 @@ def test_dataset_overwrite(tmp_path: Path):
assert ds_v1.to_table() == table1


def test_truncate_table(tmp_path: Path):
    base_dir = tmp_path / "truncate"
    # Use a dictionary-encoded column so truncation is exercised against
    # a non-trivial schema, not just primitive types.
    dict_col = pa.DictionaryArray.from_arrays(
        pa.array([0, 1, 2], pa.uint16()), pa.array(["a", "b", "c"])
    )
    table = pa.table({"i": pa.array([1, 2, 3], pa.int32()), "dict": dict_col})

    ds = lance.write_dataset(table, base_dir, data_storage_version="stable")
    assert ds.count_rows() == 3

    # Truncation empties the dataset but leaves the schema untouched.
    ds.truncate_table()
    assert ds.count_rows() == 0
    assert ds.schema == table.schema


def test_dataset_append(tmp_path: Path):
table = pa.Table.from_pydict({"colA": [1, 2, 3], "colB": [4, 5, 6]})
base_dir = tmp_path / "test"
Expand Down
9 changes: 9 additions & 0 deletions python/src/dataset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1551,6 +1551,15 @@ impl Dataset {
Ok(())
}

/// Delete every row in the dataset while preserving the schema.
///
/// Commits a new dataset version and swaps it into `self.ds` only after
/// the truncation succeeds, so a failure leaves the wrapper unchanged.
fn truncate_table(&mut self) -> PyResult<()> {
    // Clone the inner dataset: truncate_table needs &mut, but self.ds is
    // behind an Arc shared with other handles.
    let mut truncated = self.ds.as_ref().clone();
    rt().block_on(None, truncated.truncate_table())?
        .map_err(|err: lance::Error| PyIOError::new_err(err.to_string()))?;
    self.ds = Arc::new(truncated);
    Ok(())
}

/// Cleanup old versions from the dataset
#[pyo3(signature = (older_than_micros = None, retain_versions = None, delete_unverified = None, error_if_tagged_old_versions = None))]
fn cleanup_old_versions(
Expand Down
5 changes: 5 additions & 0 deletions rust/lance/src/dataset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1562,6 +1562,11 @@ impl Dataset {
write::delete::delete(self, predicate).await
}

/// Truncate the dataset by deleting all rows.
///
/// Implemented as `delete("true")`: every row matches the predicate, so a
/// new dataset version is committed with no rows while the schema is kept.
///
/// # Errors
///
/// Propagates any error from the underlying delete operation.
pub async fn truncate_table(&mut self) -> Result<()> {
    self.delete("true").await
}

/// Add new base paths to the dataset.
///
/// This method allows you to register additional storage locations (buckets)
Expand Down
29 changes: 29 additions & 0 deletions rust/lance/src/dataset/tests/dataset_io.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,35 @@ use lance_table::io::manifest::read_manifest;
use object_store::path::Path;
use rstest::rstest;

/// Truncating a populated dataset must drop every row and fragment while
/// leaving the schema intact.
#[tokio::test]
async fn test_truncate_table() {
    let tmpdir = tempfile::tempdir().unwrap();
    let path = tmpdir.path();
    create_file(path, WriteMode::Create, LanceFileVersion::V2_2).await;

    let uri = path.to_str().unwrap();
    let mut ds = Dataset::open(uri).await.unwrap();
    let rows_before = ds.count_rows(None).await.unwrap();
    assert!(rows_before > 0, "fixture dataset should start non-empty");

    ds.truncate_table().await.unwrap();

    // All rows and fragments are gone after truncation.
    assert_eq!(ds.count_rows(None).await.unwrap(), 0);
    assert_eq!(ds.count_fragments(), 0);

    // The schema written by `create_file` survives. Compare plain values —
    // the previous Arc wrapper was immediately `.as_ref()`'d and served no
    // purpose.
    let expected_schema = ArrowSchema::new(vec![
        ArrowField::new("i", DataType::Int32, false),
        ArrowField::new(
            "dict",
            DataType::Dictionary(Box::new(DataType::UInt16), Box::new(DataType::Utf8)),
            false,
        ),
    ]);
    let actual_schema = ArrowSchema::from(ds.schema());
    assert_eq!(actual_schema, expected_schema);
}

#[rstest]
#[lance_test_macros::test(tokio::test)]
async fn test_create_dataset(
Expand Down
Loading