diff --git a/be/src/olap/base_tablet.cpp b/be/src/olap/base_tablet.cpp index 25398e84346806..895899b9ca6cb5 100644 --- a/be/src/olap/base_tablet.cpp +++ b/be/src/olap/base_tablet.cpp @@ -707,6 +707,8 @@ Status BaseTablet::calc_segment_delete_bitmap(RowsetSharedPtr rowset, row_id); ++conflict_rows; continue; + // NOTE: for partial update which doesn't specify the sequence column, we can't use the sequence column value filled in flush phase + // as its final value. Otherwise it may cause inconsistency between replicas. } if (is_partial_update && rowset_writer != nullptr) { // In publish version, record rows to be deleted for concurrent update diff --git a/regression-test/data/fault_injection_p0/partial_update/test_partial_update_publish_seq.out b/regression-test/data/fault_injection_p0/partial_update/test_partial_update_publish_seq.out new file mode 100644 index 00000000000000..a7fa43e0ad982f --- /dev/null +++ b/regression-test/data/fault_injection_p0/partial_update/test_partial_update_publish_seq.out @@ -0,0 +1,98 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !seq_map_0 -- +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 + +-- !seq_map_1 -- +1 20 99 88 1 +2 10 99 88 2 +3 10 99 3 3 + +-- !inspect -- +1 1 1 1 1 0 1 2 +1 10 99 1 1 0 10 3 +1 20 1 88 1 0 20 4 +1 20 99 88 1 0 20 4 +2 2 2 2 2 0 2 2 +2 10 99 2 2 0 10 3 +2 10 2 88 2 0 10 4 +2 10 99 88 2 0 10 4 +3 3 3 3 3 0 3 2 +3 10 99 3 3 0 10 3 +3 5 3 88 3 0 5 4 + +-- !seq_map_2 -- +1 20 99 88 33 +2 10 77 88 33 +3 50 77 3 33 + +-- !inspect -- +1 1 1 1 1 0 1 2 +1 10 99 1 1 0 10 3 +1 20 1 88 1 0 20 4 +1 20 99 88 1 0 20 4 +1 9 77 88 1 0 9 5 +1 20 99 88 33 0 20 6 +2 2 2 2 2 0 2 2 +2 10 99 2 2 0 10 3 +2 10 2 88 2 0 10 4 +2 10 99 88 2 0 10 4 +2 10 77 88 2 0 10 5 +2 10 99 88 33 0 10 6 +2 10 77 88 33 0 10 6 +3 3 3 3 3 0 3 2 +3 10 99 3 3 0 10 3 +3 5 3 88 3 0 5 4 +3 50 77 3 3 0 50 5 +3 10 99 3 33 0 10 6 +3 50 77 3 33 0 50 6 + +-- !seq_map_3 -- +3 120 66 3 33 + +-- !inspect -- +1 1 1 1 1 0 1 2 +1 10 99 1 1 0 10 3 +1 20 99 88 1 0 20 4 +1 20 1 88 1 0 20 4 +1 9 77 88 1 0 9 5 +1 20 99 88 33 0 20 6 +1 80 66 88 33 0 80 7 +1 100 66 88 33 1 100 8 +1 100 99 88 33 1 100 8 +2 2 2 2 2 0 2 2 +2 10 99 2 2 0 10 3 +2 10 99 88 2 0 10 4 +2 10 2 88 2 0 10 4 +2 10 77 88 2 0 10 5 +2 10 99 88 33 0 10 6 +2 10 77 88 33 0 10 6 +2 100 66 88 33 0 100 7 +2 100 66 88 33 1 100 8 +2 100 77 88 33 1 100 8 +3 3 3 3 3 0 3 2 +3 10 99 3 3 0 10 3 +3 5 3 88 3 0 5 4 +3 50 77 3 3 0 50 5 +3 10 99 3 33 0 10 6 +3 50 77 3 33 0 50 6 +3 120 66 3 33 0 120 7 +3 100 77 3 33 1 100 8 + +-- !seq_map_4 -- + +-- !inspect -- +1 10 1 1 1 0 10 2 +1 20 55 1 1 0 20 3 +1 10 1 1 100 1 10 4 +1 20 55 1 100 1 20 4 +2 10 2 2 2 0 10 2 +2 100 55 2 2 0 100 3 +2 10 2 2 100 1 10 4 +2 100 55 2 100 1 100 4 +3 10 3 3 3 0 10 2 +3 120 55 3 3 0 120 3 +3 10 3 3 100 1 10 4 +3 120 55 3 100 1 120 4 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out index edd3326a752f97..231696c1d6e61d 100644 --- a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out @@ -4,8 +4,32 @@ 2 doris2 400 223 1 3 yixiu 600 4321 4321 +-- !sql -- +1 1 3.141592653589793 2.718281828459045 0 +2 2 3.141592653589793 2.718281828459045 0 +3 3 3.141592653589793 2.718281828459045 0 + +-- !sql -- +1 10 3.141592653589793 2.718281828459045 0 +2 20 
3.141592653589793 2.718281828459045 0 +3 3 3.141592653589793 2.718281828459045 0 +4 40 3.141592653589793 2.718281828459045 0 +5 50 3.141592653589793 2.718281828459045 0 + -- !select_default -- 1 doris 200 123 1 2 doris2 400 223 1 3 yixiu 600 4321 4321 +-- !sql -- +1 1 3.141592653589793 2.718281828459045 0 +2 2 3.141592653589793 2.718281828459045 0 +3 3 3.141592653589793 2.718281828459045 0 + +-- !sql -- +1 10 3.141592653589793 2.718281828459045 0 +2 20 3.141592653589793 2.718281828459045 0 +3 3 3.141592653589793 2.718281828459045 0 +4 40 3.141592653589793 2.718281828459045 0 +5 50 3.141592653589793 2.718281828459045 0 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.out new file mode 100644 index 00000000000000..2250bd3535a6f5 --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.out @@ -0,0 +1,57 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !1 -- +1 400 1 1 1 +2 400 2 2 2 +3 400 3 3 3 +4 400 4 4 4 + +-- !1 -- +1 400 99 1 1 400 +2 400 99 2 2 400 +3 400 99 3 3 400 +4 400 99 4 4 400 +5 \N 99 \N \N \N +6 \N 99 \N \N \N + +-- !2 -- +1 400 1 1 1 +2 400 2 2 2 +3 400 3 3 3 +4 400 4 4 4 + +-- !2 -- +1 500 1 88 1 500 +2 500 2 88 2 500 +3 400 3 3 3 400 +4 400 4 4 4 400 +5 200 \N 88 \N 200 +6 200 \N 88 \N 200 + +-- !1 -- +1 400 1 1 1 +2 400 2 2 2 +3 400 3 3 3 +4 400 4 4 4 + +-- !1 -- +1 400 99 1 1 400 +2 400 99 2 2 400 +3 400 99 3 3 400 +4 400 99 4 4 400 +5 \N 99 \N \N \N +6 \N 99 \N \N \N + +-- !2 -- +1 400 1 1 1 +2 400 2 2 2 +3 400 3 3 3 +4 400 4 4 4 + +-- !2 -- +1 500 1 88 1 500 +2 500 2 88 2 500 +3 400 3 3 3 400 +4 400 4 4 4 400 +5 200 \N 88 \N 200 +6 200 \N 88 \N 200 + diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_row_store.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_row_store.out new file mode 100644 index 00000000000000..59b96bcbd7c363 --- /dev/null +++ b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_row_store.out @@ -0,0 +1,71 @@ +-- This file is automatically generated. 
You should know what you did if you want to edit this +-- !1 -- +1 1 1 1 +2 2 2 2 +3 3 3 3 +4 4 4 4 +5 5 5 5 +6 6 6 6 + +-- !2 -- +1 10 10 1 22 +2 20 20 2 22 +3 3 3 3 22 +4 4 4 4 22 +5 50 50 5 22 +6 6 6 6 22 +7 70 70 7 22 +8 8 8 8 22 +9 9 9 9 22 +100 100 100 \N 22 + +-- !2 -- +1 10 10 99 22 +2 20 20 2 22 +3 3 3 99 22 +4 4 4 4 22 +5 50 50 5 22 +6 6 6 99 22 +7 70 70 7 22 +8 8 8 99 22 +9 9 9 9 22 +100 100 100 \N 22 +200 \N \N 200 18 + +-- !3 -- +1 1 1 1 +2 2 2 2 +3 3 3 3 +4 4 4 4 +5 5 5 5 +6 6 6 6 + +-- !4 -- +1 1 1 1 55 +2 2 777 2 55 +3 3 777 3 55 +4 4 4 4 55 +5 5 5 5 55 +6 6 6 6 55 +7 7 7 7 55 +8 8 777 8 55 +9 9 9 9 55 +10 \N 777 \N 47 +21 \N 777 \N 47 + +-- !4 -- +1 1 1 987 22 +2 2 777 987 22 +3 3 777 3 22 +4 4 4 4 22 +5 5 5 5 22 +6 6 6 6 22 +7 7 7 7 22 +8 8 777 8 22 +9 9 9 9 22 +10 \N 777 \N 22 +11 11 11 987 22 +20 20 20 20 22 +21 \N 777 \N 22 +22 \N \N 987 18 + diff --git a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_publish_seq.groovy b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_publish_seq.groovy new file mode 100644 index 00000000000000..19639998da4e4c --- /dev/null +++ b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_publish_seq.groovy @@ -0,0 +1,180 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.junit.Assert +import java.util.concurrent.TimeUnit +import org.awaitility.Awaitility + +suite("test_partial_update_publish_seq", "nonConcurrent") { + + def enable_block_in_publish = { + if (isCloudMode()) { + GetDebugPoint().enableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.enable_spin_wait") + GetDebugPoint().enableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.block") + } else { + GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.enable_spin_wait") + GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.block") + } + } + + def disable_block_in_publish = { + if (isCloudMode()) { + GetDebugPoint().disableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.enable_spin_wait") + GetDebugPoint().disableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.block") + } else { + GetDebugPoint().disableDebugPointForAllBEs("EnginePublishVersionTask::execute.enable_spin_wait") + GetDebugPoint().disableDebugPointForAllBEs("EnginePublishVersionTask::execute.block") + } + } + + def inspect_rows = { sqlStr -> + sql "set skip_delete_sign=true;" + sql "set skip_delete_bitmap=true;" + sql "sync" + qt_inspect sqlStr + sql "set skip_delete_sign=false;" + sql "set skip_delete_bitmap=false;" + sql "sync" + } + + + try { + GetDebugPoint().clearDebugPointsForAllFEs() + GetDebugPoint().clearDebugPointsForAllBEs() + + def table1 = "test_partial_update_publish_seq_map" + sql "DROP TABLE IF EXISTS ${table1} FORCE;" + sql """ CREATE TABLE IF NOT EXISTS ${table1} ( + `k1` int NOT NULL, + `c1` int, + `c2` int, + `c3` int, + `c4` int + )UNIQUE KEY(k1) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES ( + "enable_mow_light_delete" = "false", + "disable_auto_compaction" = "true", + "function_column.sequence_col" = "c1", + "replication_num" = "1"); """ + + sql "insert into ${table1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3);" + sql "sync;" + qt_seq_map_0 "select * from ${table1} order by k1;" + + + // with seq map val, >/=/< conflicting seq val + enable_block_in_publish() + def t1 = Thread.start { + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c1,c2) values(1,10,99),(2,10,99),(3,10,99);" + } + Thread.sleep(500) + def t2 = Thread.start { + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c1,c3) values(1,20,88),(2,10,88),(3,5,88);" + } + Thread.sleep(1000) + disable_block_in_publish() + t1.join() + t2.join() + qt_seq_map_1 "select * from ${table1} order by k1;" + inspect_rows "select *,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__,__DORIS_VERSION_COL__ from ${table1} order by k1,__DORIS_VERSION_COL__;" + + // without seq map val, the filled seq val >/=/< conflicting seq val + enable_block_in_publish() + t1 = Thread.start { + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c1,c2) values(1,9,77),(2,10,77),(3,50,77);" + } + Thread.sleep(500) + t2 = Thread.start { + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c4) values(1,33),(2,33),(3,33);" + } + Thread.sleep(1000) + disable_block_in_publish() + t1.join() + t2.join() + qt_seq_map_2 "select * from ${table1} order by k1;" + inspect_rows "select 
*,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__,__DORIS_VERSION_COL__ from ${table1} order by k1,__DORIS_VERSION_COL__;" + + // with delete sign and seq col val, >/=/< conflicting seq val + enable_block_in_publish() + t1 = Thread.start { + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c1,c2) values(1,80,66),(2,100,66),(3,120,66);" + } + Thread.sleep(500) + t2 = Thread.start { + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c1,__DORIS_DELETE_SIGN__) values(1,100,1),(2,100,1),(3,100,1);" + } + Thread.sleep(1000) + disable_block_in_publish() + t1.join() + t2.join() + qt_seq_map_3 "select * from ${table1} order by k1;" + inspect_rows "select *,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__,__DORIS_VERSION_COL__ from ${table1} order by k1,__DORIS_VERSION_COL__;" + + + sql "truncate table ${table1};" + sql "insert into ${table1} values(1,10,1,1,1),(2,10,2,2,2),(3,10,3,3,3);" + sql "sync;" + // with delete sign and without seq col val, >/=/< conflicting seq val + enable_block_in_publish() + t1 = Thread.start { + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c1,c2) values(1,20,55),(2,100,55),(3,120,55);" + } + Thread.sleep(500) + t2 = Thread.start { + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c4,__DORIS_DELETE_SIGN__) values(1,100,1),(2,100,1),(3,100,1);" + } + Thread.sleep(1000) + disable_block_in_publish() + t1.join() + t2.join() + qt_seq_map_4 "select * from ${table1} order by k1;" + inspect_rows "select *,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__,__DORIS_VERSION_COL__ from ${table1} order by k1,__DORIS_VERSION_COL__;" + + + } catch(Exception e) { + logger.info(e.getMessage()) + throw e + } finally { + GetDebugPoint().clearDebugPointsForAllFEs() + GetDebugPoint().clearDebugPointsForAllBEs() + } +} diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy index 28d1d0ed42d556..fb12c840280eda 100644 --- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy @@ -68,8 +68,31 @@ suite("test_primary_key_partial_update_default_value", "p0") { select * from ${tableName} order by id; """ - // drop drop + // test special default values + tableName = "test_primary_key_partial_update_default_value2" + // create table sql """ DROP TABLE IF EXISTS ${tableName} """ + sql """ CREATE TABLE ${tableName} ( + k int, + c1 int, + c2 bitmap NOT NULL DEFAULT bitmap_empty, + c3 double DEFAULT PI, + c4 double DEFAULT E, + c5 array NOT NULL DEFAULT "[]" + ) UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true", + "store_row_column" = "${use_row_store}"); """ + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${tableName}(k,c1) values(1,1),(2,2),(3,3);" + sql "sync;" + qt_sql "select k,c1,bitmap_to_string(c2),c3,c4,ARRAY_SIZE(c5) from ${tableName} order by k;" + + sql "insert into ${tableName}(k,c1) 
values(1,10),(2,20),(4,40),(5,50);" + sql "sync;" + qt_sql "select k,c1,bitmap_to_string(c2),c3,c4,ARRAY_SIZE(c5) from ${tableName} order by k;" } } } diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.groovy new file mode 100644 index 00000000000000..dfd8f43a6b3015 --- /dev/null +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.groovy @@ -0,0 +1,81 @@ + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_partial_update_lookup_row_key", "p0") { + + String db = context.config.getDbNameByFile(context.file) + sql "select 1;" // to create database + + for (def use_row_store : [false, true]) { + logger.info("current params: use_row_store: ${use_row_store}") + + connect( context.config.jdbcUser, context.config.jdbcPassword, context.config.jdbcUrl) { + sql "use ${db};" + sql "sync;" + + def tableName = "test_partial_update_publish_conflict_seq" + sql """ DROP TABLE IF EXISTS ${tableName} force;""" + sql """ CREATE TABLE ${tableName} ( + `k` int(11) NULL, + `v1` BIGINT NULL, + `v2` BIGINT NULL, + `v3` BIGINT NULL, + `v4` BIGINT NULL, + ) UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1 + PROPERTIES( + "replication_num" = "1", + "enable_unique_key_merge_on_write" = "true", + "disable_auto_compaction" = "true", + "function_column.sequence_col" = "v1", + "store_row_column" = "${use_row_store}"); """ + + sql """ insert into ${tableName} values + (1,400,1,1,1),(2,100,2,2,2),(3,30,3,3,3),(4,300,4,4,4);""" + sql """ insert into ${tableName} values + (1,100,1,1,1),(2,400,2,2,2),(3,100,3,3,3),(4,200,4,4,4);""" + sql """ insert into ${tableName} values + (1,200,1,1,1),(2,200,2,2,2),(3,300,3,3,3),(4,400,4,4,4);""" + sql """ insert into ${tableName} values + (1,300,1,1,1),(2,300,2,2,2),(3,400,3,3,3),(4,100,4,4,4);""" + qt_1 "select * from ${tableName} order by k;" + // lookup_row_key searches the rowsets of a key from the highest version down to the lowest version + // the segment that holds the valid row for each key can sit at a different position in that search sequence + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${tableName}(k,v2) values(1,99),(2,99),(3,99),(4,99),(5,99),(6,99);" + qt_1 "select *,__DORIS_SEQUENCE_COL__ from ${tableName} order by k;" + + + sql "truncate table ${tableName};" + sql """ insert into ${tableName} values + (1,400,1,1,1),(2,100,2,2,2),(3,30,3,3,3),(4,300,4,4,4);""" + sql """ insert into ${tableName} values + (1,100,1,1,1),(2,400,2,2,2),(3,100,3,3,3),(4,200,4,4,4);""" + sql """ insert into ${tableName} values + (1,200,1,1,1),(2,200,2,2,2),(3,300,3,3,3),(4,400,4,4,4);""" + sql """ insert into
${tableName} values + (1,300,1,1,1),(2,300,2,2,2),(3,400,3,3,3),(4,100,4,4,4);""" + qt_2 "select * from ${tableName} order by k;" + + sql "insert into ${tableName}(k,v1,v3) values(1,500,88),(2,500,88),(3,300,88),(4,200,88),(5,200,88),(6,200,88);" + qt_2 "select *,__DORIS_SEQUENCE_COL__ from ${tableName} order by k;" + } + } +} diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_row_store.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_row_store.groovy new file mode 100644 index 00000000000000..30102f9064d5fe --- /dev/null +++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_row_store.groovy @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.junit.Assert +import java.util.concurrent.TimeUnit +import org.awaitility.Awaitility + +suite("test_partial_update_row_store", "nonConcurrent") { + + def table1 = "test_partial_update_row_store" + sql "DROP TABLE IF EXISTS ${table1} FORCE;" + sql """ CREATE TABLE IF NOT EXISTS ${table1} ( + `k1` int NOT NULL, + `c1` int, + `c2` int, + c3 int + )UNIQUE KEY(k1) + DISTRIBUTED BY HASH(k1) BUCKETS 1 + PROPERTIES ( + "enable_mow_light_delete" = "false", + "disable_auto_compaction" = "true", + "replication_num" = "1", + "store_row_column" = "false"); """ + + sql "insert into ${table1} values(1,1,1,1),(2,2,2,2),(3,3,3,3);" + sql "insert into ${table1} values(4,4,4,4),(5,5,5,5),(6,6,6,6);" + sql "sync;" + qt_1 "select * from ${table1} order by k1;" + + def doSchemaChange = { cmd -> + sql cmd + waitForSchemaChangeDone { + sql """SHOW ALTER TABLE COLUMN WHERE IndexName='${table1}' ORDER BY createtime DESC LIMIT 1""" + time 2000 + } + } + + // turn on row_store_column, but only store part of columns + doSchemaChange """alter table ${table1} set ("store_row_column" = "true")""" + doSchemaChange """alter table ${table1} set ("row_store_columns" = "k1,c2")""" + sql "insert into ${table1} values(7,7,7,7),(8,8,8,8),(9,9,9,9);" + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c1,c2) values(1,10,10),(2,20,20),(5,50,50),(7,70,70),(100,100,100);" + qt_2 "select *, LENGTH(__DORIS_ROW_STORE_COL__) from ${table1} order by k1;" + sql "insert into ${table1}(k1,c3) values(1,99),(3,99),(6,99),(8,99),(200,200);" + qt_2 "select *, LENGTH(__DORIS_ROW_STORE_COL__) from ${table1} order by k1;" + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict=true;" + sql "sync;" + + + sql "truncate table ${table1};" + sql "insert into ${table1} values(1,1,1,1),(2,2,2,2),(3,3,3,3);" + sql "insert into ${table1} values(4,4,4,4),(5,5,5,5),(6,6,6,6);" + sql "sync;" + qt_3 "select * from ${table1} order 
by k1;" + + + // turn on full row store column + doSchemaChange """alter table ${table1} set ("store_row_column" = "true")""" + sql "insert into ${table1} values(7,7,7,7),(8,8,8,8),(9,9,9,9);" + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c2) values(2,777),(3,777),(10,777),(21,777),(8,777);" + qt_4 "select *,LENGTH(__DORIS_ROW_STORE_COL__) from ${table1} order by k1;" + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict=true;" + sql "sync;" + + // from row store to part columns row store + doSchemaChange """alter table ${table1} set ("row_store_columns" = "k1,c2")""" + sql "insert into ${table1} values(11,11,11,11),(20,20,20,20);" + + sql "set enable_unique_key_partial_update=true;" + sql "set enable_insert_strict=false;" + sql "sync;" + sql "insert into ${table1}(k1,c3) values(1,987),(2,987),(11,987),(22,987);" + qt_4 "select *,LENGTH(__DORIS_ROW_STORE_COL__) from ${table1} order by k1;" + sql "set enable_unique_key_partial_update=false;" + sql "set enable_insert_strict=true;" + sql "sync;" + + // Can not alter store_row_column from true to false currently, should add related case if supported +}