Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1361,6 +1361,10 @@ class Suite implements GroovyInterceptable {
return enableStorageVault;
}

// Returns true when the frontend is configured to wait for internal group
// commit to finish (i.e. the regression run is in "group commit" mode).
// Tests use this to pick group-commit-specific expectations (error strings,
// label handling, etc.).
//
// Uses a Yoda-style comparison so a null config value (e.g. the config key
// is absent and the lookup yields null) returns false instead of throwing
// a NullPointerException.
boolean isGroupCommitMode() {
    return "true".equals(getFeConfig("wait_internal_group_commit_finish"))
}

// Fetches the current value of a single frontend configuration item.
//
// key: the FE config name, used verbatim in a LIKE pattern, so it may
//      contain SQL wildcards (e.g. '%foo%').
// Returns the 'Value' column of the first matching row; assumes at least
// one row matches — an unknown key will fail on the empty result.
String getFeConfig(String key) {
    def configRows = sql_return_maparray("SHOW FRONTEND CONFIG LIKE '${key}'")
    def firstRow = configRows[0]
    return firstRow.Value
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,21 @@ suite("test_timezone") {
sql """ set time_zone = '+02:00' """

sql """ set enable_nereids_planner = false """
if (isGroupCommitMode()) {
sql """ set enable_nereids_planner = true """
}
sql """insert into test_timezone values('2022-01-01 01:02:55', '2022-01-01 01:02:55.123')"""
sql """insert into test_timezone values('2022-02-01 01:02:55Z', '2022-02-01 01:02:55.123Z')"""
sql """insert into test_timezone values('2022-03-01 01:02:55+08:00', '2022-03-01 01:02:55.123UTC')"""
sql """insert into test_timezone values('2022-04-01T01:02:55-06:00', '2022-04-01T01:02:55.123+06:00')"""
sql """insert into test_timezone values('2022-05-01 01:02:55+02:30', '2022-05-01 01:02:55.123-02:30')"""
sql """insert into test_timezone values('2022-06-01T01:02:55+04:30', '2022-06-01 01:02:55.123-07:30')"""
sql """insert into test_timezone values('20220701010255+07:00', '20220701010255-05:00')"""
sql """insert into test_timezone values('20220801+05:00', '20220801America/Argentina/Buenos_Aires')"""
if (isGroupCommitMode()) {
sql """insert into test_timezone values('2022-07-31 21:00', '2022-08-01')"""
} else {
sql """insert into test_timezone values('20220801+05:00', '20220801America/Argentina/Buenos_Aires')"""
}
qt_legacy "select * from test_timezone order by k1"

sql """ truncate table test_timezone """
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -175,9 +175,10 @@ suite("test_generated_column_fault_tolerance_nereids") {
PROPERTIES("replication_num" = "1");
;"""
// qt_common_default_test_insert_null
def exception_str = isGroupCommitMode() ? "too many filtered rows" : "Insert has filtered data in strict mode"
test {
sql "INSERT INTO test_gen_col_common_ft(a,b) values(1,null);"
exception "Insert has filtered data in strict mode."
exception exception_str
}

// qt_common_default_test_insert_gencol
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,8 @@ suite("test_create_table_generated_column_legacy") {
// qt_common_default_test_insert_null
test {
sql "INSERT INTO test_gen_col_common_legacy(a,b) values(1,null);"
exception "Insert has filtered data in strict mode."
def exception_str = isGroupCommitMode() ? "too many filtered rows" : "Insert has filtered data in strict mode"
exception exception_str
}

// qt_common_default_test_insert_gencol
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -288,7 +288,22 @@ suite("test_disable_move_memtable", "nonConcurrent") {
sql """ set enable_nereids_dml=false """
insert_into_select_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test", "unknown destination tuple descriptor")
insert_into_select_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "test1", "success")


if (isGroupCommitMode()) {
def ret = sql "SHOW FRONTEND CONFIG like '%stream_load_default_memtable_on_sink_node%';"
logger.info("${ret}")
try {
sql "ADMIN SET FRONTEND CONFIG ('stream_load_default_memtable_on_sink_node' = 'true')"
sql """ set enable_nereids_planner=true """
sql """ set enable_nereids_dml=true """
stream_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall", "fail")
stream_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall1", "fail")
} finally {
sql "ADMIN SET FRONTEND CONFIG ('stream_load_default_memtable_on_sink_node' = '${ret[0][1]}')"
}
return
}

sql """ set enable_nereids_planner=true """
sql """ set enable_nereids_dml=true """
stream_load_with_injection("VTabletWriterV2._init._output_tuple_desc_null", "baseall", "fail")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -127,15 +127,15 @@ suite("test_jsonb_load_and_function", "p0") {
sql """ set enable_insert_strict = true """
def success = true
try {
sql """INSERT INTO ${testTable} VALUES(26, '')"""
sql """INSERT INTO ${testTable} VALUES(27, '')"""
} catch(Exception ex) {
logger.info("""INSERT INTO ${testTable} invalid json failed: """ + ex)
success = false
}
assertEquals(false, success)
success = true
try {
sql """INSERT INTO ${testTable} VALUES(26, 'abc')"""
sql """INSERT INTO ${testTable} VALUES(28, 'abc')"""
} catch(Exception ex) {
logger.info("""INSERT INTO ${testTable} invalid json failed: """ + ex)
success = false
Expand All @@ -147,15 +147,15 @@ suite("test_jsonb_load_and_function", "p0") {
sql """ set enable_insert_strict = false """
success = true
try {
sql """INSERT INTO ${testTable} VALUES(26, '')"""
sql """INSERT INTO ${testTable} VALUES(29, '')"""
} catch(Exception ex) {
logger.info("""INSERT INTO ${testTable} invalid json failed: """ + ex)
success = false
}
assertEquals(true, success)
success = true
try {
sql """INSERT INTO ${testTable} VALUES(26, 'abc')"""
sql """INSERT INTO ${testTable} VALUES(30, 'abc')"""
} catch(Exception ex) {
logger.info("""INSERT INTO ${testTable} invalid json failed: """ + ex)
success = false
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,21 +48,22 @@ suite("test_array_string_insert", "load") {
sql "set enable_insert_strict = true"

// ARRAY<char> too long
def exception_str = isGroupCommitMode() ? "too many filtered rows" : "Insert has filtered data in strict mode"
test {
sql "INSERT INTO ${testTable} VALUES (1, ['12345','123456'], [], NULL)"
exception "Insert has filtered data in strict mode"
exception exception_str
}

// NULL for NOT NULL column
test {
sql "INSERT INTO ${testTable} VALUES (2, ['12345','123'], NULL, NULL)"
exception "Insert has filtered data in strict mode"
exception exception_str
}

// ARRAY<ARRAY<char>> too long
test {
sql "INSERT INTO ${testTable} VALUES (3, NULL, ['4'], [['123456'],['222']])"
exception "Insert has filtered data in strict mode"
exception exception_str
}

// normal insert
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -631,7 +631,9 @@ suite("test_http_stream", "p0") {
}
log.info("http_stream result: ${result}".toString())
def json = parseJson(result)
assertEquals(label, json.Label.toLowerCase())
if (!isGroupCommitMode()) {
assertEquals(label, json.Label.toLowerCase())
}
assertEquals("success", json.Status.toLowerCase())
assertEquals(11, json.NumberTotalRows)
assertEquals(0, json.NumberFilteredRows)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -341,8 +341,8 @@ suite("test_auto_partition_behavior") {
"""
test{
sql """insert into `long_value` values ("jwklefjklwehrnkjlwbfjkwhefkjhwjkefhkjwehfkjwehfkjwehfkjbvkwebconqkcqnocdmowqmosqmojwnqknrviuwbnclkmwkj");"""

exception "Partition name's length is over limit of 50."
def exception_str = isGroupCommitMode() ? "s length is over limit of 50." : "Partition name's length is over limit of 50."
exception exception_str
}

// illegal partiton definetion
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,10 +76,11 @@ suite("test_list_default_multi_col_partition") {
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1")
"""
// insert value which is not allowed in existing partitions
def exception_str = isGroupCommitMode() ? "too many filtered rows" : "Insert has filtered data in strict mode"
try {
test {
sql """insert into list_default_multi_col_par values (10,1,1,1,24453.325,1,1)"""
exception """Insert has filtered data in strict mode"""
exception exception_str
}
} finally{
}
Expand All @@ -102,7 +103,7 @@ suite("test_list_default_multi_col_partition") {
try {
test {
sql """insert into list_default_multi_col_par values (10,1,1,1,24453.325,1,1)"""
exception """Insert has filtered data in strict mode"""
exception exception_str
}
} finally{
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,10 +76,11 @@ suite("test_list_default_partition") {
DISTRIBUTED BY HASH(k1) BUCKETS 5 properties("replication_num" = "1")
"""
// insert value which is not allowed in existing partitions
def exception_str = isGroupCommitMode() ? "too many filtered rows" : "Insert has filtered data in strict mode"
try {
test {
sql """insert into list_default_par values (10,1,1,1,24453.325,1,1)"""
exception """Insert has filtered data in strict mode"""
exception exception_str
}
} finally{
}
Expand All @@ -102,7 +103,7 @@ suite("test_list_default_partition") {
try {
test {
sql """insert into list_default_par values (10,1,1,1,24453.325,1,1)"""
exception """Insert has filtered data in strict mode"""
exception exception_str
}
} finally{
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -352,9 +352,10 @@ suite("test_list_partition_datatype", "p0") {
PROPERTIES ("replication_allocation" = "tag.location.default: 1")
"""
sql """INSERT INTO test_list_partition_ddl_tbl_1 VALUES("0000-01-01", "0000-01-01"), ("9999-12-31", "9999-12-31")"""
def exception_str = isGroupCommitMode() ? "too many filtered rows" : "Insert has filtered data in strict mode"
test {
sql """INSERT INTO test_list_partition_ddl_tbl_1 VALUES("2000-01-02", "2000-01-03")"""
exception "Insert has filtered data in strict mode"
exception exception_str
}
qt_sql1 "SELECT * FROM test_list_partition_ddl_tbl_1 order by k1"
sql """INSERT INTO test_list_partition_ddl_tbl_1 VALUES("2000-11-02", "2000-11-03")"""
Expand Down Expand Up @@ -452,15 +453,15 @@ suite("test_list_partition_datatype", "p0") {
"""
test {
sql """insert into test_list_partition_tb2_char values('d', '1')"""
exception "Insert has filtered data in strict mode"
exception exception_str
}
sql """alter table test_list_partition_tb2_char add partition partition_add_1 values in ("aaa","bbb")"""
def ret = sql "show partitions from test_list_partition_tb2_char where PartitionName='partition_add_1'"
assertTrue(ret.size() == 1)

test {
sql """ insert into test_list_partition_tb2_char values('aa', '1')"""
exception "Insert has filtered data in strict mode"
exception exception_str
}
sql "insert into test_list_partition_tb2_char values('a', 'a')"
sql "insert into test_list_partition_tb2_char values('aaa', 'a')"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -280,10 +280,11 @@ suite("test_multi_partition_key", "p0") {
"values(0, NULL, 0, 0, 0, '2000-01-01 00:00:00', '2000-01-01', 'a', 'a', 0.001, -0.001, 0.001)"
qt_sql7 "select k1 from test_multi_col_test_partition_null_value partition(partition_a) where k2 is null"
sql "ALTER TABLE test_multi_col_test_partition_null_value DROP PARTITION partition_a"
def exception_str = isGroupCommitMode() ? "too many filtered rows" : "Insert has filtered data in strict mode"
test {
sql "insert into test_multi_col_test_partition_null_value " +
"values(0, NULL, 0, 0, 0, '2000-01-01 00:00:00', '2000-01-01', 'a', 'a', 0.001, -0.001, 0.001)"
exception "Insert has filtered data in strict mode"
exception exception_str
}
qt_sql8 "select k1 from test_multi_col_test_partition_null_value where k2 is null"
// partition columns and add key column
Expand Down Expand Up @@ -413,12 +414,12 @@ suite("test_multi_partition_key", "p0") {
"""
test {
sql "insert into test_multi_col_insert values (-127, -200)"
exception "Insert has filtered data in strict mode"
exception exception_str
}
sql "insert into test_multi_col_insert values (10, -100)"
test {
sql "insert into test_multi_col_insert values (10, 50)"
exception "Insert has filtered data in strict mode"
exception exception_str

}
sql "insert into test_multi_col_insert values (10, 100)"
Expand Down