From c5a1a7fa182b291f5e427b68739894fa77704970 Mon Sep 17 00:00:00 2001
From: csun5285
Date: Wed, 19 Jun 2024 20:35:37 +0800
Subject: [PATCH 1/8] fix test

Several compaction, schema-change, and segcompaction suites read the
BE-wide disable_auto_compaction config through show_be_config (and some
toggled it with set_be_config, restoring it in a finally block), even
though most of them never used the value they read. Drop that
boilerplate; where a test genuinely needs compaction disabled, it now
sets the disable_auto_compaction table property on the table it creates
instead of flipping global BE state.
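The same shape repeats in every suite touched below; a minimal sketch of
the after-state, with hypothetical suite and table names (only the
regression framework's suite/sql helpers are assumed):

    suite('example_compaction_test') {
        // Scope the setting to the table under test instead of flipping
        // the BE-wide config and restoring it in a finally block.
        sql """
            CREATE TABLE example_t (k INT) DISTRIBUTED BY HASH(k) BUCKETS 1 PROPERTIES (
                "disable_auto_compaction" = "true"
            )
            """
        sql "INSERT INTO example_t VALUES(1)"
        // No finally { set_be_config(...) } cleanup is needed, because no
        // global BE state was changed.
    }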
---
 .../test_clone_missing_version.groovy         |   7 +-
 ...est_compaction_with_visible_version.groovy |   7 +-
 ...t_single_compaction_fault_injection.groovy |  18 --
 ...paction_with_variant_inverted_index.groovy |  25 +-
 .../test_single_replica_compaction.groovy     |  25 +-
 .../test_time_series_compaction_policy.groovy |  33 +-
 ...st_index_compaction_fault_injection.groovy |   9 -
 .../test_segcompaction_fault_injection.groovy |  18 --
 ...t_too_many_segments_fault_injection.groovy |  19 --
 .../schema_change/test_number_overflow.groovy |  20 --
 .../test_agg_keys_schema_change.groovy        |  17 --
 .../test_agg_mv_schema_change.groovy          |  17 --
 .../test_agg_rollup_schema_change.groovy      |  17 --
 .../test_agg_vals_schema_change.groovy        | 281 ++++++++----------
 .../test_dup_keys_schema_change.groovy        |  17 --
 .../test_dup_mv_schema_change.groovy          |  16 -
 .../test_dup_rollup_schema_change.groovy      |  15 -
 .../test_dup_vals_schema_change.groovy        |  15 -
 .../test_uniq_keys_schema_change.groovy       | 252 ++++++++--------
 .../test_uniq_mv_schema_change.groovy         |  15 -
 .../test_uniq_rollup_schema_change.groovy     |  15 -
 .../test_uniq_vals_schema_change.groovy       |  15 -
 .../test_varchar_schema_change.groovy         |  16 -
 .../test_segcompaction_agg_keys.groovy        |  19 --
 .../test_segcompaction_agg_keys_index.groovy  |  18 --
 .../test_segcompaction_dup_keys.groovy        |  20 --
 .../test_segcompaction_dup_keys_index.groovy  |  19 --
 .../test_segcompaction_unique_keys.groovy     |  20 --
 ...est_segcompaction_unique_keys_index.groovy |  19 --
 .../test_segcompaction_unique_keys_mow.groovy |  20 --
 ...segcompaction_unique_keys_mow_index.groovy |  20 --
 31 files changed, 277 insertions(+), 767 deletions(-)

diff --git a/regression-test/suites/clone_p0/test_clone_missing_version.groovy b/regression-test/suites/clone_p0/test_clone_missing_version.groovy
index 144d45bc5f9de8..2981cf3c5e3638 100644
--- a/regression-test/suites/clone_p0/test_clone_missing_version.groovy
+++ b/regression-test/suites/clone_p0/test_clone_missing_version.groovy
@@ -27,7 +27,6 @@ suite('test_clone_missing_version') {
         'schedule_slot_num_per_hdd_path=1000',
     ]
     options.beConfigs += [
-        'disable_auto_compaction=true',
         'report_tablet_interval_seconds=1',
     ]
 
@@ -43,7 +42,11 @@ suite('test_clone_missing_version') {
         GetDebugPoint().disableDebugPoint(be.Host, be.HttpPort as int, NodeType.BE, injectName)
     }
 
-    sql 'CREATE TABLE t (k INT) DISTRIBUTED BY HASH(k) BUCKETS 1'
+    sql """
+        CREATE TABLE t (k INT) DISTRIBUTED BY HASH(k) BUCKETS 1 PROPERTIES (
+            "disable_auto_compaction" = "true"
+        )
+        """
 
     sql 'INSERT INTO t VALUES(2)'
 
diff --git a/regression-test/suites/compaction/test_compaction_with_visible_version.groovy b/regression-test/suites/compaction/test_compaction_with_visible_version.groovy
index 194a1b67566192..4a6ee4c847a5db 100644
--- a/regression-test/suites/compaction/test_compaction_with_visible_version.groovy
+++ b/regression-test/suites/compaction/test_compaction_with_visible_version.groovy
@@ -26,7 +26,6 @@ suite('test_compaction_with_visible_version') {
         'partition_info_update_interval_secs=5',
     ]
     options.beConfigs += [
-        'disable_auto_compaction=true',
         'report_tablet_interval_seconds=1',
         'tablet_rowset_stale_sweep_by_size=true',
         'tablet_rowset_stale_sweep_threshold_size=0',
@@ -166,7 +165,11 @@ suite('test_compaction_with_visible_version') {
         }
     }
 
-    sql " CREATE TABLE ${tableName} (k1 int, k2 int) DISTRIBUTED BY HASH(k1) BUCKETS 1 "
+    sql """
+        CREATE TABLE ${tableName} (k1 int, k2 int) DISTRIBUTED BY HASH(k1) BUCKETS 1 PROPERTIES (
+            "disable_auto_compaction" = "true"
+        )
+        """
 
     // normal
     def rowNum = 0L
diff --git a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy
index ebc74257032cab..737528d1e5dfd7 100644
--- a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy
+++ b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy
@@ -123,7 +123,6 @@ suite("test_single_compaction_fault_injection", "p2") {
         return tabletStatus
     }
 
-    boolean disableAutoCompaction = true
    try {
         String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
         backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-        set_be_config.call("disable_auto_compaction", "true")
         set_be_config.call("update_replica_infos_interval_seconds", "5")
-
         // find the master be for single compaction
         Boolean found = false
         String master_backend_id
@@ -369,7 +353,5 @@ suite("test_single_compaction_fault_injection", "p2") {
         select * from ${tableName} order by id
         """
-    } finally {
-        set_be_config.call("disable_auto_compaction", disableAutoCompaction.toString())
     }
 }
diff --git a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy
index 02b978c3659137..ee299355473240 100644
--- a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy
+++ b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy
@@ -22,24 +22,12 @@ suite("test_single_compaction_with_variant_inverted", "p2") {
         return;
     }
     def tableName = "test_single_compaction_with_variant_inverted"
-
-    def set_be_config = { key, value ->
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        for (String backend_id: backendId_to_backendIP.keySet()) {
-            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
-            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
-        }
-    }
 
     def calc_file_crc_on_tablet = { ip, port, tablet ->
         return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
     }
 
     boolean disableAutoCompaction = true
-    boolean has_update_be_config = false
     try {
         String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
         backend_id = backendId_to_backendIP.keySet()[0]
         def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
 
         logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
         assertEquals(code, 0)
         def configList = parseJson(out.trim())
         assert configList instanceof List
 
         for (Object ele in (List) configList) {
             assert ele instanceof List
             if (((List) ele)[0] == "disable_auto_compaction") {
                 disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
             }
         }
-        set_be_config.call("disable_auto_compaction", "true")
-        has_update_be_config = true
 
         def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id ->
             StringBuilder sb = new StringBuilder();
@@ -158,7 +144,12 @@ suite("test_single_compaction_with_variant_inverted", "p2") {
             DUPLICATE KEY(`id`)
             COMMENT 'OLAP'
             DISTRIBUTED BY HASH(`id`) BUCKETS 1
-            PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "inverted_index_storage_format" = "V1");
+            PROPERTIES (
+                "replication_num" = "2",
+                "enable_single_replica_compaction" = "true",
+                "inverted_index_storage_format" = "V1",
+                "disable_auto_compaction" = "true"
+            );
         """
 
         def tablets = sql_return_maparray """ show tablets from ${tableName}; """
@@ -249,9 +240,5 @@ suite("test_single_compaction_with_variant_inverted", "p2") {
         sql """
         DROP TABLE IF EXISTS ${tableName};
         """
-    } finally {
-        if (has_update_be_config) {
-            set_be_config.call("disable_auto_compaction", disableAutoCompaction.toString())
-        }
     }
 }
diff --git a/regression-test/suites/compaction/test_single_replica_compaction.groovy b/regression-test/suites/compaction/test_single_replica_compaction.groovy
index 2387cc8f323d2d..a1c771395a5aff 100644
--- a/regression-test/suites/compaction/test_single_replica_compaction.groovy
+++ b/regression-test/suites/compaction/test_single_replica_compaction.groovy
@@ -23,23 +23,11 @@ suite("test_single_replica_compaction", "p2") {
     }
     def tableName = "test_single_replica_compaction"
 
-    def set_be_config = { key, value ->
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        for (String backend_id: backendId_to_backendIP.keySet()) {
-            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
-            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
-        }
-    }
-
     def calc_file_crc_on_tablet = { ip, port, tablet ->
         return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
     }
 
     boolean disableAutoCompaction = true
-    boolean has_update_be_config = false
     try {
         String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
         backend_id = backendId_to_backendIP.keySet()[0]
         def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
 
         logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
         assertEquals(code, 0)
         def configList = parseJson(out.trim())
         assert configList instanceof List
 
         for (Object ele in (List) configList) {
             assert ele instanceof List
             if (((List) ele)[0] == "disable_auto_compaction") {
                 disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
             }
         }
-        set_be_config.call("disable_auto_compaction", "true")
-        has_update_be_config = true
 
         def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id ->
             if (compact_type == "cumulative") {
@@ -166,7 +152,12 @@ suite("test_single_replica_compaction", "p2") {
             UNIQUE KEY(`id`)
             COMMENT 'OLAP'
             DISTRIBUTED BY HASH(`id`) BUCKETS 1
-            PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false" );
+            PROPERTIES (
+                "replication_num" = "2",
+                "enable_single_replica_compaction" = "true",
+                "enable_unique_key_merge_on_write" = "false",
+                "disable_auto_compaction" = "true"
+            );
         """
 
         def tablets = sql_return_maparray """ show tablets from ${tableName}; """
@@ -290,9 +281,5 @@ suite("test_single_replica_compaction", "p2") {
         select * from ${tableName} order by id
         """
-    } finally {
-        if (has_update_be_config) {
-            set_be_config.call("disable_auto_compaction", disableAutoCompaction.toString())
-        }
     }
 }
diff --git a/regression-test/suites/compaction/test_time_series_compaction_policy.groovy b/regression-test/suites/compaction/test_time_series_compaction_policy.groovy
index ff41811049cf08..1aaf92ea2f9756 100644
--- a/regression-test/suites/compaction/test_time_series_compaction_policy.groovy
+++ b/regression-test/suites/compaction/test_time_series_compaction_policy.groovy
@@ -23,13 +23,6 @@ suite("test_time_series_compaction_polciy", "p0") {
     def backendId_to_backendHttpPort = [:]
     getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-    def set_be_config = { key, value ->
-        for (String backend_id: backendId_to_backendIP.keySet()) {
-            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
-            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
-        }
-    }
-
     def trigger_cumulative_compaction_on_tablets = { tablets ->
         for (def tablet : tablets) {
             String tablet_id = tablet.TabletId
@@ -82,25 +75,7 @@ suite("test_time_series_compaction_polciy", "p0") {
         return rowsetCount
     }
 
-    boolean disableAutoCompaction = false
     try {
-        String backend_id;
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-                logger.info("disable_auto_compaction: ${((List) ele)[2]}")
-            }
-        }
-        set_be_config.call("disable_auto_compaction", "true")
 
         sql """ DROP TABLE IF EXISTS ${tableName}; """
         sql """
@@ -113,7 +88,11 @@ suite("test_time_series_compaction_polciy", "p0") {
         DUPLICATE KEY(`id`)
         COMMENT 'OLAP'
         DISTRIBUTED BY HASH(`id`) BUCKETS 2
-        PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = "true", "compaction_policy" = "time_series");
+        PROPERTIES (
+            "replication_num" = "1",
+            "disable_auto_compaction" = "true",
+            "compaction_policy" = "time_series"
+        );
         """
         // insert 16 lines, BUCKETS = 2
         sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """
@@ -191,8 +170,6 @@ suite("test_time_series_compaction_polciy", "p0") {
         rowsetCount = get_rowset_count.call(tablets);
         assert (rowsetCount == 11 * replicaNum)
         qt_sql_3 """ select count() from ${tableName}"""
-    } finally {
-        set_be_config.call("disable_auto_compaction", disableAutoCompaction.toString())
     }
 }
diff --git a/regression-test/suites/fault_injection_p0/test_index_compaction_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_index_compaction_fault_injection.groovy
index 57c8132dd7a1bd..6c9cac37f0b27f 100644
--- a/regression-test/suites/fault_injection_p0/test_index_compaction_fault_injection.groovy
+++ b/regression-test/suites/fault_injection_p0/test_index_compaction_fault_injection.groovy
@@ -280,15 +280,9 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") {
         }
     }
     set_be_config.call("inverted_index_compaction_enable", "true")
-    if (isCloudMode) {
-        set_be_config.call("disable_auto_compaction", "true")
-    }
     has_update_be_config = true
     // check updated config
     check_config.call("inverted_index_compaction_enable", "true");
-    if (isCloudMode) {
-        check_config.call("disable_auto_compaction", "true")
-    }
 
 
     /**
@@ -349,9 +343,6 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") {
     } finally {
         if (has_update_be_config) {
             set_be_config.call("inverted_index_compaction_enable", invertedIndexCompactionEnable.toString())
-            if (isCloudMode) {
-                set_be_config.call("disable_auto_compaction", disableAutoCompaction.toString())
-            }
         }
     }
 }
diff --git a/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
index 32e855e8ff2f2e..3c679af7a6603f 100644
--- a/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
+++ b/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
@@ -43,26 +43,8 @@ suite("test_segcompaction_correctness", "nonConcurrent,p2") {
     String endpoint = getS3Endpoint()
     String region = getS3Region()
     String bucket = getS3BucketName()
-    def backendId_to_backendIP = [:]
-    def backendId_to_backendHttpPort = [:]
-    String backend_id;
 
     try {
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql "${create_table_sql}"
diff --git a/regression-test/suites/fault_injection_p0/test_too_many_segments_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_too_many_segments_fault_injection.groovy
index a838b6f77aef81..cd36bb7d18525f 100644
--- a/regression-test/suites/fault_injection_p0/test_too_many_segments_fault_injection.groovy
+++ b/regression-test/suites/fault_injection_p0/test_too_many_segments_fault_injection.groovy
@@ -44,26 +44,7 @@ suite("test_too_many_segments", "nonConcurrent,p2") { // the epic -238 case
     String endpoint = getS3Endpoint()
     String region = getS3Region()
     String bucket = getS3BucketName()
-    def backendId_to_backendIP = [:]
-    def backendId_to_backendHttpPort = [:]
-    String backend_id;
 
     try {
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql "${create_table_sql}"
diff --git a/regression-test/suites/schema_change/test_number_overflow.groovy b/regression-test/suites/schema_change/test_number_overflow.groovy
index e69bb4e6ce8aeb..d13e797c1e37a6 100644
--- a/regression-test/suites/schema_change/test_number_overflow.groovy
+++ b/regression-test/suites/schema_change/test_number_overflow.groovy
@@ -25,26 +25,6 @@ suite ("test_number_overflow") {
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS test_number_overflow """
         sql """ CREATE TABLE IF NOT EXISTS test_number_overflow ( k1 INT NOT NULL, k2 VARCHAR(4096) NOT NULL, k3 VARCHAR(4096) NOT NULL, k4 VARCHAR(4096) NOT NULL, k5 VARCHAR(4096) NOT NULL, k6 VARCHAR(4096) NOT NULL, k7 VARCHAR(4096) NOT NULL, k8 VARCHAR(4096) NOT NULL, k9 VARCHAR(4096) NOT NULL, v1 FLOAT SUM NOT NULL, v2 DECIMAL(20,7) SUM NOT NULL ) AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k7,k8,k9) PARTITION BY RANGE(k1) ( PARTITION partition_a VALUES LESS THAN ("5"), PARTITION partition_b VALUES LESS THAN ("30"), PARTITION partition_c VALUES LESS THAN ("100"), PARTITION partition_d VALUES LESS THAN ("500"), PARTITION partition_e VALUES LESS THAN ("1000"), PARTITION partition_f VALUES LESS THAN ("2000"), PARTITION partition_g VALUES LESS THAN MAXVALUE ) DISTRIBUTED BY HASH(k1, k2) BUCKETS 3
diff --git a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
index 29198f2793278c..2672db7c751f88 100644
--- a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
@@ -27,27 +27,10 @@ suite ("test_agg_keys_schema_change") {
 
     try {
-        String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS schema_change_agg_keys_regression_test """
         sql """
                 CREATE TABLE IF NOT EXISTS schema_change_agg_keys_regression_test (
diff --git a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
index c9a6ae5ff842c5..2e68bd7608162e 100644
--- a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
@@ -44,27 +44,10 @@ suite ("test_agg_mv_schema_change") {
     def tableName = "schema_change_agg_mv_regression_test"
 
     try {
-        String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
                 CREATE TABLE IF NOT EXISTS ${tableName} (
diff --git a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
index a44ae10e3162ba..9302133295cf90 100644
--- a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
@@ -44,27 +44,10 @@ suite ("test_agg_rollup_schema_change") {
     }
 
     try {
-        String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
                 CREATE TABLE IF NOT EXISTS ${tableName} (
"用户最后一次访问时间", - `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费", - `max_dwell_time` INT MAX DEFAULT "0" COMMENT "用户最大停留时间", - `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "用户最小停留时间", - `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL列", - `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap列") - AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`) - BUCKETS 8 - PROPERTIES ( "replication_num" = "1", "light_schema_change" = "false" ); - """ - - sql """ INSERT INTO ${tableName} VALUES - (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20, hll_hash(1), to_bitmap(1)) - """ - - sql """ INSERT INTO ${tableName} VALUES - (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19, hll_hash(2), to_bitmap(2)) - """ - - qt_sc """ - select * from ${tableName} order by user_id - """ - - // alter and test light schema change - if (!isCloudMode()) { - sql """ALTER TABLE ${tableName} SET ("light_schema_change" = "true");""" - } - - sql """ INSERT INTO ${tableName} VALUES - (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21, hll_hash(2), to_bitmap(2)) - """ - - sql """ INSERT INTO ${tableName} VALUES - (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(3), to_bitmap(3)) - """ - qt_sc """ - select * from ${tableName} order by user_id + sql """ DROP TABLE IF EXISTS ${tableName} """ + + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `date` DATE NOT NULL COMMENT "数据灌入日期时间", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `last_visit_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", + `last_update_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间", + `last_visit_date_not_null` DATETIME REPLACE NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", + `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费", + `max_dwell_time` INT MAX DEFAULT "0" COMMENT "用户最大停留时间", + `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "用户最小停留时间", + `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL列", + `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap列") + AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`) + BUCKETS 8 + PROPERTIES ( "replication_num" = "1", "light_schema_change" = "false" ); + """ + + sql """ INSERT INTO ${tableName} VALUES + (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20, hll_hash(1), to_bitmap(1)) + """ + + sql """ INSERT INTO ${tableName} VALUES + (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19, hll_hash(2), to_bitmap(2)) + """ + + qt_sc """ + select * from ${tableName} order by user_id """ - // add column - sql """ - ALTER table ${tableName} ADD COLUMN new_column INT MAX default "1" - """ - - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ - - sql """ INSERT INTO ${tableName} VALUES - (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) - """ - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ - - - sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`, - `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`, `hll_col`, `bitmap_col`) - VALUES - (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 
'2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4)) - """ - - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """ - - sql """ INSERT INTO ${tableName} VALUES - (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) - """ - qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """ - - qt_sc """ select count(*) from ${tableName} """ - - // drop column - sql """ - ALTER TABLE ${tableName} DROP COLUMN last_visit_date - """ - qt_sc """ select * from ${tableName} where user_id = 3 """ - - sql """ INSERT INTO ${tableName} VALUES - (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) - """ - - qt_sc """ select * from ${tableName} where user_id = 4 """ - - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - - // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } + // alter and test light schema change + if (!isCloudMode()) { + sql """ALTER TABLE ${tableName} SET ("light_schema_change" = "true");""" + } - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(100) + sql """ INSERT INTO ${tableName} VALUES + (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21, hll_hash(2), to_bitmap(2)) + """ + + sql """ INSERT INTO ${tableName} VALUES + (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(3), to_bitmap(3)) + """ + qt_sc """ + select * from ${tableName} order by user_id + """ + + // add column + sql """ + ALTER table ${tableName} ADD COLUMN new_column INT MAX default "1" + """ + + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ + + sql """ INSERT INTO ${tableName} VALUES + (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) + """ + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ + + + sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`, + `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`, `hll_col`, `bitmap_col`) + VALUES + (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 
hll_hash(4), to_bitmap(4)) + """ + + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """ + + sql """ INSERT INTO ${tableName} VALUES + (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) + """ + qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """ + + qt_sc """ select count(*) from ${tableName} """ + + // drop column + sql """ + ALTER TABLE ${tableName} DROP COLUMN last_visit_date + """ + qt_sc """ select * from ${tableName} where user_id = 3 """ + + sql """ INSERT INTO ${tableName} VALUES + (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) + """ + + qt_sc """ select * from ${tableName} where user_id = 4 """ + + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + + // compaction + String[][] tablets = sql """ show tablets from ${tableName}; """ + for (String[] tablet in tablets) { String tablet_id = tablet[0] backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - qt_sc """ select count(*) from ${tableName} """ + logger.info("run compaction:" + tablet_id) + (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) + //assertEquals(code, 0) + } + + // wait for all compactions done + for (String[] tablet in tablets) { + boolean running = true + do { + Thread.sleep(100) + String tablet_id = tablet[0] + backend_id = tablet[2] + (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactionStatus = parseJson(out.trim()) + assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } + qt_sc """ select count(*) from ${tableName} """ - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ } finally { //try_sql("DROP TABLE IF EXISTS ${tableName}") diff --git 
diff --git a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
index 024e7b58882dad..336805df29a87e 100644
--- a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
@@ -25,27 +25,10 @@ suite ("test_dup_keys_schema_change") {
     }
 
     try {
-        String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
diff --git a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
index 9a962977ddaea2..e09da2af730109 100644
--- a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
@@ -44,22 +44,6 @@ suite ("test_dup_mv_schema_change") {
     def backendId_to_backendHttpPort = [:]
     getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-    backend_id = backendId_to_backendIP.keySet()[0]
-    def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-    logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-    assertEquals(code, 0)
-    def configList = parseJson(out.trim())
-    assert configList instanceof List
-
-    boolean disableAutoCompaction = true
-    for (Object ele in (List) configList) {
-        assert ele instanceof List
-        if (((List) ele)[0] == "disable_auto_compaction") {
-            disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-        }
-    }
-
     sql """ DROP TABLE IF EXISTS ${tableName} """
     sql """
diff --git a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
index 62b63ae4ec6f13..eea771d400c487 100644
--- a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
@@ -49,21 +49,6 @@ suite ("test_dup_rollup_schema_change") {
     def backendId_to_backendHttpPort = [:]
     getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-    backend_id = backendId_to_backendIP.keySet()[0]
-    def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-    logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-    assertEquals(code, 0)
-    def configList = parseJson(out.trim())
-    assert configList instanceof List
-
-    boolean disableAutoCompaction = true
-    for (Object ele in (List) configList) {
-        assert ele instanceof List
-        if (((List) ele)[0] == "disable_auto_compaction") {
-            disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-        }
-    }
 
     sql """ DROP TABLE IF EXISTS ${tableName} """
     sql """
diff --git a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
index d88475ce57bc33..c73d13d0720d7e 100644
--- a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
@@ -26,21 +26,6 @@ suite ("test_dup_vals_schema_change") {
     def backendId_to_backendHttpPort = [:]
     getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-    backend_id = backendId_to_backendIP.keySet()[0]
-    def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-    logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-    assertEquals(code, 0)
-    def configList = parseJson(out.trim())
-    assert configList instanceof List
-
-    boolean disableAutoCompaction = true
-    for (Object ele in (List) configList) {
-        assert ele instanceof List
-        if (((List) ele)[0] == "disable_auto_compaction") {
-            disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-        }
-    }
 
     sql """ DROP TABLE IF EXISTS ${tableName} """
     sql """
diff --git a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
index a887ec44908ac1..e06c27b8abf4db 100644
--- a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
@@ -25,145 +25,129 @@ suite ("test_uniq_keys_schema_change") {
     def backendId_to_backendIP = [:]
     def backendId_to_backendHttpPort = [:]
     getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
-            sql """ DROP TABLE IF EXISTS ${tableName} """
-
-            sql """
-                CREATE TABLE IF NOT EXISTS schema_change_uniq_keys_regression_test (
-                    `user_id` LARGEINT NOT NULL COMMENT "用户id",
-                    `date` DATE NOT NULL COMMENT "数据灌入日期时间",
-                    `city` VARCHAR(20) COMMENT "用户所在城市",
-                    `age` SMALLINT COMMENT "用户年龄",
-                    `sex` TINYINT COMMENT "用户性别",
-                    `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
-                    `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
-                    `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
-                    `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
-                    `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
-                    `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
-                UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
-                BUCKETS 8
-                PROPERTIES ( "replication_num" = "1", "light_schema_change" = "false");
-            """
-
-            sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
-                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20)
-            """
-
-            sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
-                (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19)
-            """
-
-            qt_sc """ select count(*) from schema_change_uniq_keys_regression_test """
-
-            // alter and test light schema change
-            if (!isCloudMode()) {
-                sql """ALTER TABLE ${tableName} SET ("light_schema_change" = "true");"""
-            }
-
-            sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
-                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21)
-            """
-
-            sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
-                (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20)
-            """
-            qt_sc """
-                select count(*) from schema_change_uniq_keys_regression_test
-            """
-
-            // add column
-            sql """
-                ALTER table ${tableName} ADD COLUMN new_column INT default "1"
-            """
-
-            sql """ SELECT * FROM ${tableName} WHERE user_id=2 """
-
-            sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
-                                            `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
-                VALUES
-                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20)
-            """
-
-            qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
-
-
-            sql """ INSERT INTO ${tableName} VALUES
-                (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
-            """
-            qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
-
-            qt_sc """ select count(*) from ${tableName} """
-
-            sql """ INSERT INTO ${tableName} VALUES
-                (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
-            """
-
-            qt_sc """ select * from ${tableName} where user_id = 4 """
-
-            sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
-            """
-            sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
-            """
-            sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
-            """
-            sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
-            """
-            sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
-            """
-            sql """ INSERT INTO ${tableName} VALUES
-                (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
-            """
-
-            // compaction
-            String[][] tablets = sql """ show tablets from ${tableName}; """
-            for (String[] tablet in tablets) {
-                String tablet_id = tablet[0]
-                backend_id = tablet[2]
-                logger.info("run compaction:" + tablet_id)
-                (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
-                logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
-                //assertEquals(code, 0)
-            }
-
-            // wait for all compactions done
-            for (String[] tablet in tablets) {
-                boolean running = true
-                do {
-                    Thread.sleep(100)
-                    String tablet_id = tablet[0]
-                    backend_id = tablet[2]
-                    (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
-                    logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
-                    assertEquals(code, 0)
-                    def compactionStatus = parseJson(out.trim())
-                    assertEquals("success", compactionStatus.status.toLowerCase())
-                    running = compactionStatus.run_status
-                } while (running)
-            }
-            qt_sc """ select count(*) from ${tableName} """
-
-            qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """
+    sql """ DROP TABLE IF EXISTS ${tableName} """
+
+    sql """
+            CREATE TABLE IF NOT EXISTS schema_change_uniq_keys_regression_test (
+                `user_id` LARGEINT NOT NULL COMMENT "用户id",
+                `date` DATE NOT NULL COMMENT "数据灌入日期时间",
+                `city` VARCHAR(20) COMMENT "用户所在城市",
+                `age` SMALLINT COMMENT "用户年龄",
+                `sex` TINYINT COMMENT "用户性别",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
+            UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            BUCKETS 8
+            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "false");
+        """
+
+    sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
+            (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20)
+        """
+
+    sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
+            (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19)
+        """
+
+    qt_sc """ select count(*) from schema_change_uniq_keys_regression_test """
+
+    // alter and test light schema change
+    if (!isCloudMode()) {
+        sql """ALTER TABLE ${tableName} SET ("light_schema_change" = "true");"""
+    }
+
+    sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
+            (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21)
+        """
+
+    sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES
+            (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20)
+        """
+    qt_sc """
+            select count(*) from schema_change_uniq_keys_regression_test
+        """
+
+    // add column
+    sql """
+        ALTER table ${tableName} ADD COLUMN new_column INT default "1"
+        """
+
+    sql """ SELECT * FROM ${tableName} WHERE user_id=2 """
+
+    sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`,
+                                    `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`)
+            VALUES
+            (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20)
+        """
+
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """
+
+
+    sql """ INSERT INTO ${tableName} VALUES
+            (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """
+
+    qt_sc """ select count(*) from ${tableName} """
+
+    sql """ INSERT INTO ${tableName} VALUES
+            (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+
+    qt_sc """ select * from ${tableName} where user_id = 4 """
+
+    sql """ INSERT INTO ${tableName} VALUES
+            (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+            (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+            (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+            (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+            (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+    sql """ INSERT INTO ${tableName} VALUES
+            (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2)
+        """
+
+    // compaction
+    String[][] tablets = sql """ show tablets from ${tableName}; """
+    for (String[] tablet in tablets) {
+        String tablet_id = tablet[0]
+        backend_id = tablet[2]
+        logger.info("run compaction:" + tablet_id)
+        (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+        logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+        //assertEquals(code, 0)
+    }
+
+    // wait for all compactions done
+    for (String[] tablet in tablets) {
+        boolean running = true
+        do {
+            Thread.sleep(100)
+            String tablet_id = tablet[0]
+            backend_id = tablet[2]
+            (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+            logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def compactionStatus = parseJson(out.trim())
+            assertEquals("success", compactionStatus.status.toLowerCase())
+            running = compactionStatus.run_status
+        } while (running)
+    }
+    qt_sc """ select count(*) from ${tableName} """
+
+    qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """
 
     } finally {
         //try_sql("DROP TABLE IF EXISTS ${tableName}")
backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy index 2e336603a991b6..016aedb1644460 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy @@ -28,21 +28,6 @@ suite ("test_uniq_vals_schema_change") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy index 8f3f0d3fcec133..38bd996e89de9e 100644 --- a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy @@ -32,22 +32,6 @@ suite ("test_varchar_schema_change") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } - sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ CREATE TABLE IF NOT EXISTS ${tableName} ( diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys.groovy index d6215e31d75b55..11b94bc90ee7d5 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys.groovy +++ b/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys.groovy @@ -27,25 +27,6 @@ suite("test_segcompaction_agg_keys") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, 
backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys_index.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys_index.groovy index 23e6c20fe35e01..a30b53670d89e5 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys_index.groovy +++ b/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys_index.groovy @@ -27,24 +27,6 @@ suite("test_segcompaction_agg_keys_index") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys.groovy index ad9e6f8dcebefa..910a650ab31233 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys.groovy +++ b/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys.groovy @@ -27,26 +27,6 @@ suite("test_segcompaction_dup_keys") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys_index.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys_index.groovy index 5bc9278c54e85c..958fb590c12ee0 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys_index.groovy +++ 
b/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys_index.groovy @@ -27,25 +27,6 @@ suite("test_segcompaction_dup_keys_index") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys.groovy index 1c564ee25db92a..c4c3c123e8d96c 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys.groovy +++ b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys.groovy @@ -27,26 +27,6 @@ suite("test_segcompaction_unique_keys") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_index.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_index.groovy index eb39c48e0b79a5..1602e074a5013a 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_index.groovy +++ b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_index.groovy @@ -27,25 +27,6 @@ suite("test_segcompaction_unique_keys_index") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ 
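Note: every block deleted above repeats the same inline lookup of the BE config `disable_auto_compaction` through `show_be_config`, and these suites now rely on the per-table `"disable_auto_compaction" = "true"` property instead. If a suite ever needs to read a BE config value again, a shared helper along the following lines could replace the duplicated blocks. This is a minimal sketch, not part of the patch: `getBeConfig` is a hypothetical name, and it assumes `show_be_config` returns a `(code, out, err)` triple whose `out` is a JSON list of `[name, type, value, ...]` rows, which is the shape the deleted blocks parsed.

    // Hypothetical helper (sketch, not in this patch): read one BE config value by name.
    // Assumes show_be_config(ip, port) returns (code, out, err) where `out` is a JSON
    // list of rows shaped like [name, type, value, ...], as the deleted blocks parsed.
    def getBeConfig = { String ip, String port, String name ->
        def (code, out, err) = show_be_config(ip, port)
        assertEquals(0, code)
        def configList = parseJson(out.trim())
        assert configList instanceof List
        // Scan the rows for the requested config entry; return its value or null.
        def row = configList.find { it instanceof List && it[0] == name }
        return row == null ? null : row[2]
    }

    // Usage sketch, mirroring the deleted boolean check:
    // boolean disableAutoCompaction =
    //         Boolean.parseBoolean(getBeConfig(beIp, beHttpPort, "disable_auto_compaction") as String)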
diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow.groovy index cb9e25cb032507..946bfee3168808 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow.groovy +++ b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow.groovy @@ -27,26 +27,6 @@ suite("test_segcompaction_unique_keys_mow") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow_index.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow_index.groovy index bb9813e9d6d73f..ae7fa357bd40ad 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow_index.groovy +++ b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow_index.groovy @@ -27,26 +27,6 @@ suite("test_segcompaction_unique_keys_mow_index") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ From e4645ea1c7015f2ee1854f34bdbdd5e98feac230 Mon Sep 17 00:00:00 2001 From: csun5285 Date: Thu, 20 Jun 2024 11:46:05 +0800 Subject: [PATCH 2/8] fix --- ...t_single_compaction_fault_injection.groovy | 218 ++++++++- ...paction_with_variant_inverted_index.groovy | 369 ++++++++------- .../test_single_replica_compaction.groovy | 432 +++++++++--------- .../test_time_series_compaction_policy.groovy | 184 ++++---- .../test_in_null_no_return.groovy | 140 ++++++ 5 files changed, 835 insertions(+), 508 deletions(-) create mode 100644 regression-test/suites/inverted_index_p0/test_in_null_no_return.groovy diff --git a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy index 737528d1e5dfd7..6b63292e02ee9d 100644 --- a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy +++ 
b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy @@ -17,7 +17,7 @@ import org.codehaus.groovy.runtime.IOGroovyMethods -suite("test_single_compaction_fault_injection", "p2") { +suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { def tableName = "test_single_compaction" def set_be_config = { key, value -> @@ -123,6 +123,20 @@ suite("test_single_compaction_fault_injection", "p2") { return tabletStatus } + String backend_id; + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + + backend_id = backendId_to_backendIP.keySet()[0] + set_be_config.call("update_replica_infos_interval_seconds", "5") + + // find the master be for single replica compaction + Boolean found = false + String master_backend_id + List follower_backend_id = new ArrayList<>() + String tablet_id + def tablets try { String backend_id; def backendId_to_backendIP = [:] @@ -150,7 +164,7 @@ suite("test_single_compaction_fault_injection", "p2") { UNIQUE KEY(`id`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false" ); + PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "disable_auto_compaction" = "true" ); """ tablets = sql_return_maparray """ show tablets from ${tableName}; """ @@ -352,6 +366,204 @@ suite("test_single_compaction_fault_injection", "p2") { qt_sql """ select * from ${tableName} order by id """ - + + tablets = sql_return_maparray """ show tablets from ${tableName}; """ + // wait for update replica infos + Thread.sleep(20000) + // The test table only has one bucket with 2 replicas, + // and `show tablets` will return 2 different replicas with the same tablet. + // So we can use the same tablet_id to get tablet/trigger compaction with different backends. + tablet_id = tablets[0].TabletId + def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ + logger.info("tablet: " + tablet_info) + for (def tablet in tablets) { + String trigger_backend_id = tablet.BackendId + def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) + if (!tablet_status.containsKey("single replica compaction status")) { + if (found) { + found = false + logger.warn("multiple master"); + break; + } + found = true + master_backend_id = trigger_backend_id + } else { + follower_backend_id.add(trigger_backend_id) + } + } + assertFalse(found) + assertFalse(master_backend_id.isEmpty()) + assertTrue(follower_backend_id.isEmpty()) + master_backend_id = "" + } finally { + GetDebugPoint().disableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty') + // wait for update replica infos + // be.conf: update_replica_infos_interval_seconds + 2s + Thread.sleep(20000) + // The test table only has one bucket with 2 replicas, + // and `show tablets` will return 2 different replicas with the same tablet. + // So we can use the same tablet_id to get tablet/trigger compaction with different backends. 
+ tablet_id = tablets[0].TabletId + def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ + for (def tablet in tablets) { + String trigger_backend_id = tablet.BackendId + def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + if (!tablet_status.containsKey("single replica compaction status")) { + if (found) { + logger.warn("multiple master") + assertTrue(false) + } + found = true + master_backend_id = trigger_backend_id + } else { + follower_backend_id.add(trigger_backend_id) + } + } + assertTrue(found) + assertFalse(master_backend_id.isEmpty()) + assertFalse(follower_backend_id.isEmpty()) + } + + + def checkSucceedCompactionResult = { + def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); + def master_rowsets = master_tablet_status."rowsets" + assert master_rowsets instanceof List + logger.info("rowset size: " + master_rowsets.size()) + + for (String backend: follower_backend_id) { + def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); + def rowsets = tablet_status."rowsets" + assert rowsets instanceof List + assertEquals(master_rowsets.size(), rowsets.size()) + } + } + + def checkFailedCompactionResult = { + def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); + def master_rowsets = master_tablet_status."rowsets" + assert master_rowsets instanceof List + logger.info("rowset size: " + master_rowsets.size()) + + for (String backend: follower_backend_id) { + def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); + def rowsets = tablet_status."rowsets" + assert rowsets instanceof List + assertFalse(master_rowsets.size() == rowsets.size()) + } + } + + // return ok + try { + GetDebugPoint().enableDebugPointForAllBEs("do_single_compaction_return_ok"); + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id); + } + } finally { + GetDebugPoint().disableDebugPointForAllBEs("do_single_compaction_return_ok"); + } + sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """ + sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """ + sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """ + + // trigger master be to do cumu compaction + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "cumulative", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + + try { + GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_get_peer"); + for (String id in follower_backend_id) { + out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + assertTrue(out.contains("compaction task is successfully triggered") || out.contains("tablet don't have peer replica")); + } + 
checkFailedCompactionResult.call() + } finally { + GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_get_peer") } + + try { + GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_get_peer_versions"); + for (String id in follower_backend_id) { + out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + assertTrue(out.contains("compaction task is successfully triggered") || out.contains("tablet failed get peer versions")); + } + checkFailedCompactionResult.call() + } finally { + GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_get_peer_versions") + } + + try { + GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_make_snapshot"); + for (String id in follower_backend_id) { + out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + assertTrue(out.contains("compaction task is successfully triggered") || out.contains("failed snapshot")); + } + checkFailedCompactionResult.call() + } finally { + GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_make_snapshot") + } + + try { + GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_download_file"); + for (String id in follower_backend_id) { + out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + assertTrue(out.contains("compaction task is successfully triggered") || out.contains("failed to download file")); + } + checkFailedCompactionResult.call() + } finally { + GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_download_file") + } + + // trigger follower be to fetch compaction result + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + } + + // check rowsets + checkSucceedCompactionResult.call() + + sql """ INSERT INTO ${tableName} VALUES (4, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (5, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (6, "a", 100); """ + sql """ DELETE FROM ${tableName} WHERE id = 4; """ + sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """ + + // trigger master be to do cumu compaction with delete + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "cumulative", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + + // trigger follower be to fetch compaction result + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + } + + // check rowsets + checkSucceedCompactionResult.call() + + // trigger master be to do base compaction + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "base", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + + // trigger follower be to fetch compaction result + for (String id 
in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + } + + // check rowsets + checkSucceedCompactionResult.call() + + qt_sql """ + select * from ${tableName} order by id + """ } diff --git a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy index ee299355473240..55b1f223295d24 100644 --- a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy +++ b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy @@ -28,96 +28,74 @@ suite("test_single_compaction_with_variant_inverted", "p2") { } boolean disableAutoCompaction = true - try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } + String backend_id; + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + + backend_id = backendId_to_backendIP.keySet()[0] + def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) + + logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def configList = parseJson(out.trim()) + assert configList instanceof List + + for (Object ele in (List) configList) { + assert ele instanceof List + if (((List) ele)[0] == "disable_auto_compaction") { + disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) } + } - def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=${compact_type}") - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err) - if (!disableAutoCompaction) { - return "Success, " + out - } - assertEquals(code, 0) - return out - } - - def triggerSingleCompaction = { be_host, be_http_port, tablet_id -> - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=cumulative&remote=true") - - String command = sb.toString() - logger.info(command) - process = 
command.execute() - code = process.waitFor() - err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); - out = process.getText() - logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err) - if (!disableAutoCompaction) { - return "Success, " + out - } - assertEquals(code, 0) - return out + def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> + StringBuilder sb = new StringBuilder(); + sb.append("curl -X POST http://${be_host}:${be_http_port}") + sb.append("/api/compaction/run?tablet_id=") + sb.append(tablet_id) + sb.append("&compact_type=${compact_type}") + + String command = sb.toString() + logger.info(command) + process = command.execute() + code = process.waitFor() + err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); + out = process.getText() + logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err) + if (!disableAutoCompaction) { + return "Success, " + out } - def waitForCompaction = { be_host, be_http_port, tablet_id -> - boolean running = true - do { - Thread.sleep(1000) - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) + assertEquals(code, 0) + return out + } + + def triggerSingleCompaction = { be_host, be_http_port, tablet_id -> + StringBuilder sb = new StringBuilder(); + sb.append("curl -X POST http://${be_host}:${be_http_port}") + sb.append("/api/compaction/run?tablet_id=") + sb.append(tablet_id) + sb.append("&compact_type=cumulative&remote=true") + + String command = sb.toString() + logger.info(command) + process = command.execute() + code = process.waitFor() + err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))); + out = process.getText() + logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err) + if (!disableAutoCompaction) { + return "Success, " + out } - - def getTabletStatus = { be_host, be_http_port, tablet_id -> - boolean running = true + assertEquals(code, 0) + return out + } + def waitForCompaction = { be_host, be_http_port, tablet_id -> + boolean running = true + do { Thread.sleep(1000) StringBuilder sb = new StringBuilder(); sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/show?tablet_id=") + sb.append("/api/compaction/run_status?tablet_id=") sb.append(tablet_id) String command = sb.toString() @@ -125,120 +103,139 @@ suite("test_single_compaction_with_variant_inverted", "p2") { process = command.execute() code = process.waitFor() out = process.getText() - logger.info("Get tablet status: code=" + code + ", out=" + out) + logger.info("Get compaction status: code=" + code + ", out=" + out) assertEquals(code, 0) - def tabletStatus = parseJson(out.trim()) - return tabletStatus - } + def compactionStatus = parseJson(out.trim()) + 
assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } + def getTabletStatus = { be_host, be_http_port, tablet_id -> + boolean running = true + Thread.sleep(1000) + StringBuilder sb = new StringBuilder(); + sb.append("curl -X GET http://${be_host}:${be_http_port}") + sb.append("/api/compaction/show?tablet_id=") + sb.append(tablet_id) + + String command = sb.toString() + logger.info(command) + process = command.execute() + code = process.waitFor() + out = process.getText() + logger.info("Get tablet status: code=" + code + ", out=" + out) + assertEquals(code, 0) + def tabletStatus = parseJson(out.trim()) + return tabletStatus + } - sql """ DROP TABLE IF EXISTS ${tableName}; """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NULL, - `name` varchar(255) NULL, - `score` int(11) NULL, - `properties` variant, - INDEX idx_props (`properties`) USING INVERTED PROPERTIES("parser" = "none") COMMENT '' - ) ENGINE=OLAP - DUPLICATE KEY(`id`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( - "replication_num" = "2", - "enable_single_replica_compaction" = "true", - "inverted_index_storage_format" = "V1", - "disable_auto_compaction" = "true" - ); - """ - - def tablets = sql_return_maparray """ show tablets from ${tableName}; """ - - // wait for update replica infos - // be.conf: update_replica_infos_interval_seconds + 2s - Thread.sleep(62000) - - // find the master be for single replica compaction - Boolean found = false - String master_backend_id; - List follower_backend_id = new ArrayList<>() - // The test table only has one bucket with 2 replicas, - // and `show tablets` will return 2 different replicas with the same tablet. - // So we can use the same tablet_id to get tablet/trigger compaction with different backends. 
- String tablet_id = tablets[0].TabletId - def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ - logger.info("tablet: " + tablet_info) - for (def tablet in tablets) { - String trigger_backend_id = tablet.BackendId - def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); - if (!tablet_status.containsKey("single replica compaction status")) { - if (found) { - logger.warn("multipe master"); - assertTrue(false) - } - found = true - master_backend_id = trigger_backend_id - } else { - follower_backend_id.add(trigger_backend_id) - } - } - def checkCompactionResult = { - def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); - def master_rowsets = master_tablet_status."rowsets" - assert master_rowsets instanceof List - logger.info("rowset size: " + master_rowsets.size()) - - for (String backend: follower_backend_id) { - def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); - def rowsets = tablet_status."rowsets" - assert rowsets instanceof List - assertEquals(master_rowsets.size(), rowsets.size()) + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(255) NULL, + `score` int(11) NULL, + `properties` variant, + INDEX idx_props (`properties`) USING INVERTED PROPERTIES("parser" = "none") COMMENT '' + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_num" = "2", + "enable_single_replica_compaction" = "true", + "inverted_index_storage_format" = "V1", + "disable_auto_compaction" = "true" + ); + """ + + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ + + // wait for update replica infos + // be.conf: update_replica_infos_interval_seconds + 2s + Thread.sleep(62000) + + // find the master be for single replica compaction + Boolean found = false + String master_backend_id; + List follower_backend_id = new ArrayList<>() + // The test table only has one bucket with 2 replicas, + // and `show tablets` will return 2 different replicas with the same tablet. + // So we can use the same tablet_id to get tablet/trigger compaction with different backends. 
+ String tablet_id = tablets[0].TabletId + def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ + logger.info("tablet: " + tablet_info) + for (def tablet in tablets) { + String trigger_backend_id = tablet.BackendId + def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + if (!tablet_status.containsKey("single replica compaction status")) { + if (found) { + logger.warn("multiple master"); + assertTrue(false) } + found = true + master_backend_id = trigger_backend_id + } else { + follower_backend_id.add(trigger_backend_id) } + } - def checkCompactionResult = { - def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); - def master_rowsets = master_tablet_status."rowsets" - assert master_rowsets instanceof List - logger.info("rowset size: " + master_rowsets.size()) - - for (String backend: follower_backend_id) { - def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); - def rowsets = tablet_status."rowsets" - assert rowsets instanceof List - assertEquals(master_rowsets.size(), rowsets.size()) + def checkCompactionResult = { + def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); + def master_rowsets = master_tablet_status."rowsets" + assert master_rowsets instanceof List + logger.info("rowset size: " + master_rowsets.size()) + + for (String backend: follower_backend_id) { + def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); + def rowsets = tablet_status."rowsets" + assert rowsets instanceof List + assertEquals(master_rowsets.size(), rowsets.size()) } + } - def checkTabletFileCrc = { - def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err) - - for (String backend: follower_backend_id) { - def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id) - logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err) - assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value) - assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version) - assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version) - assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count) - assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count) - } + } - sql """ INSERT INTO ${tableName} VALUES (1, "a", 100, '{"a" : 1234, "point" : 1, "xxxx" : "ddddd"}'); """ - sql """ INSERT INTO ${tableName} VALUES (1, "b", 100, '{"%a" : 1234, "@point" : 1, "[xxxx" : "ddddd"}'); """ - sql """ INSERT INTO ${tableName} VALUES (2, "a", 100, '{"@a" : 1234, "%point" : 1, "]xxxx" : "ddddd"}'); """ - sql """ INSERT INTO ${tableName} VALUES (2, "b", 100, '{"%a" : 1234, "%point" : 1, "{xxxx" : "ddddd"}'); """ - sql """ INSERT INTO ${tableName} VALUES (3, "a", 100, '{"@a" : 1234, "@point" : 1, "}xxxx" : "ddddd"}'); """ - sql """ INSERT INTO ${tableName} VALUES (3, "b", 100, '{"a" : 1234, "point" : 1, "|xxxx" : "ddddd"}'); """ - - // trigger master be to do full compaction - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "full", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - - // trigger follower be to fetch 
compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + def checkTabletFileCrc = { + def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err) + + for (String backend: follower_backend_id) { + def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id) + logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err) + assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value) + assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version) + assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version) + assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count) + assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count) } + } - // check rowsets - checkCompactionResult.call() - checkTabletFileCrc.call() + sql """ INSERT INTO ${tableName} VALUES (1, "a", 100, '{"a" : 1234, "point" : 1, "xxxx" : "ddddd"}'); """ + sql """ INSERT INTO ${tableName} VALUES (1, "b", 100, '{"%a" : 1234, "@point" : 1, "[xxxx" : "ddddd"}'); """ + sql """ INSERT INTO ${tableName} VALUES (2, "a", 100, '{"@a" : 1234, "%point" : 1, "]xxxx" : "ddddd"}'); """ + sql """ INSERT INTO ${tableName} VALUES (2, "b", 100, '{"%a" : 1234, "%point" : 1, "{xxxx" : "ddddd"}'); """ + sql """ INSERT INTO ${tableName} VALUES (3, "a", 100, '{"@a" : 1234, "@point" : 1, "}xxxx" : "ddddd"}'); """ + sql """ INSERT INTO ${tableName} VALUES (3, "b", 100, '{"a" : 1234, "point" : 1, "|xxxx" : "ddddd"}'); """ + + // trigger master be to do full compaction + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "full", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + + // trigger follower be to fetch compaction result + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + } - qt_sql """ - select count() from ${tableName} where properties MATCH_ANY 'point xxxx'; - """ + // check rowsets + checkCompactionResult.call() + checkTabletFileCrc.call() - sql """ DROP TABLE IF EXISTS ${tableName}; """ - - } + qt_sql """ + select count() from ${tableName} where properties MATCH_ANY 'point xxxx'; + """ + + sql """ DROP TABLE IF EXISTS ${tableName}; """ } diff --git a/regression-test/suites/compaction/test_single_replica_compaction.groovy b/regression-test/suites/compaction/test_single_replica_compaction.groovy index a1c771395a5aff..3d8eb8bfc5f2a6 100644 --- 
a/regression-test/suites/compaction/test_single_replica_compaction.groovy +++ b/regression-test/suites/compaction/test_single_replica_compaction.groovy @@ -17,7 +17,7 @@ import org.codehaus.groovy.runtime.IOGroovyMethods -suite("test_single_replica_compaction", "p2") { +suite("test_single_compaction_p2", "p2") { if (isCloudMode()) { return; } @@ -27,107 +27,69 @@ suite("test_single_replica_compaction", "p2") { return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet)) } - boolean disableAutoCompaction = true - try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } + String backend_id; + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + + def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> + if (compact_type == "cumulative") { + def (code_1, out_1, err_1) = be_run_cumulative_compaction(be_host, be_http_port, tablet_id) + logger.info("Run compaction: code=" + code_1 + ", out=" + out_1 + ", err=" + err_1) + assertEquals(code_1, 0) + return out_1 + } else if (compact_type == "full") { + def (code_2, out_2, err_2) = be_run_full_compaction(be_host, be_http_port, tablet_id) + logger.info("Run compaction: code=" + code_2 + ", out=" + out_2 + ", err=" + err_2) + assertEquals(code_2, 0) + return out_2 + } else { + assertTrue(false) } + } - def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> - if (compact_type == "cumulative") { - def (code_1, out_1, err_1) = be_run_cumulative_compaction(be_host, be_http_port, tablet_id) - logger.info("Run compaction: code=" + code_1 + ", out=" + out_1 + ", err=" + err_1) - assertEquals(code_1, 0) - return out_1 - } else if (compact_type == "full") { - def (code_2, out_2, err_2) = be_run_full_compaction(be_host, be_http_port, tablet_id) - logger.info("Run compaction: code=" + code_2 + ", out=" + out_2 + ", err=" + err_2) - assertEquals(code_2, 0) - return out_2 - } else { - assertFalse(True) + def triggerSingleCompaction = { be_host, be_http_port, tablet_id -> + StringBuilder sb = new StringBuilder(); + sb.append("curl -X POST http://${be_host}:${be_http_port}") + sb.append("/api/compaction/run?tablet_id=") + sb.append(tablet_id) + sb.append("&compact_type=cumulative&remote=true") + + Integer maxRetries = 10; // Maximum number of retries + Integer retryCount = 0; // Current retry count + Integer sleepTime = 5000; // Sleep time in milliseconds + String cmd = sb.toString() + def process + int code_3 + String err_3 + String out_3 + + while (retryCount < maxRetries) { + process = cmd.execute() + code_3 = process.waitFor() + err_3 = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))) + out_3 = process.getText() + + // If the command was successful, break the loop + if (code_3 == 0) 
{ + break } - } - def triggerSingleCompaction = { be_host, be_http_port, tablet_id -> - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=cumulative&remote=true") - - Integer maxRetries = 10; // Maximum number of retries - Integer retryCount = 0; // Current retry count - Integer sleepTime = 5000; // Sleep time in milliseconds - String cmd = sb.toString() - def process - int code_3 - String err_3 - String out_3 - - while (retryCount < maxRetries) { - process = cmd.execute() - code_3 = process.waitFor() - err_3 = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))) - out_3 = process.getText() - - // If the command was successful, break the loop - if (code_3 == 0) { - break - } - - // If the command was not successful, increment the retry count, sleep for a while and try again - retryCount++ - sleep(sleepTime) - } - assertEquals(code_3, 0) - logger.info("Get compaction status: code=" + code_3 + ", out=" + out_3) - return out_3 + // If the command was not successful, increment the retry count, sleep for a while and try again + retryCount++ + sleep(sleepTime) } - def waitForCompaction = { be_host, be_http_port, tablet_id -> - boolean running = true - do { - Thread.sleep(1000) - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - - def getTabletStatus = { be_host, be_http_port, tablet_id -> - boolean running = true + assertEquals(code_3, 0) + logger.info("Get compaction status: code=" + code_3 + ", out=" + out_3) + return out_3 + } + def waitForCompaction = { be_host, be_http_port, tablet_id -> + boolean running = true + do { Thread.sleep(1000) StringBuilder sb = new StringBuilder(); sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/show?tablet_id=") + sb.append("/api/compaction/run_status?tablet_id=") sb.append(tablet_id) String command = sb.toString() @@ -135,151 +97,171 @@ suite("test_single_replica_compaction", "p2") { process = command.execute() code = process.waitFor() out = process.getText() - logger.info("Get tablet status: code=" + code + ", out=" + out) + logger.info("Get compaction status: code=" + code + ", out=" + out) assertEquals(code, 0) - def tabletStatus = parseJson(out.trim()) - return tabletStatus - } - + def compactionStatus = parseJson(out.trim()) + assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } - sql """ DROP TABLE IF EXISTS ${tableName}; """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NULL, - `name` varchar(255) NULL, - `score` int(11) NULL - ) ENGINE=OLAP - UNIQUE KEY(`id`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( - "replication_num" = "2", - "enable_single_replica_compaction" = "true", - "enable_unique_key_merge_on_write" = "false", - "disable_auto_compaction" = "true" - ); - """ - - def tablets = 
sql_return_maparray """ show tablets from ${tableName}; """ - - // wait for update replica infos - // be.conf: update_replica_infos_interval_seconds + 2s - Thread.sleep(62000) - - // find the master be for single replica compaction - Boolean found = false - String master_backend_id; - List follower_backend_id = new ArrayList<>() - // The test table only has one bucket with 2 replicas, - // and `show tablets` will return 2 different replicas with the same tablet. - // So we can use the same tablet_id to get tablet/trigger compaction with different backends. - String tablet_id = tablets[0].TabletId - def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ - logger.info("tablet: " + tablet_info) - for (def tablet in tablets) { - String trigger_backend_id = tablet.BackendId - def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); - if (!tablet_status.containsKey("single replica compaction status")) { - if (found) { - logger.warn("multipe master"); - assertTrue(false) - } - found = true - master_backend_id = trigger_backend_id - } else { - follower_backend_id.add(trigger_backend_id) - } - } + def getTabletStatus = { be_host, be_http_port, tablet_id -> + boolean running = true + Thread.sleep(1000) + StringBuilder sb = new StringBuilder(); + sb.append("curl -X GET http://${be_host}:${be_http_port}") + sb.append("/api/compaction/show?tablet_id=") + sb.append(tablet_id) + + String command = sb.toString() + logger.info(command) + process = command.execute() + code = process.waitFor() + out = process.getText() + logger.info("Get tablet status: code=" + code + ", out=" + out) + assertEquals(code, 0) + def tabletStatus = parseJson(out.trim()) + return tabletStatus + } - def checkCompactionResult = { - def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); - def master_rowsets = master_tablet_status."rowsets" - assert master_rowsets instanceof List - logger.info("rowset size: " + master_rowsets.size()) - - for (String backend: follower_backend_id) { - def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); - def rowsets = tablet_status."rowsets" - assert rowsets instanceof List - assertEquals(master_rowsets.size(), rowsets.size()) - } - } - def checkTabletFileCrc = { - def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err) - - for (String backend: follower_backend_id) { - def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id) - logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err) - assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value) - assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version) - assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version) - assertTrue(parseJson(follower_out.trim()).file_count == 
 parseJson(master_out.trim()).file_count) - assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count) + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(255) NULL, + `score` int(11) NULL + ) ENGINE=OLAP + UNIQUE KEY(`id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + "replication_num" = "2", + "enable_single_replica_compaction" = "true", + "enable_unique_key_merge_on_write" = "false", + "disable_auto_compaction" = "true" + ); + """ + + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ + + // wait for update replica infos + // be.conf: update_replica_infos_interval_seconds + 2s + Thread.sleep(62000) + + // find the master be for single replica compaction + Boolean found = false + String master_backend_id; + List follower_backend_id = new ArrayList<>() + // The test table only has one bucket with 2 replicas, + // and `show tablets` will return 2 different replicas with the same tablet. + // So we can use the same tablet_id to get tablet/trigger compaction with different backends. + String tablet_id = tablets[0].TabletId + def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ + logger.info("tablet: " + tablet_info) + for (def tablet in tablets) { + String trigger_backend_id = tablet.BackendId + def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + if (!tablet_status.containsKey("single replica compaction status")) { + if (found) { + logger.warn("multiple master"); + assertTrue(false) } + found = true + master_backend_id = trigger_backend_id + } else { + follower_backend_id.add(trigger_backend_id) } + } - def checkCompactionResult = { - def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); - def master_rowsets = master_tablet_status."rowsets" - assert master_rowsets instanceof List - logger.info("rowset size: " + master_rowsets.size()) - - for (String backend: follower_backend_id) { - def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); - def rowsets = tablet_status."rowsets" - assert rowsets instanceof List - assertEquals(master_rowsets.size(), rowsets.size()) - } - } - def checkTabletFileCrc = { - def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err) - - for (String backend: follower_backend_id) { - def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id) - logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err) - assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value) - assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version) - assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version) - assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count) - assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count) } + def checkCompactionResult = { + def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); + def master_rowsets = master_tablet_status."rowsets" + assert master_rowsets instanceof List + logger.info("rowset size: " + master_rowsets.size()) + + for (String backend: follower_backend_id) { + def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); + def rowsets = tablet_status."rowsets" + assert rowsets instanceof List + assertEquals(master_rowsets.size(), rowsets.size()) } + } + def checkTabletFileCrc = { + def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err) + + for (String backend: follower_backend_id) { + def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id) + logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err) + assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value) + assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version) + assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version) + assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count) + assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count) } + } - sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """ - sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """ - sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """ - - // trigger master be to do cumu compaction - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - - // trigger follower be to fetch compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """ + sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """ + sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """ + + // trigger master be to do cumu compaction + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "cumulative", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + + // trigger follower be to fetch compaction result + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + } - // check rowsets - checkCompactionResult.call() - - sql """ INSERT INTO ${tableName} 
VALUES (4, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (5, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (6, "a", 100); """ - sql """ DELETE FROM ${tableName} WHERE id = 4; """ - sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """ - - // trigger master be to do cumu compaction with delete - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - - // trigger follower be to fetch compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + def checkTabletFileCrc = { + def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err) + + for (String backend: follower_backend_id) { + def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id) + logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err) + assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value) + assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version) + assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version) + assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count) + assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count) } + } - // check rowsets - checkCompactionResult.call() + sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """ + sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """ + sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """ + + // trigger master be to do cumu compaction + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "cumulative", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + + // trigger follower be to fetch compaction result + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + } - // trigger master be to do full compaction - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "full", tablet_id).contains("Success")); - 
waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + // check rowsets + checkCompactionResult.call() + + sql """ INSERT INTO ${tableName} VALUES (4, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (5, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (6, "a", 100); """ + sql """ DELETE FROM ${tableName} WHERE id = 4; """ + sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """ + sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """ + + // trigger master be to do cumu compaction with delete + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "cumulative", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + + // trigger follower be to fetch compaction result + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + } - // trigger follower be to fetch compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - } + // check rowsets + checkCompactionResult.call() - // check rowsets - checkCompactionResult.call() - checkTabletFileCrc.call() + // trigger master be to do full compaction + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "full", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - qt_sql """ - select * from ${tableName} order by id - """ - + // trigger follower be to fetch compaction result + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) } + + // check rowsets + checkCompactionResult.call() + checkTabletFileCrc.call() + + qt_sql """ + select * from ${tableName} order by id + """ + } diff --git a/regression-test/suites/compaction/test_time_series_compaction_policy.groovy b/regression-test/suites/compaction/test_time_series_compaction_policy.groovy index 1aaf92ea2f9756..2e8018f94a6a09 100644 --- a/regression-test/suites/compaction/test_time_series_compaction_policy.groovy +++ b/regression-test/suites/compaction/test_time_series_compaction_policy.groovy @@ -75,101 +75,97 @@ suite("test_time_series_compaction_polciy", "p0") { return rowsetCount } - try { - - sql """ DROP TABLE IF EXISTS ${tableName}; """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NULL, - `name` varchar(255) NULL, - `hobbies` text NULL, - `score` int(11) NULL - ) ENGINE=OLAP - DUPLICATE KEY(`id`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`) BUCKETS 2 - PROPERTIES ( - "replication_num" = "1", - "disable_auto_compaction" = "true", - "compaction_policy" = "time_series" - ); - """ - // insert 16 lines, BUCKETS = 2 - sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ - sql """ INSERT INTO 
${tableName} VALUES (1, "bason", "bason hate pear", 99); """ - sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ - sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 99); """ - sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ - sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ - sql """ INSERT INTO ${tableName} VALUES (100, "bason", "bason hate pear", 99); """ - sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ - sql """ INSERT INTO ${tableName} VALUES (100, "bason", "bason hate pear", 99); """ - sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ - sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ - sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 99); """ - sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ - sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 99); """ - sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ - sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ - - qt_sql_1 """ select count() from ${tableName} """ - - //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - def tablets = sql_return_maparray """ show tablets from ${tableName}; """ - - int replicaNum = 1 - def dedup_tablets = deduplicate_tablets(tablets) - if (dedup_tablets.size() > 0) { - replicaNum = Math.round(tablets.size() / dedup_tablets.size()) - if (replicaNum != 1 && replicaNum != 3) { - assert(false) - } - } - - // BUCKETS = 2 - // before cumulative compaction, there are 17 * 2 = 34 rowsets. - int rowsetCount = get_rowset_count.call(tablets); - assert (rowsetCount == 34 * replicaNum) - - // trigger cumulative compactions for all tablets in table - trigger_cumulative_compaction_on_tablets.call(tablets) - - // wait for cumulative compaction done - wait_cumulative_compaction_done.call(tablets) - - // after cumulative compaction, there is only 26 rowset. - // 5 consecutive empty versions are merged into one empty version - // 34 - 2*4 = 26 - rowsetCount = get_rowset_count.call(tablets); - assert (rowsetCount == 26 * replicaNum) - - // trigger cumulative compactions for all tablets in ${tableName} - trigger_cumulative_compaction_on_tablets.call(tablets) - - // wait for cumulative compaction done - wait_cumulative_compaction_done.call(tablets) - - // after cumulative compaction, there is only 22 rowset. 
- // 26 - 4 = 22 - rowsetCount = get_rowset_count.call(tablets); - assert (rowsetCount == 22 * replicaNum) - - qt_sql_2 """ select count() from ${tableName}""" - if (isCloudMode()) { - return; + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(255) NULL, + `hobbies` text NULL, + `score` int(11) NULL + ) ENGINE=OLAP + DUPLICATE KEY(`id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`id`) BUCKETS 2 + PROPERTIES ( + "replication_num" = "1", + "disable_auto_compaction" = "true", + "compaction_policy" = "time_series" + ); + """ + // insert 16 lines, BUCKETS = 2 + sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (100, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (100, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (1, "bason", "bason hate pear", 99); """ + sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ + sql """ INSERT INTO ${tableName} VALUES (100, "andy", "andy love apple", 100); """ + + qt_sql_1 """ select count() from ${tableName} """ + + //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ + + int replicaNum = 1 + def dedup_tablets = deduplicate_tablets(tablets) + if (dedup_tablets.size() > 0) { + replicaNum = Math.round(tablets.size() / dedup_tablets.size()) + if (replicaNum != 1 && replicaNum != 3) { + assert(false) } - sql """ alter table ${tableName} set ("time_series_compaction_file_count_threshold"="10")""" - sql """sync""" - // trigger cumulative compactions for all tablets in ${tableName} - trigger_cumulative_compaction_on_tablets.call(tablets) - - // wait for cumulative compaction done - wait_cumulative_compaction_done.call(tablets) - - // after cumulative compaction, there is only 11 rowset. - rowsetCount = get_rowset_count.call(tablets); - assert (rowsetCount == 11 * replicaNum) - qt_sql_3 """ select count() from ${tableName}""" } + // BUCKETS = 2 + // before cumulative compaction, there are 17 * 2 = 34 rowsets. + int rowsetCount = get_rowset_count.call(tablets); + assert (rowsetCount == 34 * replicaNum) + + // trigger cumulative compactions for all tablets in table + trigger_cumulative_compaction_on_tablets.call(tablets) + + // wait for cumulative compaction done + wait_cumulative_compaction_done.call(tablets) + + // after cumulative compaction, there is only 26 rowset. 
+ // 5 consecutive empty versions are merged into one empty version
+ // 34 - 2*4 = 26
+ rowsetCount = get_rowset_count.call(tablets);
+ assert (rowsetCount == 26 * replicaNum)
+
+ // trigger cumulative compactions for all tablets in ${tableName}
+ trigger_cumulative_compaction_on_tablets.call(tablets)
+
+ // wait for cumulative compaction done
+ wait_cumulative_compaction_done.call(tablets)
+
+ // after cumulative compaction, there is only 22 rowset.
+ // 26 - 4 = 22
+ rowsetCount = get_rowset_count.call(tablets);
+ assert (rowsetCount == 22 * replicaNum)
+
+ qt_sql_2 """ select count() from ${tableName}"""
+ if (isCloudMode()) {
+ return;
+ }
+ sql """ alter table ${tableName} set ("time_series_compaction_file_count_threshold"="10")"""
+ sql """sync"""
+ // trigger cumulative compactions for all tablets in ${tableName}
+ trigger_cumulative_compaction_on_tablets.call(tablets)
+
+ // wait for cumulative compaction done
+ wait_cumulative_compaction_done.call(tablets)
+
+ // after cumulative compaction, there is only 11 rowset.
+ rowsetCount = get_rowset_count.call(tablets);
+ assert (rowsetCount == 11 * replicaNum)
+ qt_sql_3 """ select count() from ${tableName}"""
 }
diff --git a/regression-test/suites/inverted_index_p0/test_in_null_no_return.groovy b/regression-test/suites/inverted_index_p0/test_in_null_no_return.groovy
new file mode 100644
index 00000000000000..68b2801fcb121b
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/test_in_null_no_return.groovy
@@ -0,0 +1,140 @@
+import java.time.LocalDateTime
+import java.time.format.DateTimeFormatter
+
+suite("test_in_null_no_return", "nonConcurrent") {
+
+ // load data
+ def load_data = { loadTableName, fileName ->
+ streamLoad {
+ table loadTableName
+ set 'read_json_by_line', 'true'
+ set 'format', 'json'
+ file fileName
+ time 10000
+
+ check { result, exception, startTime, endTime ->
+ if (exception != null) {
+ throw exception
+ }
+ log.info("Stream load result: ${result}".toString())
+ def json = parseJson(result)
+ assertEquals("success", json.Status.toLowerCase())
+ assertEquals(json.NumberTotalRows, json.NumberLoadedRows)
+ assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
+ }
+ }
+ }
+
+ def execute_sql = { key, value, sqlList ->
+ sql """ set ${key} = ${value} """
+ List resultList = new ArrayList<>()
+ for (sqlStr in sqlList) {
+ def sqlResult = sql """ ${sqlStr} """
+ resultList.add(sqlResult)
+ }
+ return resultList
+ }
+
+ def compare_result = { result1, result2, executedSql ->
+ assertEquals(result1.size(), result2.size())
+ for (int i = 0; i < result1.size(); i++) {
+ if (result1[i] != result2[i]) {
+ logger.info("sql is {}", executedSql[i])
+ assertTrue(false)
+ }
+ }
+ }
+
+ def run_compaction = { compactionTableName ->
+ String backend_id;
+
+ def backendId_to_backendIP = [:]
+ def backendId_to_backendHttpPort = [:]
+ getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+ def tablets = sql_return_maparray """ show tablets from ${compactionTableName}; """
+
+ // run
+ for (def tablet in tablets) {
+ String tablet_id = tablet.TabletId
+ backend_id = tablet.BackendId
+ times = 1
+
+ do{
+ (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
+ logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+ ++times
+ sleep(2000)
+ } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10)
+
+ def compactJson = parseJson(out.trim())
+ if
(compactJson.status.toLowerCase() == "fail") { + logger.info("Compaction was done automatically!") + } + } + + // wait + for (def tablet : tablets) { + boolean running = true + do { + Thread.sleep(1000) + def tablet_id = tablet.TabletId + backend_id = tablet.BackendId + def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactionStatus = parseJson(out.trim()) + assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } + } + + + + try { + // GetDebugPoint().enableDebugPointForAllBEs("match.invert_index_not_support_execute_match") + + def dupTableName = "dup_httplogs" + sql """ drop table if exists ${dupTableName} """ + // create table + sql """ + CREATE TABLE IF NOT EXISTS dup_httplogs + ( + `id` bigint NOT NULL AUTO_INCREMENT(100), + `@timestamp` int(11) NULL, + `clientip` varchar(20) NULL, + `request` text NULL, + `status` int(11) NULL, + `size` int(11) NULL, + INDEX clientip_idx (`clientip`) USING INVERTED COMMENT '', + INDEX request_idx (`request`) USING INVERTED PROPERTIES("parser" = "unicode", "support_phrase" = "true") COMMENT '', + INDEX status_idx (`status`) USING INVERTED COMMENT '', + INDEX size_idx (`size`) USING INVERTED COMMENT '' + ) DUPLICATE KEY(`id`) + DISTRIBUTED BY HASH (`id`) BUCKETS 32 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1", + "compaction_policy" = "time_series", + "inverted_index_storage_format" = "v2", + "compression" = "ZSTD", + "disable_auto_compaction" = "true" + ); + """ + + load_data.call(dupTableName, 'documents-1000.json'); + sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, '10.16.10.6', 'GET /api/v1/organizations/1 HTTP/1.1', 500, 1000) """ + sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, NULL, 'GET /api/v1/organizations/1 HTTP/1.1', 500, 1000) """ + sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, '10.16.10.6', NULL, 500, 1000) """ + sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, '10.16.10.6', 'GET /api/v1/organizations/1 HTTP/1.1', NULL, 1000) """ + sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, '10.16.10.6', 'GET /api/v1/organizations/1 HTTP/1.1', 500, NULL) """ + sql """ sync """ + + sql """ SELECT count() from dup_httplogs WHERE clientip IN (NULL, '') or clientip IN ('') LIMIT 2 """ + sql """ SELECT count() from dup_httplogs WHERE clientip IN (NULL, '17.0.0.0') or clientip IN ('') LIMIT 2 """ + sql """ SELECT count() from dup_httplogs WHERE request IN (NULL, '') or clientip IN ('') LIMIT 2 """ + sql """ SELECT count() from dup_httplogs WHERE request IN (NULL, '17.0.0.0') or request IN ('') LIMIT 2 """ + } finally { + // GetDebugPoint().disableDebugPointForAllBEs("match.invert_index_not_support_execute_match") + } +} \ No newline at end of file From 810b467b028645a8978fdc3290b36e5d5dee5766 Mon Sep 17 00:00:00 2001 From: csun5285 Date: Thu, 18 Jul 2024 23:33:12 +0800 Subject: [PATCH 3/8] remove unused --- .../test_in_null_no_return.groovy | 140 ------------------ 1 file changed, 140 deletions(-) delete mode 100644 regression-test/suites/inverted_index_p0/test_in_null_no_return.groovy diff --git 
a/regression-test/suites/inverted_index_p0/test_in_null_no_return.groovy b/regression-test/suites/inverted_index_p0/test_in_null_no_return.groovy
deleted file mode 100644
index 68b2801fcb121b..00000000000000
--- a/regression-test/suites/inverted_index_p0/test_in_null_no_return.groovy
+++ /dev/null
@@ -1,140 +0,0 @@
-import java.time.LocalDateTime
-import java.time.format.DateTimeFormatter
-
-suite("test_in_null_no_return", "nonConcurrent") {
-
- // load data
- def load_data = { loadTableName, fileName ->
- streamLoad {
- table loadTableName
- set 'read_json_by_line', 'true'
- set 'format', 'json'
- file fileName
- time 10000
-
- check { result, exception, startTime, endTime ->
- if (exception != null) {
- throw exception
- }
- log.info("Stream load result: ${result}".toString())
- def json = parseJson(result)
- assertEquals("success", json.Status.toLowerCase())
- assertEquals(json.NumberTotalRows, json.NumberLoadedRows)
- assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
- }
- }
- }
-
- def execute_sql = { key, value, sqlList ->
- sql """ set ${key} = ${value} """
- List resultList = new ArrayList<>()
- for (sqlStr in sqlList) {
- def sqlResult = sql """ ${sqlStr} """
- resultList.add(sqlResult)
- }
- return resultList
- }
-
- def compare_result = { result1, result2, executedSql ->
- assertEquals(result1.size(), result2.size())
- for (int i = 0; i < result1.size(); i++) {
- if (result1[i] != result2[i]) {
- logger.info("sql is {}", executedSql[i])
- assertTrue(false)
- }
- }
- }
-
- def run_compaction = { compactionTableName ->
- String backend_id;
-
- def backendId_to_backendIP = [:]
- def backendId_to_backendHttpPort = [:]
- getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
- def tablets = sql_return_maparray """ show tablets from ${compactionTableName}; """
-
- // run
- for (def tablet in tablets) {
- String tablet_id = tablet.TabletId
- backend_id = tablet.BackendId
- times = 1
-
- do{
- (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
- logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
- ++times
- sleep(2000)
- } while (parseJson(out.trim()).status.toLowerCase()!="success" && times<=10)
-
- def compactJson = parseJson(out.trim())
- if (compactJson.status.toLowerCase() == "fail") {
- logger.info("Compaction was done automatically!")
- }
- }
-
- // wait
- for (def tablet : tablets) {
- boolean running = true
- do {
- Thread.sleep(1000)
- def tablet_id = tablet.TabletId
- backend_id = tablet.BackendId
- def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
- logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
- assertEquals(code, 0)
- def compactionStatus = parseJson(out.trim())
- assertEquals("success", compactionStatus.status.toLowerCase())
- running = compactionStatus.run_status
- } while (running)
- }
- }
-
-
-
- try {
- // GetDebugPoint().enableDebugPointForAllBEs("match.invert_index_not_support_execute_match")
-
- def dupTableName = "dup_httplogs"
- sql """ drop table if exists ${dupTableName} """
- // create table
- sql """
- CREATE TABLE IF NOT EXISTS dup_httplogs
- (
- `id` bigint NOT NULL AUTO_INCREMENT(100),
- `@timestamp` int(11) NULL,
- `clientip` varchar(20) NULL,
- `request` text NULL,
- `status` int(11) NULL,
- `size` int(11) NULL,
- INDEX clientip_idx (`clientip`) USING INVERTED COMMENT '',
-
INDEX request_idx (`request`) USING INVERTED PROPERTIES("parser" = "unicode", "support_phrase" = "true") COMMENT '', - INDEX status_idx (`status`) USING INVERTED COMMENT '', - INDEX size_idx (`size`) USING INVERTED COMMENT '' - ) DUPLICATE KEY(`id`) - DISTRIBUTED BY HASH (`id`) BUCKETS 32 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "compaction_policy" = "time_series", - "inverted_index_storage_format" = "v2", - "compression" = "ZSTD", - "disable_auto_compaction" = "true" - ); - """ - - load_data.call(dupTableName, 'documents-1000.json'); - sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, '10.16.10.6', 'GET /api/v1/organizations/1 HTTP/1.1', 500, 1000) """ - sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, NULL, 'GET /api/v1/organizations/1 HTTP/1.1', 500, 1000) """ - sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, '10.16.10.6', NULL, 500, 1000) """ - sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, '10.16.10.6', 'GET /api/v1/organizations/1 HTTP/1.1', NULL, 1000) """ - sql """ INSERT INTO ${dupTableName} (`@timestamp`, clientip, request, status, size) VALUES (100, '10.16.10.6', 'GET /api/v1/organizations/1 HTTP/1.1', 500, NULL) """ - sql """ sync """ - - sql """ SELECT count() from dup_httplogs WHERE clientip IN (NULL, '') or clientip IN ('') LIMIT 2 """ - sql """ SELECT count() from dup_httplogs WHERE clientip IN (NULL, '17.0.0.0') or clientip IN ('') LIMIT 2 """ - sql """ SELECT count() from dup_httplogs WHERE request IN (NULL, '') or clientip IN ('') LIMIT 2 """ - sql """ SELECT count() from dup_httplogs WHERE request IN (NULL, '17.0.0.0') or request IN ('') LIMIT 2 """ - } finally { - // GetDebugPoint().disableDebugPointForAllBEs("match.invert_index_not_support_execute_match") - } -} \ No newline at end of file From d68d81f49e7eaa593ebb09d29570236480a6b3b4 Mon Sep 17 00:00:00 2001 From: csun5285 Date: Thu, 18 Jul 2024 23:38:19 +0800 Subject: [PATCH 4/8] rebase --- ...t_single_compaction_fault_injection.groovy | 256 ++---------------- 1 file changed, 28 insertions(+), 228 deletions(-) diff --git a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy index 6b63292e02ee9d..27d549e9396e59 100644 --- a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy +++ b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy @@ -137,234 +137,33 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { List follower_backend_id = new ArrayList<>() String tablet_id def tablets - try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - set_be_config.call("update_replica_infos_interval_seconds", "5") - - // find the master be for single compaction - Boolean found = false - String master_backend_id - List follower_backend_id = new ArrayList<>() - String tablet_id - def tablets - try { - GetDebugPoint().enableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty') - sql """ DROP TABLE IF EXISTS ${tableName}; """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NULL, - `name` varchar(255) NULL, - `score` int(11) NULL - ) ENGINE=OLAP - 
UNIQUE KEY(`id`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "disable_auto_compaction" = "true" ); - """ - - tablets = sql_return_maparray """ show tablets from ${tableName}; """ - // wait for update replica infos - Thread.sleep(20000) - // The test table only has one bucket with 2 replicas, - // and `show tablets` will return 2 different replicas with the same tablet. - // So we can use the same tablet_id to get tablet/trigger compaction with different backends. - tablet_id = tablets[0].TabletId - def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ - logger.info("tablet: " + tablet_info) - for (def tablet in tablets) { - String trigger_backend_id = tablet.BackendId - def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id) - if (!tablet_status.containsKey("single replica compaction status")) { - if (found) { - found = false - logger.warn("multipe master"); - break; - } - found = true - master_backend_id = trigger_backend_id - } else { - follower_backend_id.add(trigger_backend_id) - } - } - assertFalse(found) - assertFalse(master_backend_id.isEmpty()) - assertTrue(follower_backend_id.isEmpty()) - master_backend_id = "" - } finally { - GetDebugPoint().disableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty') - // wait for update replica infos - // be.conf: update_replica_infos_interval_seconds + 2s - Thread.sleep(20000) - // The test table only has one bucket with 2 replicas, - // and `show tablets` will return 2 different replicas with the same tablet. - // So we can use the same tablet_id to get tablet/trigger compaction with different backends. 
- tablet_id = tablets[0].TabletId - def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ - for (def tablet in tablets) { - String trigger_backend_id = tablet.BackendId - def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); - if (!tablet_status.containsKey("single replica compaction status")) { - if (found) { - logger.warn("multipe master") - assertTrue(false) - } - found = true - master_backend_id = trigger_backend_id - } else { - follower_backend_id.add(trigger_backend_id) - } - } - assertTrue(found) - assertFalse(master_backend_id.isEmpty()) - assertFalse(follower_backend_id.isEmpty()) - } - - - def checkSucceedCompactionResult = { - def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); - def master_rowsets = master_tablet_status."rowsets" - assert master_rowsets instanceof List - logger.info("rowset size: " + master_rowsets.size()) - - for (String backend: follower_backend_id) { - def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); - def rowsets = tablet_status."rowsets" - assert rowsets instanceof List - assertEquals(master_rowsets.size(), rowsets.size()) - } - } - - def checkFailedCompactionResult = { - def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); - def master_rowsets = master_tablet_status."rowsets" - assert master_rowsets instanceof List - logger.info("rowset size: " + master_rowsets.size()) - - for (String backend: follower_backend_id) { - def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); - def rowsets = tablet_status."rowsets" - assert rowsets instanceof List - assertFalse(master_rowsets.size() == rowsets.size()) - } - } - - // return ok - try { - GetDebugPoint().enableDebugPointForAllBEs("do_single_compaction_return_ok"); - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id); - } - } finally { - GetDebugPoint().disableDebugPointForAllBEs("do_single_compaction_return_ok"); - } - sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """ - sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """ - sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """ - - // trigger master be to do cumu compaction - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - - try { - GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_get_peer"); - for (String id in follower_backend_id) { - out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - assertTrue(out.contains("compaction task is successfully triggered") || out.contains("tablet don't have peer replica")); - } - 
checkFailedCompactionResult.call() - } finally { - GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_get_peer") - } - - try { - GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_get_peer_versions"); - for (String id in follower_backend_id) { - out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - assertTrue(out.contains("compaction task is successfully triggered") || out.contains("tablet failed get peer versions")); - } - checkFailedCompactionResult.call() - } finally { - GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_get_peer_versions") - } - - try { - GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_make_snapshot"); - for (String id in follower_backend_id) { - out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - assertTrue(out.contains("compaction task is successfully triggered") || out.contains("failed snapshot")); - } - checkFailedCompactionResult.call() - } finally { - GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_make_snapshot") - } - - try { - GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_download_file"); - for (String id in follower_backend_id) { - out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - assertTrue(out.contains("compaction task is successfully triggered") || out.contains("failed to download file")); - } - checkFailedCompactionResult.call() - } finally { - GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_download_file") - } - - // trigger follower be to fetch compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - } - - // check rowsets - checkSucceedCompactionResult.call() - - sql """ INSERT INTO ${tableName} VALUES (4, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (5, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (6, "a", 100); """ - sql """ DELETE FROM ${tableName} WHERE id = 4; """ - sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """ - - // trigger master be to do cumu compaction with delete - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - - // trigger follower be to fetch compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - } - - // check rowsets - checkSucceedCompactionResult.call() - - // trigger master be to do base compaction - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "full", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - - // trigger follower be to fetch compaction result - for (String id 
in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - } + String backend_id; + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - // check rowsets - checkSucceedCompactionResult.call() + backend_id = backendId_to_backendIP.keySet()[0] + set_be_config.call("update_replica_infos_interval_seconds", "5") - qt_sql """ - select * from ${tableName} order by id + // find the master be for single compaction + Boolean found = false + String master_backend_id + List follower_backend_id = new ArrayList<>() + String tablet_id + def tablets + try { + GetDebugPoint().enableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty') + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(255) NULL, + `score` int(11) NULL + ) ENGINE=OLAP + UNIQUE KEY(`id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "disable_auto_compaction" = "true" ); """ tablets = sql_return_maparray """ show tablets from ${tableName}; """ @@ -551,7 +350,7 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { // trigger master be to do base compaction assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "base", tablet_id).contains("Success")); + "full", tablet_id).contains("Success")); waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) // trigger follower be to fetch compaction result @@ -566,4 +365,5 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { qt_sql """ select * from ${tableName} order by id """ -} + +} \ No newline at end of file From aba14758403a5644a12ea9d11de72ee847ca77a0 Mon Sep 17 00:00:00 2001 From: csun5285 Date: Fri, 19 Jul 2024 11:30:04 +0800 Subject: [PATCH 5/8] fix --- ...t_single_compaction_fault_injection.groovy | 31 ++++--------------- 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy index 27d549e9396e59..737b02ff434f6c 100644 --- a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy +++ b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy @@ -19,11 +19,12 @@ import org.codehaus.groovy.runtime.IOGroovyMethods suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { def tableName = "test_single_compaction" - + + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + def set_be_config = { key, value -> - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); for (String backend_id: backendId_to_backendIP.keySet()) { def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) @@ -123,26 +124,6 @@ 
suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { return tabletStatus } - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - set_be_config.call("update_replica_infos_interval_seconds", "5") - - // find the master be for single compaction - Boolean found = false - String master_backend_id - List follower_backend_id = new ArrayList<>() - String tablet_id - def tablets - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] set_be_config.call("update_replica_infos_interval_seconds", "5") // find the master be for single compaction @@ -163,7 +144,7 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { UNIQUE KEY(`id`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "disable_auto_compaction" = "true" ); + PROPERTIES ( "replication_num" = "1", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "disable_auto_compaction" = "true" ); """ tablets = sql_return_maparray """ show tablets from ${tableName}; """ From f1bff4d03d5c87f82b2208f15d57aecafe566e68 Mon Sep 17 00:00:00 2001 From: csun5285 Date: Fri, 19 Jul 2024 11:40:16 +0800 Subject: [PATCH 6/8] fix --- .../compaction/test_single_compaction_fault_injection.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy index 737b02ff434f6c..1fc661cb374c19 100644 --- a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy +++ b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy @@ -144,7 +144,7 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { UNIQUE KEY(`id`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( "replication_num" = "1", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "disable_auto_compaction" = "true" ); + PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "disable_auto_compaction" = "true" ); """ tablets = sql_return_maparray """ show tablets from ${tableName}; """ From 6afbb58171d88fdc8d2bce786325b05a87f8be0b Mon Sep 17 00:00:00 2001 From: csun5285 Date: Mon, 29 Jul 2024 15:19:59 +0800 Subject: [PATCH 7/8] fix --- .../test_single_compaction_fault_injection.groovy | 5 +++-- ...e_compaction_with_variant_inverted_index.groovy | 4 ++-- .../test_single_replica_compaction.groovy | 14 +++++++++++++- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy index 1fc661cb374c19..6bc8446ce6a7a9 100644 --- a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy +++ b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy @@ -125,7 +125,7 @@ suite("test_single_compaction_fault_injection", "p2, 
nonConcurrent") { } set_be_config.call("update_replica_infos_interval_seconds", "5") - + set_be_config.call("disable_auto_compacton", "true") // find the master be for single compaction Boolean found = false String master_backend_id @@ -144,7 +144,7 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { UNIQUE KEY(`id`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "disable_auto_compaction" = "true" ); + PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false"); """ tablets = sql_return_maparray """ show tablets from ${tableName}; """ @@ -347,4 +347,5 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { select * from ${tableName} order by id """ + set_be_config.call("disable_auto_compacton", "false") } \ No newline at end of file diff --git a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy index 55b1f223295d24..5264c8f2d1b447 100644 --- a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy +++ b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy @@ -147,7 +147,7 @@ suite("test_single_compaction_with_variant_inverted", "p2") { "replication_num" = "2", "enable_single_replica_compaction" = "true", "inverted_index_storage_format" = "V1", - "disable_auto_compaction" = "true" + "compaction_policy" = "time_series" ); """ @@ -155,7 +155,7 @@ suite("test_single_compaction_with_variant_inverted", "p2") { // wait for update replica infos // be.conf: update_replica_infos_interval_seconds + 2s - Thread.sleep(62000) + Thread.sleep(72000) // find the master be for single replica compaction Boolean found = false diff --git a/regression-test/suites/compaction/test_single_replica_compaction.groovy b/regression-test/suites/compaction/test_single_replica_compaction.groovy index 3d8eb8bfc5f2a6..fa4fc4854bc3db 100644 --- a/regression-test/suites/compaction/test_single_replica_compaction.groovy +++ b/regression-test/suites/compaction/test_single_replica_compaction.groovy @@ -32,6 +32,13 @@ suite("test_single_compaction_p2", "p2") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + def set_be_config = { key, value -> + + for (String backend_id: backendId_to_backendIP.keySet()) { + def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) + logger.info("update config: code=" + code + ", out=" + out + ", err=" + err) + } + } def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> if (compact_type == "cumulative") { def (code_1, out_1, err_1) = be_run_cumulative_compaction(be_host, be_http_port, tablet_id) @@ -143,11 +150,14 @@ suite("test_single_compaction_p2", "p2") { ); """ + set_be_config.call("update_replica_infos_interval_seconds", "5") + set_be_config.call("disable_auto_compacton", "true") + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // wait for update replica infos // be.conf: update_replica_infos_interval_seconds + 2s - Thread.sleep(62000) + Thread.sleep(22000) // find the master be for single replica compaction Boolean found = false @@ -264,4 +274,6 @@ 
suite("test_single_compaction_p2", "p2") { select * from ${tableName} order by id """ + set_be_config.call("disable_auto_compacton", "false") + } From b093d122780b7322604148cd8ac1873413e67be9 Mon Sep 17 00:00:00 2001 From: csun5285 Date: Mon, 5 Aug 2024 15:36:20 +0800 Subject: [PATCH 8/8] fix test --- ...t_single_compaction_fault_injection.groovy | 39 +++--------------- ...paction_with_variant_inverted_index.groovy | 1 - .../test_single_replica_compaction.groovy | 40 +++---------------- 3 files changed, 12 insertions(+), 68 deletions(-) diff --git a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy index 6bc8446ce6a7a9..839bfaa10dd154 100644 --- a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy +++ b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy @@ -24,14 +24,6 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - def set_be_config = { key, value -> - - for (String backend_id: backendId_to_backendIP.keySet()) { - def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value) - logger.info("update config: code=" + code + ", out=" + out + ", err=" + err) - } - } - def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> if (compact_type == "cumulative") { def (code_1, out_1, err_1) = be_run_cumulative_compaction(be_host, be_http_port, tablet_id) @@ -124,8 +116,6 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { return tabletStatus } - set_be_config.call("update_replica_infos_interval_seconds", "5") - set_be_config.call("disable_auto_compacton", "true") // find the master be for single compaction Boolean found = false String master_backend_id @@ -144,12 +134,12 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { UNIQUE KEY(`id`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false"); + PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "compaction_policy" = "time_series"); """ tablets = sql_return_maparray """ show tablets from ${tableName}; """ // wait for update replica infos - Thread.sleep(20000) + Thread.sleep(70000) // The test table only has one bucket with 2 replicas, // and `show tablets` will return 2 different replicas with the same tablet. // So we can use the same tablet_id to get tablet/trigger compaction with different backends. @@ -178,8 +168,7 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { } finally { GetDebugPoint().disableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty') // wait for update replica infos - // be.conf: update_replica_infos_interval_seconds + 2s - Thread.sleep(20000) + Thread.sleep(70000) // The test table only has one bucket with 2 replicas, // and `show tablets` will return 2 different replicas with the same tablet. // So we can use the same tablet_id to get tablet/trigger compaction with different backends. 
@@ -250,9 +239,9 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """ sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """ - // trigger master be to do cumu compaction + // trigger master be to do compaction assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "cumulative", tablet_id).contains("Success")); + "full", tablet_id).contains("Success")); waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) try { @@ -315,21 +304,7 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """ sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """ - // trigger master be to do cumu compaction with delete - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - - // trigger follower be to fetch compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - } - - // check rowsets - checkSucceedCompactionResult.call() - - // trigger master be to do base compaction + // trigger master be to do compaction with delete assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], "full", tablet_id).contains("Success")); waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) @@ -346,6 +321,4 @@ suite("test_single_compaction_fault_injection", "p2, nonConcurrent") { qt_sql """ select * from ${tableName} order by id """ - - set_be_config.call("disable_auto_compacton", "false") } \ No newline at end of file diff --git a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy index 5264c8f2d1b447..69768659d59e50 100644 --- a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy +++ b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy @@ -154,7 +154,6 @@ suite("test_single_compaction_with_variant_inverted", "p2") { def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // wait for update replica infos - // be.conf: update_replica_infos_interval_seconds + 2s Thread.sleep(72000) // find the master be for single replica compaction diff --git a/regression-test/suites/compaction/test_single_replica_compaction.groovy b/regression-test/suites/compaction/test_single_replica_compaction.groovy index fa4fc4854bc3db..d8ff209b93cd43 100644 --- a/regression-test/suites/compaction/test_single_replica_compaction.groovy +++ b/regression-test/suites/compaction/test_single_replica_compaction.groovy @@ -27,18 +27,10 @@ suite("test_single_compaction_p2", "p2") { return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet)) } - String backend_id; def backendId_to_backendIP = [:] def 
backendId_to_backendHttpPort = [:]
 getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
- def set_be_config = { key, value ->
-
- for (String backend_id: backendId_to_backendIP.keySet()) {
- def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
- logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
- }
- }
 def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id ->
 if (compact_type == "cumulative") {
 def (code_1, out_1, err_1) = be_run_cumulative_compaction(be_host, be_http_port, tablet_id)
@@ -117,7 +109,7 @@ suite("test_single_compaction_p2", "p2") {
 Thread.sleep(1000)
 StringBuilder sb = new StringBuilder();
 sb.append("curl -X GET http://${be_host}:${be_http_port}")
- sb.append("/api/compaction/run_status?tablet_id=")
+ sb.append("/api/compaction/show?tablet_id=")
 sb.append(tablet_id)
 String command = sb.toString()
@@ -146,18 +138,14 @@ suite("test_single_compaction_p2", "p2") {
 "replication_num" = "2",
 "enable_single_replica_compaction" = "true",
 "enable_unique_key_merge_on_write" = "false",
- "disable_auto_compaction" = "true"
+ "compaction_policy" = "time_series"
 );
 """
- set_be_config.call("update_replica_infos_interval_seconds", "5")
- set_be_config.call("disable_auto_compaction", "true")
-
 def tablets = sql_return_maparray """ show tablets from ${tableName}; """
 // wait for update replica infos
- Thread.sleep(22000)
+ Thread.sleep(70000)
 // find the master be for single replica compaction
 Boolean found = false
@@ -220,9 +208,9 @@ suite("test_single_compaction_p2", "p2") {
 sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """
- // trigger master be to do cumu compaction
+ // trigger master be to do compaction
 assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
- "cumulative", tablet_id).contains("Success"));
+ "full", tablet_id).contains("Success"));
 waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
 // trigger follower be to fetch compaction result
@@ -241,21 +229,7 @@ suite("test_single_compaction_p2", "p2") {
 sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """
- // trigger master be to do cumu compaction with delete
- assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
- "cumulative", tablet_id).contains("Success"));
- waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
-
- // trigger follower be to fetch compaction result
- for (String id in follower_backend_id) {
- assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
- waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
- }
-
- // check rowsets
- checkCompactionResult.call()
-
- // trigger master be to do full compaction
+ // trigger master be to do compaction with delete
 assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
 "full", tablet_id).contains("Success"));
 waitForCompaction(backendId_to_backendIP[master_backend_id],
backendId_to_backendHttpPort[master_backend_id], tablet_id)
@@ -274,6 +248,4 @@ suite("test_single_compaction_p2", "p2") {
 select * from ${tableName} order by id
 """
- set_be_config.call("disable_auto_compaction", "false")
-
 }