From 893b578c51db5e54985782f9325cc03ef7c23068 Mon Sep 17 00:00:00 2001
From: csun5285
Date: Wed, 19 Jun 2024 20:35:37 +0800
Subject: [PATCH 1/2] fix test

fix
remove unused
rebase
fix
fix
fix
fix test
---
 .../test_clone_missing_version.groovy         |  91 ++++
 ...est_compaction_with_visible_version.groovy |   7 +-
 ...t_single_compaction_fault_injection.groovy | 419 ++++++++---------
 ...paction_with_variant_inverted_index.groovy | 381 ++++++++-------
 .../test_single_replica_compaction.groovy     | 434 ++++++++----------
 .../test_time_series_compaction_policy.groovy |  11 +-
 .../test_segcompaction_fault_injection.groovy |  18 -
 ...t_too_many_segments_fault_injection.groovy |  19 -
 .../schema_change/test_number_overflow.groovy |  20 -
 .../test_agg_keys_schema_change.groovy        |  17 -
 .../test_agg_mv_schema_change.groovy          |  17 -
 .../test_agg_rollup_schema_change.groovy      |  17 -
 .../test_agg_vals_schema_change.groovy        | 279 ++++++-----
 .../test_dup_keys_schema_change.groovy        |  17 -
 .../test_dup_mv_schema_change.groovy          |  16 -
 .../test_dup_rollup_schema_change.groovy      |  15 -
 .../test_dup_vals_schema_change.groovy        |  15 -
 .../test_uniq_keys_schema_change.groovy       | 250 +++++-----
 .../test_uniq_mv_schema_change.groovy         |  15 -
 .../test_uniq_rollup_schema_change.groovy     |  15 -
 .../test_uniq_vals_schema_change.groovy       |  15 -
 .../test_varchar_schema_change.groovy         |  16 -
 .../test_segcompaction_agg_keys.groovy        |  19 -
 .../test_segcompaction_agg_keys_index.groovy  |  18 -
 .../test_segcompaction_dup_keys.groovy        |  20 -
 .../test_segcompaction_dup_keys_index.groovy  |  19 -
 .../test_segcompaction_unique_keys.groovy     |  20 -
 ...est_segcompaction_unique_keys_index.groovy |  19 -
 .../test_segcompaction_unique_keys_mow.groovy |  20 -
 ...segcompaction_unique_keys_mow_index.groovy |  20 -
 30 files changed, 916 insertions(+), 1343 deletions(-)
 create mode 100644 regression-test/suites/clone_p0/test_clone_missing_version.groovy

diff --git a/regression-test/suites/clone_p0/test_clone_missing_version.groovy b/regression-test/suites/clone_p0/test_clone_missing_version.groovy
new file mode 100644
index 00000000000000..2981cf3c5e3638
--- /dev/null
+++ b/regression-test/suites/clone_p0/test_clone_missing_version.groovy
@@ -0,0 +1,91 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
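+
+// This case forces one replica to miss versions: while the FE tablet scheduler
+// is disabled, the BE debug point 'TxnManager.prepare_txn.random_failed' makes
+// load transactions fail on the chosen backend, so its replica falls behind.
+// Once the scheduler is re-enabled, the replica is expected to be repaired by
+// clone: Version catches up to 10 and LstFailedVersion resets to -1.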
+
+import org.apache.doris.regression.suite.ClusterOptions
+import org.apache.doris.regression.util.NodeType
+
+suite('test_clone_missing_version') {
+    def options = new ClusterOptions()
+    options.feConfigs += [
+        'disable_tablet_scheduler=true',
+        'tablet_checker_interval_ms=500',
+        'schedule_batch_size=1000',
+        'schedule_slot_num_per_hdd_path=1000',
+    ]
+    options.beConfigs += [
+        'report_tablet_interval_seconds=1',
+    ]
+
+    options.enableDebugPoints()
+    options.cloudMode = false
+    docker(options) {
+        def injectName = 'TxnManager.prepare_txn.random_failed'
+        def be = sql_return_maparray('show backends').get(0)
+        def addInject = { ->
+            GetDebugPoint().enableDebugPoint(be.Host, be.HttpPort as int, NodeType.BE, injectName, [percent:1.0])
+        }
+        def deleteInject = { ->
+            GetDebugPoint().disableDebugPoint(be.Host, be.HttpPort as int, NodeType.BE, injectName)
+        }
+
+        sql """
+            CREATE TABLE t (k INT) DISTRIBUTED BY HASH(k) BUCKETS 1 PROPERTIES (
+                "disable_auto_compaction" = "true"
+            )
+        """
+
+        sql 'INSERT INTO t VALUES(2)'
+
+        addInject()
+
+        sql 'INSERT INTO t VALUES(3)'
+        sql 'INSERT INTO t VALUES(4)'
+        sql 'INSERT INTO t VALUES(5)'
+
+        deleteInject()
+
+        sql 'INSERT INTO t VALUES(6)'
+
+        addInject()
+
+        sql 'INSERT INTO t VALUES(7)'
+        sql 'INSERT INTO t VALUES(8)'
+        sql 'INSERT INTO t VALUES(9)'
+
+        deleteInject()
+
+        sql 'INSERT INTO t VALUES(10)'
+
+        sleep 5000
+
+        def replica = sql_return_maparray('show tablets from t').find { it.BackendId.toLong().equals(be.BackendId.toLong()) }
+        assertNotNull(replica)
+        assertEquals(4, replica.VersionCount.toInteger())
+        assertEquals(2, replica.Version.toInteger())
+        assertEquals(9, replica.LstFailedVersion.toInteger())
+
+        setFeConfig('disable_tablet_scheduler', false)
+
+        sleep 10000
+
+        replica = sql_return_maparray('show tablets from t').find { it.BackendId.toLong().equals(be.BackendId.toLong()) }
+        assertNotNull(replica)
+        assertEquals(10, replica.VersionCount.toInteger())
+        assertEquals(10, replica.Version.toInteger())
+        assertEquals(-1, replica.LstFailedVersion.toInteger())
+    }
+}
diff --git a/regression-test/suites/compaction/test_compaction_with_visible_version.groovy b/regression-test/suites/compaction/test_compaction_with_visible_version.groovy
index 194a1b67566192..4a6ee4c847a5db 100644
--- a/regression-test/suites/compaction/test_compaction_with_visible_version.groovy
+++ b/regression-test/suites/compaction/test_compaction_with_visible_version.groovy
@@ -26,7 +26,6 @@ suite('test_compaction_with_visible_version') {
         'partition_info_update_interval_secs=5',
     ]
     options.beConfigs += [
-        'disable_auto_compaction=true',
         'report_tablet_interval_seconds=1',
         'tablet_rowset_stale_sweep_by_size=true',
         'tablet_rowset_stale_sweep_threshold_size=0',
@@ -166,7 +165,11 @@ suite('test_compaction_with_visible_version') {
         }
     }
 
-    sql " CREATE TABLE ${tableName} (k1 int, k2 int) DISTRIBUTED BY HASH(k1) BUCKETS 1 "
+    sql """
+        CREATE TABLE ${tableName} (k1 int, k2 int) DISTRIBUTED BY HASH(k1) BUCKETS 1 PROPERTIES (
+            "disable_auto_compaction" = "true"
+        )
+    """
 
     // normal
     def rowNum = 0L
diff --git a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy
index ebc74257032cab..839bfaa10dd154 100644
--- a/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy
+++ b/regression-test/suites/compaction/test_single_compaction_fault_injection.groovy
@@ -17,19 +17,12 @@
 
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
-suite("test_single_compaction_fault_injection", "p2") {
+suite("test_single_compaction_fault_injection", "p2, nonConcurrent") {
     def tableName = "test_single_compaction"
-
-    def set_be_config = { key, value ->
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-        for (String backend_id: backendId_to_backendIP.keySet()) {
-            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
-            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
-        }
-    }
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
     def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id ->
         if (compact_type == "cumulative") {
@@ -123,253 +116,209 @@ suite("test_single_compaction_fault_injection", "p2") {
         return tabletStatus
     }
 
-    boolean disableAutoCompaction = true
+    // find the master be for single compaction
+    Boolean found = false
+    String master_backend_id
+    List follower_backend_id = new ArrayList<>()
+    String tablet_id
+    def tablets
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-        set_be_config.call("disable_auto_compaction", "true")
-        set_be_config.call("update_replica_infos_interval_seconds", "5")
-
-
-        // find the master be for single compaction
-        Boolean found = false
-        String master_backend_id
-        List follower_backend_id = new ArrayList<>()
-        String tablet_id
-        def tablets
-        try {
-            GetDebugPoint().enableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty')
-            sql """ DROP TABLE IF EXISTS ${tableName}; """
-            sql """
-                CREATE TABLE ${tableName} (
-                    `id` int(11) NULL,
-                    `name` varchar(255) NULL,
-                    `score` int(11) NULL
-                ) ENGINE=OLAP
-                UNIQUE KEY(`id`)
-                COMMENT 'OLAP'
-                DISTRIBUTED BY HASH(`id`) BUCKETS 1
-                PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false" );
-            """
+        GetDebugPoint().enableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty')
+        sql """ DROP TABLE IF EXISTS ${tableName}; """
+        sql """
+            CREATE TABLE ${tableName} (
+                `id` int(11) NULL,
+                `name` varchar(255) NULL,
+                `score` int(11) NULL
+            ) ENGINE=OLAP
+            UNIQUE KEY(`id`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false", "compaction_policy" = "time_series");
+        """
 
-            tablets = sql_return_maparray """ show tablets from ${tableName}; """
-            // wait for update replica infos
-            Thread.sleep(20000)
-            // The test table only has one bucket with 2 replicas,
-            // and `show tablets` will return 2 different replicas with the same tablet.
-            // So we can use the same tablet_id to get tablet/trigger compaction with different backends.
-            tablet_id = tablets[0].TabletId
-            def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """
-            logger.info("tablet: " + tablet_info)
-            for (def tablet in tablets) {
-                String trigger_backend_id = tablet.BackendId
-                def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id)
-                if (!tablet_status.containsKey("single replica compaction status")) {
-                    if (found) {
-                        found = false
-                        logger.warn("multipe master");
-                        break;
-                    }
-                    found = true
-                    master_backend_id = trigger_backend_id
-                } else {
-                    follower_backend_id.add(trigger_backend_id)
-                }
-            }
-            assertFalse(found)
-            assertFalse(master_backend_id.isEmpty())
-            assertTrue(follower_backend_id.isEmpty())
-            master_backend_id = ""
-        } finally {
-            GetDebugPoint().disableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty')
-            // wait for update replica infos
-            // be.conf: update_replica_infos_interval_seconds + 2s
-            Thread.sleep(20000)
-            // The test table only has one bucket with 2 replicas,
-            // and `show tablets` will return 2 different replicas with the same tablet.
-            // So we can use the same tablet_id to get tablet/trigger compaction with different backends.
-            tablet_id = tablets[0].TabletId
-            def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """
-            for (def tablet in tablets) {
-                String trigger_backend_id = tablet.BackendId
-                def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id);
-                if (!tablet_status.containsKey("single replica compaction status")) {
-                    if (found) {
-                        logger.warn("multipe master")
-                        assertTrue(false)
-                    }
-                    found = true
-                    master_backend_id = trigger_backend_id
-                } else {
-                    follower_backend_id.add(trigger_backend_id)
-                }
-            }
-            assertTrue(found)
-            assertFalse(master_backend_id.isEmpty())
-            assertFalse(follower_backend_id.isEmpty())
-        }
+        tablets = sql_return_maparray """ show tablets from ${tableName}; """
+        // wait for update replica infos
+        Thread.sleep(70000)
+        // The test table only has one bucket with 2 replicas,
+        // and `show tablets` will return 2 different replicas with the same tablet.
+        // So we can use the same tablet_id to get tablet/trigger compaction with different backends.
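+        // While 'getTabletReplicaInfos.returnEmpty' is active the FE reports no
+        // peer replicas, so every BE treats its own replica as the master (its
+        // tablet status carries no "single replica compaction status" field);
+        // the loop below should hit the "multiple master" branch and leave the
+        // follower list empty.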
+        tablet_id = tablets[0].TabletId
+        def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """
+        logger.info("tablet: " + tablet_info)
+        for (def tablet in tablets) {
+            String trigger_backend_id = tablet.BackendId
+            def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id)
+            if (!tablet_status.containsKey("single replica compaction status")) {
+                if (found) {
+                    found = false
+                    logger.warn("multiple master");
+                    break;
+                }
+                found = true
+                master_backend_id = trigger_backend_id
+            } else {
+                follower_backend_id.add(trigger_backend_id)
+            }
+        }
+        assertFalse(found)
+        assertFalse(master_backend_id.isEmpty())
+        assertTrue(follower_backend_id.isEmpty())
+        master_backend_id = ""
+    } finally {
+        GetDebugPoint().disableDebugPointForAllFEs('getTabletReplicaInfos.returnEmpty')
+        // wait for update replica infos
+        Thread.sleep(70000)
+        // The test table only has one bucket with 2 replicas,
+        // and `show tablets` will return 2 different replicas with the same tablet.
+        // So we can use the same tablet_id to get tablet/trigger compaction with different backends.
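+        // With the debug point removed and replica infos refreshed, exactly one
+        // replica should now act as the master and every other replica should
+        // report as a follower.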
+        tablet_id = tablets[0].TabletId
+        def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """
+        for (def tablet in tablets) {
+            String trigger_backend_id = tablet.BackendId
+            def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id);
+            if (!tablet_status.containsKey("single replica compaction status")) {
+                if (found) {
+                    logger.warn("multiple master")
+                    assertTrue(false)
+                }
+                found = true
+                master_backend_id = trigger_backend_id
+            } else {
+                follower_backend_id.add(trigger_backend_id)
+            }
+        }
+        assertTrue(found)
+        assertFalse(master_backend_id.isEmpty())
+        assertFalse(follower_backend_id.isEmpty())
+    }
+
+
-        def checkSucceedCompactionResult = {
-            def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id);
-            def master_rowsets = master_tablet_status."rowsets"
-            assert master_rowsets instanceof List
-            logger.info("rowset size: " + master_rowsets.size())
-
-            for (String backend: follower_backend_id) {
-                def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id);
-                def rowsets = tablet_status."rowsets"
-                assert rowsets instanceof List
-                assertEquals(master_rowsets.size(), rowsets.size())
-            }
-        }
+    def checkSucceedCompactionResult = {
+        def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id);
+        def master_rowsets = master_tablet_status."rowsets"
+        assert master_rowsets instanceof List
+        logger.info("rowset size: " + master_rowsets.size())
+
+        for (String backend: follower_backend_id) {
+            def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id);
+            def rowsets = tablet_status."rowsets"
+            assert rowsets instanceof List
+            assertEquals(master_rowsets.size(), rowsets.size())
+        }
+    }
 
-        def checkFailedCompactionResult = {
-            def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id);
-            def master_rowsets = master_tablet_status."rowsets"
-            assert master_rowsets instanceof List
-            logger.info("rowset size: " + master_rowsets.size())
-
-            for (String backend: follower_backend_id) {
-                def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id);
-                def rowsets = tablet_status."rowsets"
-                assert rowsets instanceof List
-                assertFalse(master_rowsets.size() == rowsets.size())
-            }
-        }
+    def checkFailedCompactionResult = {
+        def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id);
+        def master_rowsets = master_tablet_status."rowsets"
+        assert master_rowsets instanceof List
+        logger.info("rowset size: " + master_rowsets.size())
+
+        for (String backend: follower_backend_id) {
+            def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id);
+            def rowsets = tablet_status."rowsets"
+            assert rowsets instanceof List
+            assertFalse(master_rowsets.size() == rowsets.size())
+        }
+    }
 
-        // return ok
-        try {
-            GetDebugPoint().enableDebugPointForAllBEs("do_single_compaction_return_ok");
-            for (String id in follower_backend_id) {
-                assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
-                waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id);
-            }
-        } finally {
-            GetDebugPoint().disableDebugPointForAllBEs("do_single_compaction_return_ok");
-        }
+    // return ok
+    try {
+        GetDebugPoint().enableDebugPointForAllBEs("do_single_compaction_return_ok");
+        for (String id in follower_backend_id) {
+            assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
+            waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id);
+        }
+    } finally {
+        GetDebugPoint().disableDebugPointForAllBEs("do_single_compaction_return_ok");
+    }
 
-        sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """
-        sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """
-        sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """
-        sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """
-        sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """
-        sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """
-
-        // trigger master be to do cumu compaction
-        assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
-                "cumulative", tablet_id).contains("Success"));
-        waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
-
-        try {
-            GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_get_peer");
-            for (String id in follower_backend_id) {
-                out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
-                assertTrue(out.contains("compaction task is successfully triggered") || out.contains("tablet don't have peer replica"));
-            }
-            checkFailedCompactionResult.call()
-        } finally {
-            GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_get_peer")
-        }
+    sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """
+
+    // trigger master be to do compaction
+    assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
+            "full", tablet_id).contains("Success"));
+    waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
 
-        try {
-            GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_get_peer_versions");
-            for (String id in follower_backend_id) {
-                out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
-                assertTrue(out.contains("compaction task is successfully triggered") || out.contains("tablet failed get peer versions"));
-            }
-            checkFailedCompactionResult.call()
-        } finally {
-            GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_get_peer_versions")
-        }
+    try {
+        GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_get_peer");
+        for (String id in follower_backend_id) {
+            out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
+            assertTrue(out.contains("compaction task is successfully triggered") || out.contains("tablet don't have peer replica"));
+        }
+        checkFailedCompactionResult.call()
+    } finally {
+        GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_get_peer")
+    }
 
-        try {
-            GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_make_snapshot");
-            for (String id in follower_backend_id) {
-                out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
-                assertTrue(out.contains("compaction task is successfully triggered") || out.contains("failed snapshot"));
-            }
-            checkFailedCompactionResult.call()
-        } finally {
-            GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_make_snapshot")
-        }
+    try {
+        GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_get_peer_versions");
+        for (String id in follower_backend_id) {
+            out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
+            assertTrue(out.contains("compaction task is successfully triggered") || out.contains("tablet failed get peer versions"));
+        }
+        checkFailedCompactionResult.call()
+    } finally {
+        GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_get_peer_versions")
+    }
 
-        try {
-            GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_download_file");
-            for (String id in follower_backend_id) {
-                out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
-                assertTrue(out.contains("compaction task is successfully triggered") || out.contains("failed to download file"));
-            }
-            checkFailedCompactionResult.call()
-        } finally {
-            GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_download_file")
-        }
+    try {
+        GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_make_snapshot");
+        for (String id in follower_backend_id) {
+            out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
+            assertTrue(out.contains("compaction task is successfully triggered") || out.contains("failed snapshot"));
+        }
+        checkFailedCompactionResult.call()
+    } finally {
+        GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_make_snapshot")
+    }
 
-        // trigger follower be to fetch compaction result
-        for (String id in follower_backend_id) {
-            assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
-            waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
-        }
+    try {
+        GetDebugPoint().enableDebugPointForAllBEs("single_compaction_failed_download_file");
+        for (String id in follower_backend_id) {
+            out = triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
+            assertTrue(out.contains("compaction task is successfully triggered") || out.contains("failed to download file"));
+        }
+        checkFailedCompactionResult.call()
+    } finally {
+        GetDebugPoint().disableDebugPointForAllBEs("single_compaction_failed_download_file")
+    }
 
-        // check rowsets
-        checkSucceedCompactionResult.call()
-
-        sql """ INSERT INTO ${tableName} VALUES (4, "a", 100); """
-        sql """ INSERT INTO ${tableName} VALUES (5, "a", 100); """
-        sql """ INSERT INTO ${tableName} VALUES (6, "a", 100); """
-        sql """ DELETE FROM ${tableName} WHERE id = 4; """
-        sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """
-        sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """
-
-        // trigger master be to do cumu compaction with delete
-        assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
-                "cumulative", tablet_id).contains("Success"));
-        waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
+    // trigger follower be to fetch compaction result
+    for (String id in follower_backend_id) {
+        assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
+        waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
+    }
 
-        // trigger follower be to fetch compaction result
-        for (String id in follower_backend_id) {
-            assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
-            waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
-        }
+    // check rowsets
+    checkSucceedCompactionResult.call()
+
+    sql """ INSERT INTO ${tableName} VALUES (4, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (5, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (6, "a", 100); """
+    sql """ DELETE FROM ${tableName} WHERE id = 4; """
+    sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """
+
+    // trigger master be to do compaction with delete
+    assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
+            "full", tablet_id).contains("Success"));
+    waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
+
+    // trigger follower be to fetch compaction result
+    for (String id in follower_backend_id) {
+        assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
+        waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
    }
 
-        // check rowsets
-        checkSucceedCompactionResult.call()
-
-        // trigger master be to do base compaction
-        assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
-                "full", tablet_id).contains("Success"));
-        waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
-
-        // trigger follower be to fetch compaction result
-        for (String id in follower_backend_id) {
-            assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
-            waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
-        }
-
-        // check rowsets
-        checkSucceedCompactionResult.call()
+    // check rowsets
+    checkSucceedCompactionResult.call()
 
-        qt_sql """
-            select * from ${tableName} order by id
-        """
-
-    } finally {
-        set_be_config.call("disable_auto_compaction", disableAutoCompaction.toString())
-    }
-}
+    qt_sql """
+        select * from ${tableName} order by id
+    """
+}
\ No newline at end of file
diff --git a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy
index c31d7ab7c28cf9..0e13662f3b433d 100644
--- a/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy
+++ b/regression-test/suites/compaction/test_single_compaction_with_variant_inverted_index.groovy
@@ -19,116 +19,80 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
 
 suite("test_single_compaction_with_variant_inverted", "p2") {
     def tableName = "test_single_compaction_with_variant_inverted"
-
-    def set_be_config = { key, value ->
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        for (String backend_id: backendId_to_backendIP.keySet()) {
-            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
-            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
-        }
-    }
 
     def calc_file_crc_on_tablet = { ip, port, tablet ->
         return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
    }
 
     boolean disableAutoCompaction = true
-    boolean has_update_be_config = false
-    try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
+    String backend_id;
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    backend_id = backendId_to_backendIP.keySet()[0]
+    def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
+
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-        set_be_config.call("disable_auto_compaction", "true")
-        has_update_be_config = true
-
-        def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id ->
-            StringBuilder sb = new StringBuilder();
-            sb.append("curl -X POST http://${be_host}:${be_http_port}")
-            sb.append("/api/compaction/run?tablet_id=")
-            sb.append(tablet_id)
-            sb.append("&compact_type=${compact_type}")
-
-            String command = sb.toString()
-            logger.info(command)
-            process = command.execute()
-            code = process.waitFor()
-            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
-            out = process.getText()
-            logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err)
-            if (!disableAutoCompaction) {
-                return "Success, " + out
-            }
-            assertEquals(code, 0)
-            return out
-        }
-
-        def triggerSingleCompaction = { be_host, be_http_port, tablet_id ->
-            StringBuilder sb = new StringBuilder();
-            sb.append("curl -X POST http://${be_host}:${be_http_port}")
-            sb.append("/api/compaction/run?tablet_id=")
-            sb.append(tablet_id)
-            sb.append("&compact_type=cumulative&remote=true")
+    logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+    assertEquals(code, 0)
+    def configList = parseJson(out.trim())
+    assert configList instanceof List
+
+    for (Object ele in (List) configList) {
+        assert ele instanceof List
+        if (((List) ele)[0] == "disable_auto_compaction") {
+            disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
        }
+    }
 
-            String command = sb.toString()
-            logger.info(command)
-            process = command.execute()
-            code = process.waitFor()
-            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
-            out = process.getText()
-            logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err)
-            if (!disableAutoCompaction) {
-                return "Success, " + out
-            }
-            assertEquals(code, 0)
-            return out
+    def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id ->
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X POST http://${be_host}:${be_http_port}")
+        sb.append("/api/compaction/run?tablet_id=")
+        sb.append(tablet_id)
+        sb.append("&compact_type=${compact_type}")
+
+        String command = sb.toString()
+        logger.info(command)
+        process = command.execute()
+        code = process.waitFor()
+        err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        out = process.getText()
+        logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err)
+        if (!disableAutoCompaction) {
+            return "Success, " + out
        }
+        assertEquals(code, 0)
+        return out
+    }
+
+    def triggerSingleCompaction = { be_host, be_http_port, tablet_id ->
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X POST http://${be_host}:${be_http_port}")
+        sb.append("/api/compaction/run?tablet_id=")
+        sb.append(tablet_id)
+        sb.append("&compact_type=cumulative&remote=true")
+
+        String command = sb.toString()
+        logger.info(command)
+        process = command.execute()
+        code = process.waitFor()
+        err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        out = process.getText()
+        logger.info("Run compaction: code=" + code + ", out=" + out + ", disableAutoCompaction " + disableAutoCompaction + ", err=" + err)
+        if (!disableAutoCompaction) {
+            return "Success, " + out
        }
-        def waitForCompaction = { be_host, be_http_port, tablet_id ->
-            boolean running = true
-            do {
-                Thread.sleep(1000)
-                StringBuilder sb = new StringBuilder();
-                sb.append("curl -X GET http://${be_host}:${be_http_port}")
-                sb.append("/api/compaction/run_status?tablet_id=")
-                sb.append(tablet_id)
-
-                String command = sb.toString()
-                logger.info(command)
-                process = command.execute()
-                code = process.waitFor()
-                out = process.getText()
-                logger.info("Get compaction status: code=" + code + ", out=" + out)
-                assertEquals(code, 0)
-                def compactionStatus = parseJson(out.trim())
-                assertEquals("success", compactionStatus.status.toLowerCase())
-                running = compactionStatus.run_status
-            } while (running)
-        }
-
-        def getTabletStatus = { be_host, be_http_port, tablet_id ->
-            boolean running = true
+        assertEquals(code, 0)
+        return out
+    }
+    def waitForCompaction = { be_host, be_http_port, tablet_id ->
+        boolean running = true
+        do {
             Thread.sleep(1000)
             StringBuilder sb = new StringBuilder();
             sb.append("curl -X GET http://${be_host}:${be_http_port}")
-            sb.append("/api/compaction/show?tablet_id=")
+            sb.append("/api/compaction/run_status?tablet_id=")
             sb.append(tablet_id)
 
             String command = sb.toString()
@@ -136,119 +100,138 @@ suite("test_single_compaction_with_variant_inverted", "p2") {
             process = command.execute()
             code = process.waitFor()
             out = process.getText()
-            logger.info("Get tablet status: code=" + code + ", out=" + out)
+            logger.info("Get compaction status: code=" + code + ", out=" + out)
             assertEquals(code, 0)
-            def tabletStatus = parseJson(out.trim())
-            return tabletStatus
-        }
+            def compactionStatus = parseJson(out.trim())
+            assertEquals("success", compactionStatus.status.toLowerCase())
+            running = compactionStatus.run_status
+        } while (running)
+    }
+
+    def getTabletStatus = { be_host, be_http_port, tablet_id ->
+        boolean running = true
+        Thread.sleep(1000)
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET http://${be_host}:${be_http_port}")
+        sb.append("/api/compaction/show?tablet_id=")
+        sb.append(tablet_id)
+
+        String command = sb.toString()
+        logger.info(command)
+        process = command.execute()
+        code = process.waitFor()
+        out = process.getText()
+        logger.info("Get tablet status: code=" + code + ", out=" + out)
+        assertEquals(code, 0)
+        def tabletStatus = parseJson(out.trim())
+        return tabletStatus
+    }
 
-        sql """ DROP TABLE IF EXISTS ${tableName}; """
-        sql """
-            CREATE TABLE ${tableName} (
-                `id` int(11) NULL,
-                `name` varchar(255) NULL,
-                `score` int(11) NULL,
-                `properties` variant,
-                INDEX idx_props (`properties`) USING INVERTED PROPERTIES("parser" = "none") COMMENT ''
-            ) ENGINE=OLAP
-            DUPLICATE KEY(`id`)
-            COMMENT 'OLAP'
-            DISTRIBUTED BY HASH(`id`) BUCKETS 1
-            PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "inverted_index_storage_format" = "V1");
-        """
-
-        def tablets = sql_return_maparray """ show tablets from ${tableName}; """
-
-        // wait for update replica infos
-        // be.conf: update_replica_infos_interval_seconds + 2s
-        Thread.sleep(62000)
-
-        // find the master be for single replica compaction
-        Boolean found = false
-        String master_backend_id;
-        List follower_backend_id = new ArrayList<>()
-        // The test table only has one bucket with 2 replicas,
-        // and `show tablets` will return 2 different replicas with the same tablet.
-        // So we can use the same tablet_id to get tablet/trigger compaction with different backends.
-        String tablet_id = tablets[0].TabletId
-        def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """
-        logger.info("tablet: " + tablet_info)
-        for (def tablet in tablets) {
-            String trigger_backend_id = tablet.BackendId
-            def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id);
-            if (!tablet_status.containsKey("single replica compaction status")) {
-                if (found) {
-                    logger.warn("multipe master");
-                    assertTrue(false)
-                }
-                found = true
-                master_backend_id = trigger_backend_id
-            } else {
-                follower_backend_id.add(trigger_backend_id)
            }
        }
 
+    sql """ DROP TABLE IF EXISTS ${tableName}; """
+    sql """
+        CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            `properties` variant,
+            INDEX idx_props (`properties`) USING INVERTED PROPERTIES("parser" = "none") COMMENT ''
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "2",
+            "enable_single_replica_compaction" = "true",
+            "inverted_index_storage_format" = "V1",
+            "compaction_policy" = "time_series"
+        );
+    """
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+
+    // wait for update replica infos
+    Thread.sleep(72000)
+
+    // find the master be for single replica compaction
+    Boolean found = false
+    String master_backend_id;
+    List follower_backend_id = new ArrayList<>()
+    // The test table only has one bucket with 2 replicas,
+    // and `show tablets` will return 2 different replicas with the same tablet.
+    // So we can use the same tablet_id to get tablet/trigger compaction with different backends.
+    String tablet_id = tablets[0].TabletId
+    def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """
+    logger.info("tablet: " + tablet_info)
+    for (def tablet in tablets) {
+        String trigger_backend_id = tablet.BackendId
+        def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id);
+        if (!tablet_status.containsKey("single replica compaction status")) {
+            if (found) {
+                logger.warn("multiple master");
+                assertTrue(false)
            }
+            found = true
+            master_backend_id = trigger_backend_id
+        } else {
+            follower_backend_id.add(trigger_backend_id)
        }
+    }
 
-        def checkCompactionResult = {
-            def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id);
-            def master_rowsets = master_tablet_status."rowsets"
-            assert master_rowsets instanceof List
-            logger.info("rowset size: " + master_rowsets.size())
-
-            for (String backend: follower_backend_id) {
-                def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id);
-                def rowsets = tablet_status."rowsets"
-                assert rowsets instanceof List
-                assertEquals(master_rowsets.size(), rowsets.size())
-            }
-        }
+    def checkCompactionResult = {
+        def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id);
+        def master_rowsets = master_tablet_status."rowsets"
+        assert master_rowsets instanceof List
+        logger.info("rowset size: " + master_rowsets.size())
+
+        for (String backend: follower_backend_id) {
+            def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id);
+            def rowsets = tablet_status."rowsets"
+            assert rowsets instanceof List
+            assertEquals(master_rowsets.size(), rowsets.size())
        }
+    }
 
-        def checkTabletFileCrc = {
-            def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
-            logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err)
-
-            for (String backend: follower_backend_id) {
-                def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id)
-                logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err)
-                assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value)
-                assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version)
-                assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version)
-                assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count)
-                assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count)
-            }
-        }
+    def checkTabletFileCrc = {
+        def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
+        logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err)
+
+        for (String backend: follower_backend_id) {
+            def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err)
+            assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value)
+            assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version)
+            assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version)
+            assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count)
+            assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count)
        }
+    }
 
-        sql """ INSERT INTO ${tableName} VALUES (1, "a", 100, '{"a" : 1234, "point" : 1, "xxxx" : "ddddd"}'); """
-        sql """ INSERT INTO ${tableName} VALUES (1, "b", 100, '{"%a" : 1234, "@point" : 1, "[xxxx" : "ddddd"}'); """
-        sql """ INSERT INTO ${tableName} VALUES (2, "a", 100, '{"@a" : 1234, "%point" : 1, "]xxxx" : "ddddd"}'); """
-        sql """ INSERT INTO ${tableName} VALUES (2, "b", 100, '{"%a" : 1234, "%point" : 1, "{xxxx" : "ddddd"}'); """
-        sql """ INSERT INTO ${tableName} VALUES (3, "a", 100, '{"@a" : 1234, "@point" : 1, "}xxxx" : "ddddd"}'); """
-        sql """ INSERT INTO ${tableName} VALUES (3, "b", 100, '{"a" : 1234, "point" : 1, "|xxxx" : "ddddd"}'); """
-
-        // trigger master be to do full compaction
-        assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
-                "full", tablet_id).contains("Success"));
-        waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
-
-        // trigger follower be to fetch compaction result
-        for (String id in follower_backend_id) {
-            assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
-            waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
-        }
+    sql """ INSERT INTO ${tableName} VALUES (1, "a", 100, '{"a" : 1234, "point" : 1, "xxxx" : "ddddd"}'); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "b", 100, '{"%a" : 1234, "@point" : 1, "[xxxx" : "ddddd"}'); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "a", 100, '{"@a" : 1234, "%point" : 1, "]xxxx" : "ddddd"}'); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "b", 100, '{"%a" : 1234, "%point" : 1, "{xxxx" : "ddddd"}'); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "a", 100, '{"@a" : 1234, "@point" : 1, "}xxxx" : "ddddd"}'); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "b", 100, '{"a" : 1234, "point" : 1, "|xxxx" : "ddddd"}'); """
+
+    // trigger master be to do full compaction
+    assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
+            "full", tablet_id).contains("Success"));
+    waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
+
+    // trigger follower be to fetch compaction result
+    for (String id in follower_backend_id) {
+        assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
+        waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
+    }
 
-        // check rowsets
-        checkCompactionResult.call()
-        checkTabletFileCrc.call()
+    // check rowsets
+    checkCompactionResult.call()
+    checkTabletFileCrc.call()
 
-        qt_sql """
-            select count() from ${tableName} where properties MATCH_ANY 'point xxxx';
-        """
+    qt_sql """
+        select count() from ${tableName} where properties MATCH_ANY 'point xxxx';
+    """
 
-        sql """ DROP TABLE IF EXISTS ${tableName}; """
-
-    } finally {
-        if (has_update_be_config) {
-            set_be_config.call("disable_auto_compaction", disableAutoCompaction.toString())
-        }
-    }
+    sql """ DROP TABLE IF EXISTS ${tableName}; """
 }
diff --git a/regression-test/suites/compaction/test_single_replica_compaction.groovy b/regression-test/suites/compaction/test_single_replica_compaction.groovy
index 0b83ddd51e6482..d8ff209b93cd43 100644
--- a/regression-test/suites/compaction/test_single_replica_compaction.groovy
+++ b/regression-test/suites/compaction/test_single_replica_compaction.groovy
@@ -17,128 +17,78 @@
 
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
-suite("test_single_replica_compaction", "p2") {
+suite("test_single_compaction_p2", "p2") {
+    if (isCloudMode()) {
+        return;
+    }
     def tableName = "test_single_replica_compaction"
 
-    def set_be_config = { key, value ->
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        for (String backend_id: backendId_to_backendIP.keySet()) {
-            def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
-            logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
-        }
-    }
-
     def calc_file_crc_on_tablet = { ip, port, tablet ->
         return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
    }
 
-    boolean disableAutoCompaction = true
-    boolean has_update_be_config = false
-    try {
{ - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } - set_be_config.call("disable_auto_compaction", "true") - has_update_be_config = true - - def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> - if (compact_type == "cumulative") { - def (code_1, out_1, err_1) = be_run_cumulative_compaction(be_host, be_http_port, tablet_id) - logger.info("Run compaction: code=" + code_1 + ", out=" + out_1 + ", err=" + err_1) - assertEquals(code_1, 0) - return out_1 - } else if (compact_type == "full") { - def (code_2, out_2, err_2) = be_run_full_compaction(be_host, be_http_port, tablet_id) - logger.info("Run compaction: code=" + code_2 + ", out=" + out_2 + ", err=" + err_2) - assertEquals(code_2, 0) - return out_2 - } else { - assertFalse(True) - } + def backendId_to_backendIP = [:] + def backendId_to_backendHttpPort = [:] + getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); + + def triggerCompaction = { be_host, be_http_port, compact_type, tablet_id -> + if (compact_type == "cumulative") { + def (code_1, out_1, err_1) = be_run_cumulative_compaction(be_host, be_http_port, tablet_id) + logger.info("Run compaction: code=" + code_1 + ", out=" + out_1 + ", err=" + err_1) + assertEquals(code_1, 0) + return out_1 + } else if (compact_type == "full") { + def (code_2, out_2, err_2) = be_run_full_compaction(be_host, be_http_port, tablet_id) + logger.info("Run compaction: code=" + code_2 + ", out=" + out_2 + ", err=" + err_2) + assertEquals(code_2, 0) + return out_2 + } else { + assertFalse(True) } + } - def triggerSingleCompaction = { be_host, be_http_port, tablet_id -> - StringBuilder sb = new StringBuilder(); - sb.append("curl -X POST http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run?tablet_id=") - sb.append(tablet_id) - sb.append("&compact_type=cumulative&remote=true") - - Integer maxRetries = 10; // Maximum number of retries - Integer retryCount = 0; // Current retry count - Integer sleepTime = 5000; // Sleep time in milliseconds - String cmd = sb.toString() - def process - int code_3 - String err_3 - String out_3 - - while (retryCount < maxRetries) { - process = cmd.execute() - code_3 = process.waitFor() - err_3 = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))) - out_3 = process.getText() - - // If the command was successful, break the loop - if (code_3 == 0) { - break - } - - // If the command was not successful, increment the retry count, sleep for a while and try again - retryCount++ - sleep(sleepTime) + def triggerSingleCompaction = { be_host, be_http_port, tablet_id -> + StringBuilder sb = new StringBuilder(); + sb.append("curl -X POST http://${be_host}:${be_http_port}") + sb.append("/api/compaction/run?tablet_id=") + sb.append(tablet_id) + sb.append("&compact_type=cumulative&remote=true") + + Integer maxRetries = 10; // 
Maximum number of retries + Integer retryCount = 0; // Current retry count + Integer sleepTime = 5000; // Sleep time in milliseconds + String cmd = sb.toString() + def process + int code_3 + String err_3 + String out_3 + + while (retryCount < maxRetries) { + process = cmd.execute() + code_3 = process.waitFor() + err_3 = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream()))) + out_3 = process.getText() + + // If the command was successful, break the loop + if (code_3 == 0) { + break } - assertEquals(code_3, 0) - logger.info("Get compaction status: code=" + code_3 + ", out=" + out_3) - return out_3 - } - def waitForCompaction = { be_host, be_http_port, tablet_id -> - boolean running = true - do { - Thread.sleep(1000) - StringBuilder sb = new StringBuilder(); - sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/run_status?tablet_id=") - sb.append(tablet_id) - String command = sb.toString() - logger.info(command) - process = command.execute() - code = process.waitFor() - out = process.getText() - logger.info("Get compaction status: code=" + code + ", out=" + out) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) + // If the command was not successful, increment the retry count, sleep for a while and try again + retryCount++ + sleep(sleepTime) } - - def getTabletStatus = { be_host, be_http_port, tablet_id -> - boolean running = true + assertEquals(code_3, 0) + logger.info("Get compaction status: code=" + code_3 + ", out=" + out_3) + return out_3 + } + def waitForCompaction = { be_host, be_http_port, tablet_id -> + boolean running = true + do { Thread.sleep(1000) StringBuilder sb = new StringBuilder(); sb.append("curl -X GET http://${be_host}:${be_http_port}") - sb.append("/api/compaction/show?tablet_id=") + sb.append("/api/compaction/run_status?tablet_id=") sb.append(tablet_id) String command = sb.toString() @@ -146,150 +96,156 @@ suite("test_single_replica_compaction", "p2") { process = command.execute() code = process.waitFor() out = process.getText() - logger.info("Get tablet status: code=" + code + ", out=" + out) + logger.info("Get compaction status: code=" + code + ", out=" + out) assertEquals(code, 0) - def tabletStatus = parseJson(out.trim()) - return tabletStatus - } - - - sql """ DROP TABLE IF EXISTS ${tableName}; """ - sql """ - CREATE TABLE ${tableName} ( - `id` int(11) NULL, - `name` varchar(255) NULL, - `score` int(11) NULL - ) ENGINE=OLAP - UNIQUE KEY(`id`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ( "replication_num" = "2", "enable_single_replica_compaction" = "true", "enable_unique_key_merge_on_write" = "false" ); - """ - - def tablets = sql_return_maparray """ show tablets from ${tableName}; """ - - // wait for update replica infos - // be.conf: update_replica_infos_interval_seconds + 2s - Thread.sleep(62000) - - // find the master be for single replica compaction - Boolean found = false - String master_backend_id; - List follower_backend_id = new ArrayList<>() - // The test table only has one bucket with 2 replicas, - // and `show tablets` will return 2 different replicas with the same tablet. - // So we can use the same tablet_id to get tablet/trigger compaction with different backends. 
- String tablet_id = tablets[0].TabletId - def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ - logger.info("tablet: " + tablet_info) - for (def tablet in tablets) { - String trigger_backend_id = tablet.BackendId - def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); - if (!tablet_status.containsKey("single replica compaction status")) { - if (found) { - logger.warn("multipe master"); - assertTrue(false) - } - found = true - master_backend_id = trigger_backend_id - } else { - follower_backend_id.add(trigger_backend_id) - } - } - - def checkCompactionResult = { - def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); - def master_rowsets = master_tablet_status."rowsets" - assert master_rowsets instanceof List - logger.info("rowset size: " + master_rowsets.size()) + def compactionStatus = parseJson(out.trim()) + assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } - for (String backend: follower_backend_id) { - def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); - def rowsets = tablet_status."rowsets" - assert rowsets instanceof List - assertEquals(master_rowsets.size(), rowsets.size()) - } - } + def getTabletStatus = { be_host, be_http_port, tablet_id -> + boolean running = true + Thread.sleep(1000) + StringBuilder sb = new StringBuilder(); + sb.append("curl -X GET http://${be_host}:${be_http_port}") + sb.append("/api/compaction/show?tablet_id=") + sb.append(tablet_id) + + String command = sb.toString() + logger.info(command) + process = command.execute() + code = process.waitFor() + out = process.getText() + logger.info("Get compaction status: code=" + code + ", out=" + out) + assertEquals(code, 0) + def tabletStatus = parseJson(out.trim()) + return tabletStatus + } - def checkTabletFileCrc = { - def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err) - for (String backend: follower_backend_id) { - def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id) - logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err) - assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value) - assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version) - assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version) - assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count) - assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count) + sql """ DROP TABLE IF EXISTS ${tableName}; """ + sql """ + CREATE TABLE ${tableName} ( + `id` int(11) NULL, + `name` varchar(255) NULL, + `score` int(11) NULL + ) ENGINE=OLAP + UNIQUE KEY(`id`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`id`) BUCKETS 1 + PROPERTIES ( + 
"replication_num" = "2", + "enable_single_replica_compaction" = "true", + "enable_unique_key_merge_on_write" = "false", + "compaction_policy" = "time_series" + ); + """ + + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ + + // wait for update replica infos + Thread.sleep(70000) + + // find the master be for single replica compaction + Boolean found = false + String master_backend_id; + List follower_backend_id = new ArrayList<>() + // The test table only has one bucket with 2 replicas, + // and `show tablets` will return 2 different replicas with the same tablet. + // So we can use the same tablet_id to get tablet/trigger compaction with different backends. + String tablet_id = tablets[0].TabletId + def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """ + logger.info("tablet: " + tablet_info) + for (def tablet in tablets) { + String trigger_backend_id = tablet.BackendId + def tablet_status = getTabletStatus(backendId_to_backendIP[trigger_backend_id], backendId_to_backendHttpPort[trigger_backend_id], tablet_id); + if (!tablet_status.containsKey("single replica compaction status")) { + if (found) { + logger.warn("multipe master"); + assertTrue(false) } + found = true + master_backend_id = trigger_backend_id + } else { + follower_backend_id.add(trigger_backend_id) } + } - sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """ - sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """ - sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """ - - // trigger master be to do cumu compaction - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], - "cumulative", tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) - - // trigger follower be to fetch compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + def checkCompactionResult = { + def master_tablet_status = getTabletStatus(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id); + def master_rowsets = master_tablet_status."rowsets" + assert master_rowsets instanceof List + logger.info("rowset size: " + master_rowsets.size()) + + for (String backend: follower_backend_id) { + def tablet_status = getTabletStatus(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id); + def rowsets = tablet_status."rowsets" + assert rowsets instanceof List + assertEquals(master_rowsets.size(), rowsets.size()) } + } - // check rowsets - checkCompactionResult.call() - - sql """ INSERT INTO ${tableName} VALUES (4, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (5, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (6, "a", 100); """ - sql """ DELETE FROM ${tableName} WHERE id = 4; """ - sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """ - sql """ INSERT INTO ${tableName} VALUES (8, "a", 100); """ - - // trigger master be to do cumu compaction with delete - assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], 
-            "cumulative", tablet_id).contains("Success"));
-        waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
-
-        // trigger follower be to fetch compaction result
-        for (String id in follower_backend_id) {
-            assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
-            waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
+    def checkTabletFileCrc = {
+        def (master_code, master_out, master_err) = calc_file_crc_on_tablet(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
+        logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[master_backend_id] + " code=" + master_code + ", out=" + master_out + ", err=" + master_err)
+
+        for (String backend: follower_backend_id) {
+            def (follower_code, follower_out, follower_err) = calc_file_crc_on_tablet(backendId_to_backendIP[backend], backendId_to_backendHttpPort[backend], tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: ip=" + backendId_to_backendIP[backend] + " code=" + follower_code + ", out=" + follower_out + ", err=" + follower_err)
+            assertTrue(parseJson(follower_out.trim()).crc_value == parseJson(master_out.trim()).crc_value)
+            assertTrue(parseJson(follower_out.trim()).start_version == parseJson(master_out.trim()).start_version)
+            assertTrue(parseJson(follower_out.trim()).end_version == parseJson(master_out.trim()).end_version)
+            assertTrue(parseJson(follower_out.trim()).file_count == parseJson(master_out.trim()).file_count)
+            assertTrue(parseJson(follower_out.trim()).rowset_count == parseJson(master_out.trim()).rowset_count)
         }
     }

-        // check rowsets
-        checkCompactionResult.call()
+    sql """ INSERT INTO ${tableName} VALUES (1, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "b", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "b", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "b", 100); """
+
+    // trigger master be to do compaction
+    assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
+            "full", tablet_id).contains("Success"));
+    waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
+
+    // trigger follower be to fetch compaction result
+    for (String id in follower_backend_id) {
+        assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success"));
+        waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id)
+    }

-        // trigger master be to do full compaction
-        assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id],
-            "full", tablet_id).contains("Success"));
-        waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id)
+    // check rowsets
+    checkCompactionResult.call()
+
+    sql """ INSERT INTO ${tableName} VALUES (4, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (5, "a", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (6, "a", 100); """
+    sql """ DELETE FROM ${tableName} WHERE id = 4; """
+    sql """ INSERT INTO ${tableName} VALUES (7, "a", 100); """
""" INSERT INTO ${tableName} VALUES (8, "a", 100); """ + + // trigger master be to do compaction with delete + assertTrue(triggerCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], + "full", tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[master_backend_id], backendId_to_backendHttpPort[master_backend_id], tablet_id) + + // trigger follower be to fetch compaction result + for (String id in follower_backend_id) { + assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); + waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) + } - // trigger follower be to fetch compaction result - for (String id in follower_backend_id) { - assertTrue(triggerSingleCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id).contains("Success")); - waitForCompaction(backendId_to_backendIP[id], backendId_to_backendHttpPort[id], tablet_id) - } + // check rowsets + checkCompactionResult.call() + checkTabletFileCrc.call() - // check rowsets - checkCompactionResult.call() - checkTabletFileCrc.call() + qt_sql """ + select * from ${tableName} order by id + """ - qt_sql """ - select * from ${tableName} order by id - """ - - } finally { - if (has_update_be_config) { - set_be_config.call("disable_auto_compaction", disableAutoCompaction.toString()) - } - } } diff --git a/regression-test/suites/compaction/test_time_series_compaction_policy.groovy b/regression-test/suites/compaction/test_time_series_compaction_policy.groovy index 5c37ff45cb162b..2e8018f94a6a09 100644 --- a/regression-test/suites/compaction/test_time_series_compaction_policy.groovy +++ b/regression-test/suites/compaction/test_time_series_compaction_policy.groovy @@ -22,7 +22,7 @@ suite("test_time_series_compaction_polciy", "p0") { def backendId_to_backendIP = [:] def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - + def trigger_cumulative_compaction_on_tablets = { tablets -> for (def tablet : tablets) { String tablet_id = tablet.TabletId @@ -86,7 +86,11 @@ suite("test_time_series_compaction_polciy", "p0") { DUPLICATE KEY(`id`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`id`) BUCKETS 2 - PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = "true", "compaction_policy" = "time_series"); + PROPERTIES ( + "replication_num" = "1", + "disable_auto_compaction" = "true", + "compaction_policy" = "time_series" + ); """ // insert 16 lines, BUCKETS = 2 sql """ INSERT INTO ${tableName} VALUES (1, "andy", "andy love apple", 100); """ @@ -149,6 +153,9 @@ suite("test_time_series_compaction_polciy", "p0") { assert (rowsetCount == 22 * replicaNum) qt_sql_2 """ select count() from ${tableName}""" + if (isCloudMode()) { + return; + } sql """ alter table ${tableName} set ("time_series_compaction_file_count_threshold"="10")""" sql """sync""" // trigger cumulative compactions for all tablets in ${tableName} diff --git a/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy index ceac616a8f1430..b37ac4797dfded 100644 --- a/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy +++ b/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy @@ -43,26 +43,8 @@ suite("test_segcompaction_correctness", "nonConcurrent,p2") { String endpoint 
diff --git a/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
index ceac616a8f1430..b37ac4797dfded 100644
--- a/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
+++ b/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
@@ -43,26 +43,8 @@ suite("test_segcompaction_correctness", "nonConcurrent,p2") {
     String endpoint = getS3Endpoint()
     String region = getS3Region()
     String bucket = getS3BucketName()
-    def backendId_to_backendIP = [:]
-    def backendId_to_backendHttpPort = [:]
-    String backend_id;

     try {
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql "${create_table_sql}"
diff --git a/regression-test/suites/fault_injection_p0/test_too_many_segments_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_too_many_segments_fault_injection.groovy
index 9c55d2938ba0be..6556c792a67e1a 100644
--- a/regression-test/suites/fault_injection_p0/test_too_many_segments_fault_injection.groovy
+++ b/regression-test/suites/fault_injection_p0/test_too_many_segments_fault_injection.groovy
@@ -44,26 +44,7 @@ suite("test_too_many_segments", "nonConcurrent,p2") { // the epic -238 case
     String endpoint = getS3Endpoint()
     String region = getS3Region()
     String bucket = getS3BucketName()
-    def backendId_to_backendIP = [:]
-    def backendId_to_backendHttpPort = [:]
-    String backend_id;

     try {
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql "${create_table_sql}"
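Both fault-injection suites above lose the show_be_config probing that used to read disable_auto_compaction from one backend. The replacement pattern elsewhere in this patch is to pin the behaviour per table, as a sketch like the following illustrates (the table name is illustrative):

    // The table property makes the test independent of the backends'
    // global disable_auto_compaction setting, so no BE config probing is needed.
    sql """ DROP TABLE IF EXISTS t """
    sql """
        CREATE TABLE t (k INT, v INT) DISTRIBUTED BY HASH(k) BUCKETS 1 PROPERTIES (
            "disable_auto_compaction" = "true"
        )
    """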
diff --git a/regression-test/suites/schema_change/test_number_overflow.groovy b/regression-test/suites/schema_change/test_number_overflow.groovy
index e69bb4e6ce8aeb..d13e797c1e37a6 100644
--- a/regression-test/suites/schema_change/test_number_overflow.groovy
+++ b/regression-test/suites/schema_change/test_number_overflow.groovy
@@ -25,26 +25,6 @@ suite ("test_number_overflow") {

     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS test_number_overflow """
         sql """ CREATE TABLE IF NOT EXISTS test_number_overflow ( k1 INT NOT NULL, k2 VARCHAR(4096) NOT NULL, k3 VARCHAR(4096) NOT NULL, k4 VARCHAR(4096) NOT NULL, k5 VARCHAR(4096) NOT NULL, k6 VARCHAR(4096) NOT NULL, k7 VARCHAR(4096) NOT NULL, k8 VARCHAR(4096) NOT NULL, k9 VARCHAR(4096) NOT NULL, v1 FLOAT SUM NOT NULL, v2 DECIMAL(20,7) SUM NOT NULL ) AGGREGATE KEY(k1,k2,k3,k4,k5,k6,k7,k8,k9) PARTITION BY RANGE(k1) ( PARTITION partition_a VALUES LESS THAN ("5"), PARTITION partition_b VALUES LESS THAN ("30"), PARTITION partition_c VALUES LESS THAN ("100"), PARTITION partition_d VALUES LESS THAN ("500"), PARTITION partition_e VALUES LESS THAN ("1000"), PARTITION partition_f VALUES LESS THAN ("2000"), PARTITION partition_g VALUES LESS THAN MAXVALUE ) DISTRIBUTED BY HASH(k1, k2) BUCKETS 3
diff --git a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
index afc5b784fbe137..93e9443accdad9 100644
--- a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
@@ -27,27 +27,10 @@ suite ("test_agg_keys_schema_change") {

     try {
-        String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);

-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS schema_change_agg_keys_regression_test """
         sql """
             CREATE TABLE IF NOT EXISTS schema_change_agg_keys_regression_test (
diff --git a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
index 59ebfdd35cdfd7..f7af72269829b0 100644
--- a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
@@ -44,27 +44,10 @@ suite ("test_agg_mv_schema_change") {
     def tableName = "schema_change_agg_mv_regression_test"

     try {
-        String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);

-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
             CREATE TABLE IF NOT EXISTS ${tableName} (
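The schema-change suites below all finish with the same round trip: trigger cumulative compaction on every tablet, then poll each backend until it reports no compaction running. A condensed sketch of that loop, assuming the be_run_cumulative_compaction and be_get_compaction_status helpers already used in these suites (compactAndWait is an illustrative name):

    def compactAndWait = { tablets, ipOf, portOf ->
        for (def tablet in tablets) {
            // kick off compaction on the tablet's backend; failures are logged, not fatal
            be_run_cumulative_compaction(ipOf.get(tablet[2]), portOf.get(tablet[2]), tablet[0])
        }
        for (def tablet in tablets) {
            boolean running = true
            while (running) {
                Thread.sleep(100)
                def (code, out, err) = be_get_compaction_status(ipOf.get(tablet[2]), portOf.get(tablet[2]), tablet[0])
                assertEquals(code, 0)
                def status = parseJson(out.trim())
                assertEquals("success", status.status.toLowerCase())
                running = status.run_status // true while a compaction task is still executing
            }
        }
    }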
diff --git a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
index b567c8bc03cecb..78578c7b52230a 100644
--- a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
@@ -44,27 +44,10 @@ suite ("test_agg_rollup_schema_change") {
     }

     try {
-        String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);

-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
             CREATE TABLE IF NOT EXISTS ${tableName} (
diff --git a/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
index ebe882a543db50..165fba94b9a63d 100644
--- a/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
@@ -22,163 +22,148 @@ suite ("test_agg_vals_schema_change") {

     try {
-        String backend_id;
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);

-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
-
-        sql """ DROP TABLE IF EXISTS ${tableName} """
-
-        sql """
-            CREATE TABLE IF NOT EXISTS ${tableName} (
-                `user_id` LARGEINT NOT NULL COMMENT "用户id",
-                `date` DATE NOT NULL COMMENT "数据灌入日期时间",
-                `city` VARCHAR(20) COMMENT "用户所在城市",
-                `age` SMALLINT COMMENT "用户年龄",
-                `sex` TINYINT COMMENT "用户性别",
-                `last_visit_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
-                `last_update_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
-                `last_visit_date_not_null` DATETIME REPLACE NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
-                `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费",
-                `max_dwell_time` INT MAX DEFAULT "0" COMMENT "用户最大停留时间",
-                `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "用户最小停留时间",
-                `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL列",
-                `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap列")
-            AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
-            BUCKETS 8
-            PROPERTIES ( "replication_num" = "1", "light_schema_change" = "false" );
-        """
-
- sql """ INSERT INTO ${tableName} VALUES - (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20, hll_hash(1), to_bitmap(1)) - """ - - sql """ INSERT INTO ${tableName} VALUES - (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19, hll_hash(2), to_bitmap(2)) - """ - - qt_sc """ - select * from ${tableName} order by user_id - """ - - // alter and test light schema change - sql """ALTER TABLE ${tableName} SET ("light_schema_change" = "true");""" - - sql """ INSERT INTO ${tableName} VALUES - (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21, hll_hash(2), to_bitmap(2)) - """ - - sql """ INSERT INTO ${tableName} VALUES - (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(3), to_bitmap(3)) - """ - qt_sc """ - select * from ${tableName} order by user_id + sql """ DROP TABLE IF EXISTS ${tableName} """ + + sql """ + CREATE TABLE IF NOT EXISTS ${tableName} ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `date` DATE NOT NULL COMMENT "数据灌入日期时间", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `last_visit_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", + `last_update_date` DATETIME REPLACE DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间", + `last_visit_date_not_null` DATETIME REPLACE NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", + `cost` BIGINT SUM DEFAULT "0" COMMENT "用户总消费", + `max_dwell_time` INT MAX DEFAULT "0" COMMENT "用户最大停留时间", + `min_dwell_time` INT MIN DEFAULT "99999" COMMENT "用户最小停留时间", + `hll_col` HLL HLL_UNION NOT NULL COMMENT "HLL列", + `bitmap_col` Bitmap BITMAP_UNION NOT NULL COMMENT "bitmap列") + AGGREGATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`) + BUCKETS 8 + PROPERTIES ( "replication_num" = "1", "light_schema_change" = "false" ); + """ + + sql """ INSERT INTO ${tableName} VALUES + (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20, hll_hash(1), to_bitmap(1)) + """ + + sql """ INSERT INTO ${tableName} VALUES + (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19, hll_hash(2), to_bitmap(2)) + """ + + qt_sc """ + select * from ${tableName} order by user_id """ - // add column - sql """ - ALTER table ${tableName} ADD COLUMN new_column INT MAX default "1" - """ - - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ - - sql """ INSERT INTO ${tableName} VALUES - (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) - """ - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ - - - sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`, - `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`, `hll_col`, `bitmap_col`) - VALUES - (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4)) - """ - - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """ - - sql """ INSERT INTO ${tableName} VALUES - (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) - """ - qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """ - - qt_sc """ select count(*) from ${tableName} """ - - // drop column - sql """ - ALTER TABLE ${tableName} DROP COLUMN last_visit_date - """ - qt_sc """ select * 
from ${tableName} where user_id = 3 """ - - sql """ INSERT INTO ${tableName} VALUES - (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) - """ - - qt_sc """ select * from ${tableName} where user_id = 4 """ - - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) - """ - - // compaction - String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } + // alter and test light schema change + if (!isCloudMode()) { + sql """ALTER TABLE ${tableName} SET ("light_schema_change" = "true");""" + } - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(100) + sql """ INSERT INTO ${tableName} VALUES + (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21, hll_hash(2), to_bitmap(2)) + """ + + sql """ INSERT INTO ${tableName} VALUES + (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(3), to_bitmap(3)) + """ + qt_sc """ + select * from ${tableName} order by user_id + """ + + // add column + sql """ + ALTER table ${tableName} ADD COLUMN new_column INT MAX default "1" + """ + + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ + + sql """ INSERT INTO ${tableName} VALUES + (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) + """ + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ + + + sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`, + `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`, `hll_col`, `bitmap_col`) + VALUES + (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4)) + """ + + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """ + + sql """ INSERT INTO ${tableName} VALUES + (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) + """ + qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """ + + qt_sc """ select count(*) from ${tableName} """ + + // drop column + sql """ + ALTER TABLE ${tableName} DROP COLUMN last_visit_date + """ + qt_sc """ select * from ${tableName} where 
user_id = 3 """ + + sql """ INSERT INTO ${tableName} VALUES + (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(4), to_bitmap(4), 2) + """ + + qt_sc """ select * from ${tableName} where user_id = 4 """ + + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', 1, 32, 20, hll_hash(5), to_bitmap(5), 2) + """ + + // compaction + String[][] tablets = sql """ show tablets from ${tableName}; """ + for (String[] tablet in tablets) { String tablet_id = tablet[0] backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - qt_sc """ select count(*) from ${tableName} """ + logger.info("run compaction:" + tablet_id) + (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) + //assertEquals(code, 0) + } + + // wait for all compactions done + for (String[] tablet in tablets) { + boolean running = true + do { + Thread.sleep(100) + String tablet_id = tablet[0] + backend_id = tablet[2] + (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactionStatus = parseJson(out.trim()) + assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } + qt_sc """ select count(*) from ${tableName} """ - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ } finally { //try_sql("DROP TABLE IF EXISTS ${tableName}") diff --git a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy index dd2fa43f095a48..52814b312f25ef 100644 --- a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy @@ -25,27 +25,10 @@ suite ("test_dup_keys_schema_change") { } try { - String backend_id; def backendId_to_backendIP = [:] def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, 
backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } - sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy index 168c450ebe141f..6ce9812e130833 100644 --- a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy @@ -44,22 +44,6 @@ suite ("test_dup_mv_schema_change") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } - sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy index 1f1e0c5b61ac33..ad29f7d227745f 100644 --- a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy @@ -49,21 +49,6 @@ suite ("test_dup_rollup_schema_change") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy index 6a3d9f61c9e528..ca7c0f598d8477 100644 --- a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy @@ -26,21 +26,6 @@ suite ("test_dup_vals_schema_change") { def backendId_to_backendHttpPort = [:] 
getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy index d3eaec8cfa3cd9..e06c27b8abf4db 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy @@ -25,143 +25,129 @@ suite ("test_uniq_keys_schema_change") { def backendId_to_backendIP = [:] def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } + sql """ DROP TABLE IF EXISTS ${tableName} """ + + sql """ + CREATE TABLE IF NOT EXISTS schema_change_uniq_keys_regression_test ( + `user_id` LARGEINT NOT NULL COMMENT "用户id", + `date` DATE NOT NULL COMMENT "数据灌入日期时间", + `city` VARCHAR(20) COMMENT "用户所在城市", + `age` SMALLINT COMMENT "用户年龄", + `sex` TINYINT COMMENT "用户性别", + `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", + `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间", + `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", + `cost` BIGINT DEFAULT "0" COMMENT "用户总消费", + `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间", + `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间") + UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`) + BUCKETS 8 + PROPERTIES ( "replication_num" = "1", "light_schema_change" = "false"); + """ + + sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES + (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20) + """ + + sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES + (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19) + """ + + qt_sc """ select count(*) from schema_change_uniq_keys_regression_test """ + + // alter and test light schema change + if (!isCloudMode()) { + sql """ALTER TABLE ${tableName} SET ("light_schema_change" = "true");""" } - sql """ DROP TABLE IF EXISTS ${tableName} """ - - sql """ - CREATE TABLE IF NOT EXISTS schema_change_uniq_keys_regression_test ( - `user_id` LARGEINT NOT NULL COMMENT 
"用户id", - `date` DATE NOT NULL COMMENT "数据灌入日期时间", - `city` VARCHAR(20) COMMENT "用户所在城市", - `age` SMALLINT COMMENT "用户年龄", - `sex` TINYINT COMMENT "用户性别", - `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", - `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间", - `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间", - `cost` BIGINT DEFAULT "0" COMMENT "用户总消费", - `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间", - `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间") - UNIQUE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`) - BUCKETS 8 - PROPERTIES ( "replication_num" = "1", "light_schema_change" = "false"); - """ - - sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES - (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20) - """ - - sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES - (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19) - """ - - qt_sc """ select count(*) from schema_change_uniq_keys_regression_test """ - - // alter and test light schema change - sql """ALTER TABLE ${tableName} SET ("light_schema_change" = "true");""" - - sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES - (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21) - """ - - sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES - (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20) - """ - qt_sc """ - select count(*) from schema_change_uniq_keys_regression_test - """ - - // add column - sql """ - ALTER table ${tableName} ADD COLUMN new_column INT default "1" - """ - - sql """ SELECT * FROM ${tableName} WHERE user_id=2 """ - - sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`, - `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`) - VALUES - (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20) - """ - - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """ - - - sql """ INSERT INTO ${tableName} VALUES - (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) - """ - qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """ - - qt_sc """ select count(*) from ${tableName} """ - - sql """ INSERT INTO ${tableName} VALUES - (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) - """ - - qt_sc """ select * from ${tableName} where user_id = 4 """ - - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) - """ - sql """ INSERT INTO ${tableName} VALUES - (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) - """ - - // compaction - 
String[][] tablets = sql """ show tablets from ${tableName}; """ - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] - logger.info("run compaction:" + tablet_id) - (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) - //assertEquals(code, 0) - } - // wait for all compactions done - for (String[] tablet in tablets) { - boolean running = true - do { - Thread.sleep(100) + sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES + (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21) + """ + + sql """ INSERT INTO schema_change_uniq_keys_regression_test VALUES + (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20) + """ + qt_sc """ + select count(*) from schema_change_uniq_keys_regression_test + """ + + // add column + sql """ + ALTER table ${tableName} ADD COLUMN new_column INT default "1" + """ + + sql """ SELECT * FROM ${tableName} WHERE user_id=2 """ + + sql """ INSERT INTO ${tableName} (`user_id`,`date`,`city`,`age`,`sex`,`last_visit_date`,`last_update_date`, + `last_visit_date_not_null`,`cost`,`max_dwell_time`,`min_dwell_time`) + VALUES + (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20) + """ + + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=3 """ + + + sql """ INSERT INTO ${tableName} VALUES + (3, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) + """ + qt_sc """ SELECT * FROM ${tableName} WHERE user_id = 3 """ + + qt_sc """ select count(*) from ${tableName} """ + + sql """ INSERT INTO ${tableName} VALUES + (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) + """ + + qt_sc """ select * from ${tableName} where user_id = 4 """ + + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) + """ + sql """ INSERT INTO ${tableName} VALUES + (5, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20, 2) + """ + + // compaction + String[][] tablets = sql """ show tablets from ${tableName}; """ + for (String[] tablet in tablets) { String tablet_id = tablet[0] backend_id = tablet[2] - (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) - logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def compactionStatus = parseJson(out.trim()) - assertEquals("success", compactionStatus.status.toLowerCase()) - running = compactionStatus.run_status - } while (running) - } - qt_sc """ select count(*) from ${tableName} """ + logger.info("run compaction:" + tablet_id) + (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) + //assertEquals(code, 0) + } + + // wait for all compactions done + for (String[] tablet in tablets) { + boolean running = true + do { + Thread.sleep(100) + String tablet_id = tablet[0] + backend_id = tablet[2] + (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) + logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) + assertEquals(code, 0) + def compactionStatus = parseJson(out.trim()) + assertEquals("success", compactionStatus.status.toLowerCase()) + running = compactionStatus.run_status + } while (running) + } + qt_sc """ select count(*) from ${tableName} """ - qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ + qt_sc """ SELECT * FROM ${tableName} WHERE user_id=2 """ } finally { //try_sql("DROP TABLE IF EXISTS ${tableName}") diff --git a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy index f56c49a2d458b8..6167b757d095bf 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy @@ -44,21 +44,6 @@ suite ("test_uniq_mv_schema_change") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy index c2d8e366d485d5..515d1ee9151138 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy @@ -48,21 +48,6 @@ suite ("test_uniq_rollup_schema_change") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git 
a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy index 43c08dc7300f86..78d97b539c3b81 100644 --- a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy @@ -28,21 +28,6 @@ suite ("test_uniq_vals_schema_change") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ diff --git a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy index 8f3f0d3fcec133..38bd996e89de9e 100644 --- a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy +++ b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy @@ -32,22 +32,6 @@ suite ("test_varchar_schema_change") { def backendId_to_backendHttpPort = [:] getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2]) - } - } - sql """ DROP TABLE IF EXISTS ${tableName} """ sql """ CREATE TABLE IF NOT EXISTS ${tableName} ( diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys.groovy index 409bfd89c3ea6d..480c5df73322f4 100644 --- a/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys.groovy +++ b/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys.groovy @@ -27,25 +27,6 @@ suite("test_segcompaction_agg_keys") { try { - String backend_id; - def backendId_to_backendIP = [:] - def backendId_to_backendHttpPort = [:] - getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort); - backend_id = backendId_to_backendIP.keySet()[0] - def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id)) - - logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err) - assertEquals(code, 0) - def configList = parseJson(out.trim()) - assert configList instanceof List - - boolean disableAutoCompaction = true - for (Object ele in (List) configList) { - assert ele instanceof List - if (((List) ele)[0] == "disable_auto_compaction") { - 
disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys_index.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys_index.groovy
index 596f70ca683ebd..d7975508975464 100644
--- a/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys_index.groovy
+++ b/regression-test/suites/segcompaction_p2/test_segcompaction_agg_keys_index.groovy
@@ -27,24 +27,6 @@ suite("test_segcompaction_agg_keys_index") {
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys.groovy
index 6dcea441f014ee..857d5bba863b5b 100644
--- a/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys.groovy
+++ b/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys.groovy
@@ -27,26 +27,6 @@ suite("test_segcompaction_dup_keys") {
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys_index.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys_index.groovy
index cc042c40f5bf3a..50ae8f2ca48d40 100644
--- a/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys_index.groovy
+++ b/regression-test/suites/segcompaction_p2/test_segcompaction_dup_keys_index.groovy
@@ -27,25 +27,6 @@ suite("test_segcompaction_dup_keys_index") {
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys.groovy
index 5e31abfd22be96..6f98694a31ecfb 100644
--- a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys.groovy
+++ b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys.groovy
@@ -27,26 +27,6 @@ suite("test_segcompaction_unique_keys") {
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_index.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_index.groovy
index eb39c48e0b79a5..1602e074a5013a 100644
--- a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_index.groovy
+++ b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_index.groovy
@@ -27,25 +27,6 @@ suite("test_segcompaction_unique_keys_index") {
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow.groovy
index d1a7e77406643d..31111d3939e935 100644
--- a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow.groovy
+++ b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow.groovy
@@ -27,26 +27,6 @@ suite("test_segcompaction_unique_keys_mow") {
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """
diff --git a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow_index.groovy b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow_index.groovy
index 628ac6dffd9418..c9fa404cb966fe 100644
--- a/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow_index.groovy
+++ b/regression-test/suites/segcompaction_p2/test_segcompaction_unique_keys_mow_index.groovy
@@ -27,26 +27,6 @@ suite("test_segcompaction_unique_keys_mow_index") {
 
     try {
-        String backend_id;
-        def backendId_to_backendIP = [:]
-        def backendId_to_backendHttpPort = [:]
-        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
-        backend_id = backendId_to_backendIP.keySet()[0]
-        def (code, out, err) = show_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id))
-
-        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
-        assertEquals(code, 0)
-        def configList = parseJson(out.trim())
-        assert configList instanceof List
-
-        boolean disableAutoCompaction = true
-        for (Object ele in (List) configList) {
-            assert ele instanceof List
-            if (((List) ele)[0] == "disable_auto_compaction") {
-                disableAutoCompaction = Boolean.parseBoolean(((List) ele)[2])
-            }
-        }
         sql """ DROP TABLE IF EXISTS ${tableName} """
         sql """

From 87556e4e7de35cd3c45a6b0a661d8a2409acc6d3 Mon Sep 17 00:00:00 2001
From: csun5285
Date: Wed, 7 Aug 2024 19:28:05 +0800
Subject: [PATCH 2/2] fix rebase

---
 .../test_clone_missing_version.groovy         | 91 -------------------
 1 file changed, 91 deletions(-)
 delete mode 100644 regression-test/suites/clone_p0/test_clone_missing_version.groovy

diff --git a/regression-test/suites/clone_p0/test_clone_missing_version.groovy b/regression-test/suites/clone_p0/test_clone_missing_version.groovy
deleted file mode 100644
index 2981cf3c5e3638..00000000000000
--- a/regression-test/suites/clone_p0/test_clone_missing_version.groovy
+++ /dev/null
@@ -1,91 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-import org.apache.doris.regression.suite.ClusterOptions
-import org.apache.doris.regression.util.NodeType
-
-suite('test_clone_missing_version') {
-    def options = new ClusterOptions()
-    options.feConfigs += [
-        'disable_tablet_scheduler=true',
-        'tablet_checker_interval_ms=500',
-        'schedule_batch_size=1000',
-        'schedule_slot_num_per_hdd_path=1000',
-    ]
-    options.beConfigs += [
-        'report_tablet_interval_seconds=1',
-    ]
-
-    options.enableDebugPoints()
-    options.cloudMode = false
-    docker(options) {
-        def injectName = 'TxnManager.prepare_txn.random_failed'
-        def be = sql_return_maparray('show backends').get(0)
-        def addInject = { ->
-            GetDebugPoint().enableDebugPoint(be.Host, be.HttpPort as int, NodeType.BE, injectName, [percent:1.0])
-        }
-        def deleteInject = { ->
-            GetDebugPoint().disableDebugPoint(be.Host, be.HttpPort as int, NodeType.BE, injectName)
-        }
-
-        sql """
-            CREATE TABLE t (k INT) DISTRIBUTED BY HASH(k) BUCKETS 1 PROPERTIES (
-                "disable_auto_compaction" = "true"
-            )
-            """
-
-        sql 'INSERT INTO t VALUES(2)'
-
-        addInject()
-
-        sql 'INSERT INTO t VALUES(3)'
-        sql 'INSERT INTO t VALUES(4)'
-        sql 'INSERT INTO t VALUES(5)'
-
-        deleteInject()
-
-        sql 'INSERT INTO t VALUES(6)'
-
-        addInject()
-
-        sql 'INSERT INTO t VALUES(7)'
-        sql 'INSERT INTO t VALUES(8)'
-        sql 'INSERT INTO t VALUES(9)'
-
-        deleteInject()
-
-        sql 'INSERT INTO t VALUES(10)'
-
-        sleep 5000
-
-        def replica = sql_return_maparray('show tablets from t').find { it.BackendId.toLong().equals(be.BackendId.toLong()) }
-        assertNotNull(replica)
-        assertEquals(4, replica.VersionCount.toInteger())
-        assertEquals(2, replica.Version.toInteger())
-        assertEquals(9, replica.LstFailedVersion.toInteger())
-
-        setFeConfig('disable_tablet_scheduler', false)
-
-        sleep 10000
-
-        replica = sql_return_maparray('show tablets from t').find { it.BackendId.toLong().equals(be.BackendId.toLong()) }
-        assertNotNull(replica)
-        assertEquals(10, replica.VersionCount.toInteger())
-        assertEquals(10, replica.Version.toInteger())
-        assertEquals(-1, replica.LstFailedVersion.toInteger())
-    }
-}