From 65028d9734fcddec4a1801a70eea83f08f5eaa86 Mon Sep 17 00:00:00 2001 From: yujun777 Date: Wed, 16 Aug 2023 19:09:50 +0800 Subject: [PATCH 1/7] relocate colocate table use group's replica alloc --- .../clone/ColocateTableCheckerAndBalancer.java | 7 +++---- .../apache/doris/clone/TabletScheduler.java | 18 +++++++++++++++--- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java index 5c18c2bd468263..ce500b171f5e68 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java @@ -219,6 +219,8 @@ private void matchGroup() { continue; } + ColocateGroupSchema groupSchema = colocateIndex.getGroupSchema(groupId); + ReplicaAllocation replicaAlloc = groupSchema.getReplicaAlloc(); String unstableReason = null; OUT: for (Long tableId : tableIds) { @@ -237,8 +239,6 @@ private void matchGroup() { olapTable.readLock(); try { for (Partition partition : olapTable.getPartitions()) { - ReplicaAllocation replicaAlloc - = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()); short replicationNum = replicaAlloc.getTotalReplicaNum(); long visibleVersion = partition.getVisibleVersion(); // Here we only get VISIBLE indexes. All other indexes are not queryable. 
@@ -269,8 +269,7 @@ private void matchGroup() { TabletSchedCtx tabletCtx = new TabletSchedCtx( TabletSchedCtx.Type.REPAIR, db.getId(), tableId, partition.getId(), index.getId(), tablet.getId(), - olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()), - System.currentTimeMillis()); + replicaAlloc, System.currentTimeMillis()); // the tablet status will be set again when being scheduled tabletCtx.setTabletStatus(st); tabletCtx.setPriority(Priority.NORMAL); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java index d6a8e1efa0e8f6..ee9da3ac10008f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java @@ -19,6 +19,7 @@ import org.apache.doris.analysis.AdminCancelRebalanceDiskStmt; import org.apache.doris.analysis.AdminRebalanceDiskStmt; +import org.apache.doris.catalog.ColocateGroupSchema; import org.apache.doris.catalog.ColocateTableIndex; import org.apache.doris.catalog.ColocateTableIndex.GroupId; import org.apache.doris.catalog.Database; @@ -490,15 +491,20 @@ private void scheduleTablet(TabletSchedCtx tabletCtx, AgentBatchTask batchTask) throw new SchedException(Status.UNRECOVERABLE, "index does not exist"); } + ReplicaAllocation replicaAlloc = null; Tablet tablet = idx.getTablet(tabletId); Preconditions.checkNotNull(tablet); - ReplicaAllocation replicaAlloc = tbl.getPartitionInfo().getReplicaAllocation(partition.getId()); - if (isColocateTable) { GroupId groupId = colocateTableIndex.getGroup(tbl.getId()); if (groupId == null) { throw new SchedException(Status.UNRECOVERABLE, "colocate group does not exist"); } + ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(groupId); + if (groupSchema == null) { + throw new SchedException(Status.UNRECOVERABLE, + "colocate group schema " + groupId + " does not exist"); + } + replicaAlloc = 
groupSchema.getReplicaAlloc(); int tabletOrderIdx = tabletCtx.getTabletOrderIdx(); if (tabletOrderIdx == -1) { @@ -512,6 +518,7 @@ private void scheduleTablet(TabletSchedCtx tabletCtx, AgentBatchTask batchTask) statusPair = Pair.of(st, Priority.HIGH); tabletCtx.setColocateGroupBackendIds(backendsSet); } else { + replicaAlloc = tbl.getPartitionInfo().getReplicaAllocation(partition.getId()); List aliveBeIds = infoService.getAllBackendIds(true); statusPair = tablet.getHealthStatusWithPriority( infoService, partition.getVisibleVersion(), replicaAlloc, aliveBeIds); @@ -1484,14 +1491,18 @@ private void tryAddAfterFinished(TabletSchedCtx tabletCtx) { return; } - replicaAlloc = tbl.getPartitionInfo().getReplicaAllocation(partition.getId()); boolean isColocateTable = colocateTableIndex.isColocateTable(tbl.getId()); if (isColocateTable) { GroupId groupId = colocateTableIndex.getGroup(tbl.getId()); if (groupId == null) { return; } + ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(groupId); + if (groupSchema == null) { + return; + } + replicaAlloc = groupSchema.getReplicaAlloc(); int tabletOrderIdx = tabletCtx.getTabletOrderIdx(); if (tabletOrderIdx == -1) { tabletOrderIdx = idx.getTabletOrderIdx(tablet.getId()); @@ -1504,6 +1515,7 @@ private void tryAddAfterFinished(TabletSchedCtx tabletCtx) { statusPair = Pair.of(st, Priority.HIGH); tabletCtx.setColocateGroupBackendIds(backendsSet); } else { + replicaAlloc = tbl.getPartitionInfo().getReplicaAllocation(partition.getId()); List aliveBeIds = infoService.getAllBackendIds(true); statusPair = tablet.getHealthStatusWithPriority( infoService, partition.getVisibleVersion(), replicaAlloc, aliveBeIds); From d15dcc6bc7a465bf0625f43af283e0bf3e2edb38 Mon Sep 17 00:00:00 2001 From: yujun777 Date: Wed, 16 Aug 2023 19:28:02 +0800 Subject: [PATCH 2/7] update report and show tablet --- .../org/apache/doris/common/proc/TabletHealthProcDir.java | 5 +++++ .../src/main/java/org/apache/doris/master/ReportHandler.java | 5 +++++ 2
files changed, 10 insertions(+) diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java index 93f54483cbfa5e..3ce3ff74c7adaf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java @@ -17,6 +17,7 @@ package org.apache.doris.common.proc; +import org.apache.doris.catalog.ColocateGroupSchema; import org.apache.doris.catalog.ColocateTableIndex; import org.apache.doris.catalog.DatabaseIf; import org.apache.doris.catalog.Env; @@ -185,6 +186,10 @@ static class DBTabletStatistic { ++tabletNum; Tablet.TabletStatus res = null; if (groupId != null) { + ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(groupId); + if (groupSchema != null) { + replicaAlloc = groupSchema.getReplicaAlloc(); + } Set backendsSet = colocateTableIndex.getTabletBackendsByGroup(groupId, i); res = tablet.getColocateHealthStatus(partition.getVisibleVersion(), replicaAlloc, backendsSet); diff --git a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java index 2e6be68604160a..4461ba19473a02 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/master/ReportHandler.java @@ -19,6 +19,7 @@ import org.apache.doris.catalog.BinlogConfig; +import org.apache.doris.catalog.ColocateGroupSchema; import org.apache.doris.catalog.ColocateTableIndex; import org.apache.doris.catalog.Database; import org.apache.doris.catalog.Env; @@ -1172,6 +1173,10 @@ private static boolean addReplica(long tabletId, TabletMeta tabletMeta, TTabletI int tabletOrderIdx = materializedIndex.getTabletOrderIdx(tabletId); Preconditions.checkState(tabletOrderIdx != -1, "get tablet materializedIndex for %s fail", tabletId); Set backendsSet = 
colocateTableIndex.getTabletBackendsByGroup(groupId, tabletOrderIdx); + ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(groupId); + if (groupSchema != null) { + replicaAlloc = groupSchema.getReplicaAlloc(); + } TabletStatus status = tablet.getColocateHealthStatus(visibleVersion, replicaAlloc, backendsSet); if (status == TabletStatus.HEALTHY) { From c48c581c2c1cce29bed84e7fea89b55538f1a9cd Mon Sep 17 00:00:00 2001 From: yujun777 Date: Tue, 22 Aug 2023 16:27:04 +0800 Subject: [PATCH 3/7] alter colocate group --- .../Alter/ALTER-COLOCATE-GROUP.md | 89 ++ docs/sidebars.json | 1 + .../Alter/ALTER-COLOCATE-GROUP.md | 91 ++ .../Alter/ALTER-WORKLOAD-GROUP.md | 4 +- fe/fe-core/src/main/cup/sql_parser.cup | 5 + .../analysis/AlterColocateGroupStmt.java | 69 + .../doris/catalog/ColocateGroupSchema.java | 4 + .../doris/catalog/ColocateTableIndex.java | 175 ++- .../apache/doris/catalog/PartitionInfo.java | 4 + .../ColocateTableCheckerAndBalancer.java | 126 +- .../common/proc/ColocationGroupProcDir.java | 2 +- .../httpv2/meta/ColocateMetaService.java | 2 +- .../apache/doris/journal/JournalEntity.java | 1 + .../doris/persist/ColocatePersistInfo.java | 27 +- .../org/apache/doris/persist/EditLog.java | 9 + .../apache/doris/persist/OperationType.java | 1 + .../java/org/apache/doris/qe/DdlExecutor.java | 3 + fe/fe-core/src/main/jflex/sql_scanner.flex | 1 + fe/fe-core/src/test/java/org/apache/doris/:w | 1336 +++++++++++++++++ .../org/apache/doris/alter/AlterTest.java | 184 ++- .../apache/doris/utframe/UtFrameUtils.java | 7 +- 21 files changed, 2114 insertions(+), 27 deletions(-) create mode 100644 docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md create mode 100644 docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md create mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColocateGroupStmt.java create mode 100644 fe/fe-core/src/test/java/org/apache/doris/:w 
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md new file mode 100644 index 00000000000000..ecdecdd48a1646 --- /dev/null +++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md @@ -0,0 +1,89 @@ +--- +{ +"title": "ALTER-COLOCATE-GROUP", +"language": "en" +} +--- + + + +## ALTER-COLOCATE-GROUP + +### Name + +ALTER COLOCATE GROUP + + + +### Description + +This statement is used to modify the colocation group. + +Syntax: + +```sql +ALTER COLOCATE GROUP "full_group_name" +SET ( + property_list +); +``` + +NOTE: + +1. `full_group_name` is the full name of the colocation group, which can be divided into two cases: + - If the group is global, that is, its name starts with `__global__`, then `full_group_name` is equal to `group_name`; + - If the group is not global, that is, its name does not start with `__global__`, then it belongs to a certain Database, `full_group_name` is equal to `dbId` + `_` + `group_name` + +2. `full_group_name` can also be viewed through the command `show proc '/proc/colocation_group'`; + + +3. property_list is a colocation group attribute, currently only supports modifying `replication_num` and `replication_allocation`. After modifying these two attributes of the colocation group, at the same time, change the attribute `default.replication_allocation`, the attribute `dynamic.replication_allocation` of the table of the group, and the `replication_allocation` of the existing partition to be the same as it. + +### Example + +1. Modify the number of copies of a global group + + ```sql + # Set "colocate_with" = "__global__foo" when creating the table + + ALTER COLOCATE GROUP __global__foo + SET ( + "replication_num"="1" + ); + ``` + +2. 
Modify the number of copies of a non-global group + + ```sql + # Set "colocate_with" = "bar" when creating the table, and the dbId of the Database where the table is located is 10231 + + ALTER COLOCATE GROUP 10231_bar + SET ( + "replication_num"="1" + ); + ``` + +### Keywords + +```sql +ALTER, COLOCATE, GROUP +``` + +### Best Practice diff --git a/docs/sidebars.json b/docs/sidebars.json index cfc10dbb5174fe..0caf967961af3a 100644 --- a/docs/sidebars.json +++ b/docs/sidebars.json @@ -924,6 +924,7 @@ "sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-VIEW", "sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-STORAGE-POLICY", "sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-RESOURCE", + "sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP", "sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-WORKLOAD-GROUP", "sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-SQL-BLOCK-RULE", "sql-manual/sql-reference/Data-Definition-Statements/Alter/PAUSE-JOB", diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md new file mode 100644 index 00000000000000..de8123b44d6802 --- /dev/null +++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md @@ -0,0 +1,91 @@ +--- +{ +"title": "ALTER-COLOCATE-GROUP", +"language": "zh-CN" +} +--- + + + +## ALTER-COLOCATE-GROUP + +### Name + +ALTER COLOCATE GROUP + + + +### Description + +该语句用于修改 Colocation Group 的属性。 + +语法: + +```sql +ALTER COLOCATE GROUP "full_group_name" +SET ( + property_list +); +``` + +注意: + +1. `full_group_name`是colocation group名称全称,其分为两种情况: + - 如果group是全局的,即它的名称是以`__global__` 开头的,那么`full_group_name`即等于`group_name`; + - 如果group不是全局的,即它的名称不是以`__global__ `开头的,那么它是属于某个Database的,`full_group_name ` 等于 `dbId` + `_` + `group_name`。 + +2. 
`full_group_name` 也可以通过命令 `show proc '/proc/colocation_group'` 来查看。 + + +3. property_list 是colocation group属性,目前只支持修改`replication_num` 和 `replication_allocation`。修改colocation group的这两个属性修改之后,同时把该group的表的属性`default.replication_allocation` 、属性`dynamic.replication_allocation `、以及已有分区的`replication_allocation`改成跟它一样。 + + + +### Example + +1. 修改一个全局group的副本数 + + ```sql + # 建表时设置 "colocate_with" = "__global__foo" + + ALTER COLOCATE GROUP __global__foo + SET ( + "replication_num"="1" + ); + ``` + +2. 修改一个非全局group的副本数 + + ```sql + # 建表时设置 "colocate_with" = "bar",且表所在Database的dbId为10231 + + ALTER COLOCATE GROUP 10231_bar + SET ( + "replication_num"="1" + ); + ``` + +### Keywords + +```sql +ALTER, COLOCATE , GROUP +``` + +### Best Practice diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-WORKLOAD-GROUP.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-WORKLOAD-GROUP.md index 1bc19780f6c72a..e3c7c17b660764 100644 --- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-WORKLOAD-GROUP.md +++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-WORKLOAD-GROUP.md @@ -1,6 +1,6 @@ --- { -"title": "ALTER-WORKLOAD -GROUP", +"title": "ALTER-WORKLOAD-GROUP", "language": "zh-CN" } --- @@ -24,7 +24,7 @@ specific language governing permissions and limitations under the License. 
--> -## ALTER-WORKLOAD -GROUP +## ALTER-WORKLOAD-GROUP ### Name diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index 72c99d3fbbada5..92c8f8d02478bc 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -297,6 +297,7 @@ terminal String KW_CLUSTERS, KW_COLLATE, KW_COLLATION, + KW_COLOCATE, KW_COLUMN, KW_COLUMNS, KW_COMMENT, @@ -1347,6 +1348,10 @@ alter_stmt ::= {: RESULT = new AlterResourceStmt(resourceName, properties); :} + | KW_ALTER KW_COLOCATE KW_GROUP ident_or_text:colocateGroupName KW_SET LPAREN key_value_map:properties RPAREN + {: + RESULT = new AlterColocateGroupStmt(colocateGroupName, properties); + :} | KW_ALTER KW_WORKLOAD KW_GROUP ident_or_text:workloadGroupName opt_properties:properties {: RESULT = new AlterWorkloadGroupStmt(workloadGroupName, properties); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColocateGroupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColocateGroupStmt.java new file mode 100644 index 00000000000000..02468110a4b6b5 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColocateGroupStmt.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +import org.apache.doris.catalog.Env; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.UserException; +import org.apache.doris.common.util.PrintableMap; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.qe.ConnectContext; + +import java.util.Map; + +public class AlterColocateGroupStmt extends DdlStmt { + private final String colocateGroupName; + private final Map properties; + + public AlterColocateGroupStmt(String colocateGroupName, Map properties) { + this.colocateGroupName = colocateGroupName; + this.properties = properties; + } + + public String getColocateGroupName() { + return colocateGroupName; + } + + public Map getProperties() { + return properties; + } + + @Override + public void analyze(Analyzer analyzer) throws UserException { + super.analyze(analyzer); + + if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv( + ConnectContext.get(), PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); + } + + if (properties == null || properties.isEmpty()) { + throw new AnalysisException("Colocate group properties can't be null"); + } + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("ALTER COLOCATE GROUP '").append(colocateGroupName).append("' "); + sb.append("PROPERTIES(").append(new PrintableMap<>(properties, " = ", true, false)).append(")"); + return sb.toString(); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java index b5004973c37f1c..57d512b9789d9e 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java @@ -66,6 +66,10 @@ public ReplicaAllocation getReplicaAlloc() { return replicaAlloc; } + public void setReplicaAlloc(ReplicaAllocation replicaAlloc) { + this.replicaAlloc = replicaAlloc; + } + public List getDistributionColTypes() { return distributionColTypes; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java index 23703278fd890b..14d004ce40e9b0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java @@ -17,10 +17,16 @@ package org.apache.doris.catalog; +import org.apache.doris.analysis.AlterColocateGroupStmt; +import org.apache.doris.clone.ColocateTableCheckerAndBalancer; +import org.apache.doris.common.DdlException; import org.apache.doris.common.FeMetaVersion; import org.apache.doris.common.MetaNotFoundException; +import org.apache.doris.common.UserException; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; +import org.apache.doris.common.util.DynamicPartitionUtil; +import org.apache.doris.common.util.PropertyAnalyzer; import org.apache.doris.persist.ColocatePersistInfo; import org.apache.doris.persist.gson.GsonPostProcessable; import org.apache.doris.persist.gson.GsonUtils; @@ -249,10 +255,34 @@ public void addBackendsPerBucketSeq(GroupId groupId, Map>> } } - public void addBackendsPerBucketSeqByTag(GroupId groupId, Tag tag, List> backendsPerBucketSeq) { + public void setBackendsPerBucketSeq(GroupId groupId, Map>> backendsPerBucketSeq) { writeLock(); try { + Map>> backendsPerBucketSeqMap = group2BackendsPerBucketSeq.row(groupId); + if (backendsPerBucketSeqMap != null) { + backendsPerBucketSeqMap.clear(); + } + for (Map.Entry>> entry : 
backendsPerBucketSeq.entrySet()) { + group2BackendsPerBucketSeq.put(groupId, entry.getKey(), entry.getValue()); + } + } finally { + writeUnlock(); + } + } + + public boolean addBackendsPerBucketSeqByTag(GroupId groupId, Tag tag, List> backendsPerBucketSeq, + ReplicaAllocation originReplicaAlloc) { + writeLock(); + try { + ColocateGroupSchema groupSchema = group2Schema.get(groupId); + // replica allocation has outdate + if (groupSchema != null && !originReplicaAlloc.equals(groupSchema.getReplicaAlloc())) { + LOG.info("replica allocation has outdate for group {}, old replica alloc {}, new replica alloc {}", + groupId, originReplicaAlloc.getAllocMap(), groupSchema.getReplicaAlloc()); + return false; + } group2BackendsPerBucketSeq.put(groupId, tag, backendsPerBucketSeq); + return true; } finally { writeUnlock(); } @@ -277,12 +307,20 @@ public void markGroupUnstable(GroupId groupId, String reason, boolean needEditLo } } - public void markGroupStable(GroupId groupId, boolean needEditLog) { + public void markGroupStable(GroupId groupId, boolean needEditLog, ReplicaAllocation originReplicaAlloc) { writeLock(); try { if (!group2Tables.containsKey(groupId)) { return; } + // replica allocation is outdate + ColocateGroupSchema groupSchema = group2Schema.get(groupId); + if (groupSchema != null && originReplicaAlloc != null + && !originReplicaAlloc.equals(groupSchema.getReplicaAlloc())) { + LOG.warn("mark group {} failed, replica alloc has outdate, old replica alloc {}, new replica alloc {}", + groupId, originReplicaAlloc.getAllocMap(), groupSchema.getReplicaAlloc()); + return; + } if (unstableGroups.remove(groupId)) { group2ErrMsgs.put(groupId, ""); if (needEditLog) { @@ -604,13 +642,23 @@ public void replayMarkGroupUnstable(ColocatePersistInfo info) { } public void replayMarkGroupStable(ColocatePersistInfo info) { - markGroupStable(info.getGroupId(), false); + markGroupStable(info.getGroupId(), false, null); } public void replayRemoveTable(ColocatePersistInfo info) { 
removeTable(info.getTableId()); } + public void replayModifyReplicaAlloc(ColocatePersistInfo info) throws UserException { + writeLock(); + try { + modifyColocateGroupReplicaAllocation(info.getGroupId(), info.getReplicaAlloc(), + info.getBackendsPerBucketSeq(), false); + } finally { + writeUnlock(); + } + } + // only for test public void clear() { writeLock(); @@ -633,6 +681,14 @@ public List> getInfos() { List info = Lists.newArrayList(); GroupId groupId = entry.getValue(); info.add(groupId.toString()); + String dbName = ""; + if (groupId.dbId != 0) { + Database db = Env.getCurrentInternalCatalog().getDbNullable(groupId.dbId); + if (db != null) { + dbName = db.getFullName(); + } + } + info.add(dbName); info.add(entry.getKey()); info.add(Joiner.on(", ").join(group2Tables.get(groupId))); ColocateGroupSchema groupSchema = group2Schema.get(groupId); @@ -756,4 +812,117 @@ public void setErrMsgForGroup(GroupId groupId, String message) { public Map getTable2Group() { return table2Group; } + + public void alterColocateGroup(AlterColocateGroupStmt stmt) throws UserException { + writeLock(); + try { + Map properties = stmt.getProperties(); + String fullGroupName = stmt.getColocateGroupName(); + ColocateGroupSchema groupSchema = getGroupSchema(fullGroupName); + if (groupSchema == null) { + throw new DdlException("Not found colocate group [" + fullGroupName + "]"); + } + + GroupId groupId = groupSchema.getGroupId(); + + if (properties.size() > 1) { + throw new DdlException("Can only set one colocate group property at a time"); + } + + if (properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM) + || properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) { + ReplicaAllocation replicaAlloc = PropertyAnalyzer.analyzeReplicaAllocation(properties, ""); + Preconditions.checkState(!replicaAlloc.isNotSet()); + Env.getCurrentSystemInfo().checkReplicaAllocation(replicaAlloc); + Map>> backendsPerBucketSeq = getBackendsPerBucketSeq(groupId); + Map>> 
newBackendsPerBucketSeq = Maps.newHashMap(); + for (Map.Entry>> entry : backendsPerBucketSeq.entrySet()) { + List> newList = Lists.newArrayList(); + for (List backends : entry.getValue()) { + newList.add(Lists.newArrayList(backends)); + } + newBackendsPerBucketSeq.put(entry.getKey(), newList); + } + try { + ColocateTableCheckerAndBalancer.modifyGroupReplicaAllocation(replicaAlloc, + newBackendsPerBucketSeq, groupSchema.getBucketsNum()); + } catch (Exception e) { + LOG.warn("modify group [{}, {}] to replication allocation {} failed, bucket seq {}", + fullGroupName, groupId, replicaAlloc, backendsPerBucketSeq, e); + throw new DdlException(e.getMessage()); + } + backendsPerBucketSeq = newBackendsPerBucketSeq; + Preconditions.checkState(backendsPerBucketSeq.size() == replicaAlloc.getAllocMap().size()); + modifyColocateGroupReplicaAllocation(groupSchema.getGroupId(), replicaAlloc, + backendsPerBucketSeq, true); + } else { + throw new DdlException("Unknown colocate group property: " + properties.keySet()); + } + } finally { + writeUnlock(); + } + } + + private void modifyColocateGroupReplicaAllocation(GroupId groupId, ReplicaAllocation replicaAlloc, + Map>> backendsPerBucketSeq, boolean needEditLog) throws UserException { + ColocateGroupSchema groupSchema = getGroupSchema(groupId); + if (groupSchema == null) { + LOG.warn("not found group {}", groupId); + return; + } + + List tableIds = getAllTableIds(groupId); + for (Long tableId : tableIds) { + long dbId = groupId.dbId; + if (dbId == 0) { + dbId = groupId.getDbIdByTblId(tableId); + } + Database db = Env.getCurrentInternalCatalog().getDbNullable(dbId); + if (db == null) { + continue; + } + OlapTable table = (OlapTable) db.getTableNullable(tableId); + if (table == null || !isColocateTable(table.getId())) { + continue; + } + table.writeLock(); + try { + Map tblProperties = Maps.newHashMap(); + tblProperties.put("default." 
+ PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION, + replicaAlloc.toCreateStmt()); + table.setReplicaAllocation(tblProperties); + if (table.dynamicPartitionExists()) { + TableProperty tableProperty = table.getTableProperty(); + // Merge the new properties with origin properties, and then analyze them + Map origDynamicProperties = tableProperty.getOriginDynamicPartitionProperty(); + origDynamicProperties.put(DynamicPartitionProperty.REPLICATION_ALLOCATION, + replicaAlloc.toCreateStmt()); + Map analyzedDynamicPartition = DynamicPartitionUtil.analyzeDynamicPartition( + origDynamicProperties, table, db); + tableProperty.modifyTableProperties(analyzedDynamicPartition); + tableProperty.buildDynamicProperty(); + } + for (ReplicaAllocation alloc : table.getPartitionInfo().getPartitionReplicaAllocations().values()) { + Map allocMap = alloc.getAllocMap(); + allocMap.clear(); + allocMap.putAll(replicaAlloc.getAllocMap()); + } + } finally { + table.writeUnlock(); + } + } + + if (!backendsPerBucketSeq.equals(group2BackendsPerBucketSeq.row(groupId))) { + markGroupUnstable(groupId, "change replica allocation", false); + } + groupSchema.setReplicaAlloc(replicaAlloc); + setBackendsPerBucketSeq(groupId, backendsPerBucketSeq); + + if (needEditLog) { + ColocatePersistInfo info = ColocatePersistInfo.createForModifyReplicaAlloc(groupId, + replicaAlloc, backendsPerBucketSeq); + Env.getCurrentEnv().getEditLog().logColocateModifyRepliaAlloc(info); + } + LOG.info("modify group {} replication allocation to {}, is replay {}", groupId, replicaAlloc, !needEditLog); + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java index b7ca3c622cc7b0..34f80a91038e26 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java @@ -261,6 +261,10 @@ public void setStoragePolicy(long partitionId, String 
storagePolicy) { idToStoragePolicy.put(partitionId, storagePolicy); } + public Map getPartitionReplicaAllocations() { + return idToReplicaAllocation; + } + public ReplicaAllocation getReplicaAllocation(long partitionId) { if (!idToReplicaAllocation.containsKey(partitionId)) { LOG.debug("failed to get replica allocation for partition: {}", partitionId); diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java index ce500b171f5e68..4ec8993be0d2b1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java @@ -35,6 +35,7 @@ import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; import org.apache.doris.common.FeConstants; +import org.apache.doris.common.UserException; import org.apache.doris.common.util.MasterDaemon; import org.apache.doris.persist.ColocatePersistInfo; import org.apache.doris.resource.Tag; @@ -183,7 +184,12 @@ private void relocateAndBalanceGroup() { List> balancedBackendsPerBucketSeq = Lists.newArrayList(); if (relocateAndBalance(groupId, tag, unavailableBeIdsInGroup, availableBeIds, colocateIndex, infoService, statistic, balancedBackendsPerBucketSeq)) { - colocateIndex.addBackendsPerBucketSeqByTag(groupId, tag, balancedBackendsPerBucketSeq); + if (!colocateIndex.addBackendsPerBucketSeqByTag(groupId, tag, balancedBackendsPerBucketSeq, + replicaAlloc)) { + LOG.warn("relocate group {} succ, but replica allocation has change, old replica alloc {}", + groupId, replicaAlloc); + continue; + } Map>> balancedBackendsPerBucketSeqMap = Maps.newHashMap(); balancedBackendsPerBucketSeqMap.put(tag, balancedBackendsPerBucketSeq); ColocatePersistInfo info = ColocatePersistInfo @@ -298,7 +304,7 @@ private void matchGroup() { // mark group as stable or unstable if 
(Strings.isNullOrEmpty(unstableReason)) { - colocateIndex.markGroupStable(groupId, true); + colocateIndex.markGroupStable(groupId, true, replicaAlloc); } else { colocateIndex.markGroupUnstable(groupId, unstableReason, true); } @@ -520,6 +526,122 @@ private List> getHostsPerBucketSeq(List> backendsPerBuck return hostsPerBucketSeq; } + public static void modifyGroupReplicaAllocation(ReplicaAllocation replicaAlloc, + Map>> backendBucketsSeq, int bucketNum) throws Exception { + Map allocMap = replicaAlloc.getAllocMap(); + List deleteTags = Lists.newArrayList(); + for (Tag tag : backendBucketsSeq.keySet()) { + if (!allocMap.containsKey(tag)) { + deleteTags.add(tag); + } + Preconditions.checkState(bucketNum == backendBucketsSeq.get(tag).size(), + bucketNum + " vs " + backendBucketsSeq.get(tag).size()); + } + deleteTags.forEach(tag -> backendBucketsSeq.remove(tag)); + + for (Tag tag : replicaAlloc.getAllocMap().keySet()) { + if (!backendBucketsSeq.containsKey(tag)) { + List> tagBackendBucketsSeq = Lists.newArrayList(); + for (int i = 0; i < bucketNum; i++) { + tagBackendBucketsSeq.add(Lists.newArrayList()); + } + backendBucketsSeq.put(tag, tagBackendBucketsSeq); + } + } + + Map backendToBucketNum = Maps.newHashMap(); + backendBucketsSeq.values().forEach(tagBackendIds -> + tagBackendIds.forEach(backendIds -> + backendIds.forEach(backendId -> backendToBucketNum.put( + backendId, backendToBucketNum.getOrDefault(backendId, 0) + 1)))); + + for (Tag tag : backendBucketsSeq.keySet()) { + List> tagBackendBucketsSeq = backendBucketsSeq.get(tag); + int oldReplicaNum = tagBackendBucketsSeq.get(0).size(); + for (List backendIdsOneBucket : tagBackendBucketsSeq) { + Preconditions.checkState(backendIdsOneBucket.size() == oldReplicaNum, + backendIdsOneBucket.size() + " vs " + oldReplicaNum); + } + + int newReplicaNum = allocMap.get(tag); + if (newReplicaNum == oldReplicaNum) { + continue; + } + + List backends = Env.getCurrentSystemInfo().getBackendsByTag(tag); + Set availableBeIds = 
backends.stream().filter(be -> be.isScheduleAvailable()) + .map(be -> be.getId()).collect(Collectors.toSet()); + + for (Long backendId : availableBeIds) { + if (!backendToBucketNum.containsKey(backendId)) { + backendToBucketNum.put(backendId, 0); + } + } + + for (int i = 0; i < tagBackendBucketsSeq.size(); i++) { + modifyGroupBucketReplicas(tag, newReplicaNum, tagBackendBucketsSeq.get(i), + availableBeIds, backendToBucketNum); + } + } + } + + private static void modifyGroupBucketReplicas(Tag tag, int newReplicaNum, List backendIds, + Set availableBeIds, Map backendToBucketNum) throws Exception { + final boolean smallIdFirst = Math.random() < 0.5; + if (backendIds.size() > newReplicaNum) { + backendIds.sort((id1, id2) -> { + boolean alive1 = availableBeIds.contains(id1); + boolean alive2 = availableBeIds.contains(id2); + if (alive1 != alive2) { + return alive1 ? -1 : 1; + } + int bucketNum1 = backendToBucketNum.getOrDefault(id1, 0); + int bucketNum2 = backendToBucketNum.getOrDefault(id2, 0); + if (bucketNum1 != bucketNum2) { + return Integer.compare(bucketNum1, bucketNum2); + } + + return smallIdFirst ? 
Long.compare(id1, id2) : Long.compare(id2, id1); + }); + + for (int i = backendIds.size() - 1; i >= newReplicaNum; i--) { + long backendId = backendIds.get(i); + backendIds.remove(i); + backendToBucketNum.put(backendId, backendToBucketNum.getOrDefault(backendId, 0) - 1); + } + } + + if (backendIds.size() < newReplicaNum) { + Set candBackendSet = Sets.newHashSet(); + candBackendSet.addAll(availableBeIds); + candBackendSet.removeAll(backendIds); + if (backendIds.size() + candBackendSet.size() < newReplicaNum) { + throw new UserException("Can not add backend for tag: " + tag); + } + + List candBackendList = Lists.newArrayList(candBackendSet); + candBackendList.sort((id1, id2) -> { + int bucketNum1 = backendToBucketNum.getOrDefault(id1, 0); + int bucketNum2 = backendToBucketNum.getOrDefault(id2, 0); + if (bucketNum1 != bucketNum2) { + return Integer.compare(bucketNum1, bucketNum2); + } + + return smallIdFirst ? Long.compare(id1, id2) : Long.compare(id2, id1); + }); + + int addNum = newReplicaNum - backendIds.size(); + for (int i = 0; i < addNum; i++) { + long backendId = candBackendList.get(i); + backendIds.add(backendId); + backendToBucketNum.put(backendId, backendToBucketNum.getOrDefault(backendId, 0) + 1); + } + } + + Preconditions.checkState(newReplicaNum == backendIds.size(), + newReplicaNum + " vs " + backendIds.size()); + } + private List> getSortedBackendReplicaNumPairs(List allAvailBackendIds, Set unavailBackendIds, LoadStatisticForTag statistic, List flatBackendsPerBucketSeq) { // backend id -> replica num, and sorted by replica num, descending. 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java index 85dd2c97be6590..f8d3cd17cd22c9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java @@ -33,7 +33,7 @@ */ public class ColocationGroupProcDir implements ProcDirInterface { public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("GroupId").add("GroupName").add("TableIds") + .add("GroupId").add("Database").add("FullGroupName").add("TableIds") .add("BucketsNum").add("ReplicaAllocation").add("DistCols").add("IsStable") .add("ErrorMsg").build(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/httpv2/meta/ColocateMetaService.java b/fe/fe-core/src/main/java/org/apache/doris/httpv2/meta/ColocateMetaService.java index 9e51d38de5cfcc..b7c2a615aac280 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/httpv2/meta/ColocateMetaService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/httpv2/meta/ColocateMetaService.java @@ -114,7 +114,7 @@ public Object group_stable(HttpServletRequest request, HttpServletResponse respo if ("POST".equalsIgnoreCase(method)) { colocateIndex.markGroupUnstable(groupId, "mark unstable via http api", true); } else if ("DELETE".equalsIgnoreCase(method)) { - colocateIndex.markGroupStable(groupId, true); + colocateIndex.markGroupStable(groupId, true, null); } return ResponseEntityBuilder.ok(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java index ce2768b46eac29..80ee1a19aac5f4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java @@ -469,6 +469,7 @@ public void readFields(DataInput in) throws 
IOException { isRead = true; break; } + case OperationType.OP_COLOCATE_MOD_REPLICA_ALLOC: case OperationType.OP_COLOCATE_ADD_TABLE: case OperationType.OP_COLOCATE_REMOVE_TABLE: case OperationType.OP_COLOCATE_BACKENDS_PER_BUCKETSEQ: diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/ColocatePersistInfo.java b/fe/fe-core/src/main/java/org/apache/doris/persist/ColocatePersistInfo.java index 459be6460524ea..429d4e0e1a6b94 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/ColocatePersistInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/ColocatePersistInfo.java @@ -18,6 +18,7 @@ package org.apache.doris.persist; import org.apache.doris.catalog.ColocateTableIndex.GroupId; +import org.apache.doris.catalog.ReplicaAllocation; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.persist.gson.GsonUtils; @@ -45,29 +46,38 @@ public class ColocatePersistInfo implements Writable { private long tableId; @SerializedName(value = "backendsPerBucketSeq") private Map>> backendsPerBucketSeq = Maps.newHashMap(); + @SerializedName(value = "replicaAlloc") + private ReplicaAllocation replicaAlloc = new ReplicaAllocation(); - private ColocatePersistInfo(GroupId groupId, long tableId, Map>> backendsPerBucketSeq) { + private ColocatePersistInfo(GroupId groupId, long tableId, Map>> backendsPerBucketSeq, + ReplicaAllocation replicaAlloc) { this.groupId = groupId; this.tableId = tableId; this.backendsPerBucketSeq = backendsPerBucketSeq; + this.replicaAlloc = replicaAlloc; } public static ColocatePersistInfo createForAddTable(GroupId groupId, long tableId, Map>> backendsPerBucketSeq) { - return new ColocatePersistInfo(groupId, tableId, backendsPerBucketSeq); + return new ColocatePersistInfo(groupId, tableId, backendsPerBucketSeq, new ReplicaAllocation()); } public static ColocatePersistInfo createForBackendsPerBucketSeq(GroupId groupId, Map>> backendsPerBucketSeq) { - return new 
ColocatePersistInfo(groupId, -1L, backendsPerBucketSeq); + return new ColocatePersistInfo(groupId, -1L, backendsPerBucketSeq, new ReplicaAllocation()); } public static ColocatePersistInfo createForMarkUnstable(GroupId groupId) { - return new ColocatePersistInfo(groupId, -1L, Maps.newHashMap()); + return new ColocatePersistInfo(groupId, -1L, Maps.newHashMap(), new ReplicaAllocation()); } public static ColocatePersistInfo createForMarkStable(GroupId groupId) { - return new ColocatePersistInfo(groupId, -1L, Maps.newHashMap()); + return new ColocatePersistInfo(groupId, -1L, Maps.newHashMap(), new ReplicaAllocation()); + } + + public static ColocatePersistInfo createForModifyReplicaAlloc(GroupId groupId, ReplicaAllocation replicaAlloc, + Map>> backendsPerBucketSeq) { + return new ColocatePersistInfo(groupId, -1L, backendsPerBucketSeq, replicaAlloc); } public static ColocatePersistInfo read(DataInput in) throws IOException { @@ -87,6 +97,10 @@ public Map>> getBackendsPerBucketSeq() { return backendsPerBucketSeq; } + public ReplicaAllocation getReplicaAlloc() { + return replicaAlloc; + } + @Override public void write(DataOutput out) throws IOException { Text.writeString(out, GsonUtils.GSON.toJson(this)); @@ -129,7 +143,7 @@ public boolean equals(Object obj) { ColocatePersistInfo info = (ColocatePersistInfo) obj; return tableId == info.tableId && groupId.equals(info.groupId) && backendsPerBucketSeq.equals( - info.backendsPerBucketSeq); + info.backendsPerBucketSeq) && replicaAlloc.equals(info.replicaAlloc); } @Override @@ -138,6 +152,7 @@ public String toString() { sb.append("table id: ").append(tableId); sb.append(" group id: ").append(groupId); sb.append(" backendsPerBucketSeq: ").append(backendsPerBucketSeq); + sb.append(" replicaAlloc: ").append(replicaAlloc); return sb.toString(); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java index 014a3c79feaa17..6e714eb3bfcb9a 100644 
--- a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java @@ -605,6 +605,11 @@ public static void loadJournal(Env env, Long logId, JournalEntity journal) { env.getColocateTableIndex().replayMarkGroupStable(info); break; } + case OperationType.OP_COLOCATE_MOD_REPLICA_ALLOC: { + final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); + env.getColocateTableIndex().replayModifyReplicaAlloc(info); + break; + } case OperationType.OP_MODIFY_TABLE_COLOCATE: { final TablePropertyInfo info = (TablePropertyInfo) journal.getData(); env.replayModifyTableColocate(info); @@ -1548,6 +1553,10 @@ public void logTruncateTable(TruncateTableInfo info) { Env.getCurrentEnv().getBinlogManager().addTruncateTable(info, logId); } + public void logColocateModifyRepliaAlloc(ColocatePersistInfo info) { + logEdit(OperationType.OP_COLOCATE_MOD_REPLICA_ALLOC, info); + } + public void logColocateAddTable(ColocatePersistInfo info) { logEdit(OperationType.OP_COLOCATE_ADD_TABLE, info); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java b/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java index 27cb57d214da9e..0235407f359aa9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java @@ -157,6 +157,7 @@ public class OperationType { public static final short OP_MODIFY_FRONTEND = 92; //colocate table + public static final short OP_COLOCATE_MOD_REPLICA_ALLOC = 93; public static final short OP_COLOCATE_ADD_TABLE = 94; public static final short OP_COLOCATE_REMOVE_TABLE = 95; public static final short OP_COLOCATE_BACKENDS_PER_BUCKETSEQ = 96; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java index 9056ac4e802ec5..2af8feda27a5a2 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/DdlExecutor.java @@ -31,6 +31,7 @@ import org.apache.doris.analysis.AdminSetTableStatusStmt; import org.apache.doris.analysis.AlterCatalogNameStmt; import org.apache.doris.analysis.AlterCatalogPropertyStmt; +import org.apache.doris.analysis.AlterColocateGroupStmt; import org.apache.doris.analysis.AlterColumnStatsStmt; import org.apache.doris.analysis.AlterDatabasePropertyStmt; import org.apache.doris.analysis.AlterDatabaseQuotaStmt; @@ -319,6 +320,8 @@ public static void execute(Env env, DdlStmt ddlStmt) throws Exception { env.getRefreshManager().handleRefreshDb((RefreshDbStmt) ddlStmt); } else if (ddlStmt instanceof AlterResourceStmt) { env.getResourceMgr().alterResource((AlterResourceStmt) ddlStmt); + } else if (ddlStmt instanceof AlterColocateGroupStmt) { + env.getColocateTableIndex().alterColocateGroup((AlterColocateGroupStmt) ddlStmt); } else if (ddlStmt instanceof AlterWorkloadGroupStmt) { env.getWorkloadGroupMgr().alterWorkloadGroup((AlterWorkloadGroupStmt) ddlStmt); } else if (ddlStmt instanceof CreatePolicyStmt) { diff --git a/fe/fe-core/src/main/jflex/sql_scanner.flex b/fe/fe-core/src/main/jflex/sql_scanner.flex index 0f8eaa5d9bc4d4..f743d5edd4e6b0 100644 --- a/fe/fe-core/src/main/jflex/sql_scanner.flex +++ b/fe/fe-core/src/main/jflex/sql_scanner.flex @@ -147,6 +147,7 @@ import org.apache.doris.qe.SqlModeHelper; keywordMap.put("clusters", new Integer(SqlParserSymbols.KW_CLUSTERS)); keywordMap.put("collate", new Integer(SqlParserSymbols.KW_COLLATE)); keywordMap.put("collation", new Integer(SqlParserSymbols.KW_COLLATION)); + keywordMap.put("colocate", new Integer(SqlParserSymbols.KW_COLOCATE)); keywordMap.put("column", new Integer(SqlParserSymbols.KW_COLUMN)); keywordMap.put("columns", new Integer(SqlParserSymbols.KW_COLUMNS)); keywordMap.put("comment", new Integer(SqlParserSymbols.KW_COMMENT)); diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/:w b/fe/fe-core/src/test/java/org/apache/doris/:w new file mode 100644 index 00000000000000..5e10fa2cb53d1f --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/:w @@ -0,0 +1,1336 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.alter; + +import org.apache.doris.analysis.AlterColocateGroupStmt; +import org.apache.doris.analysis.AlterTableStmt; +import org.apache.doris.analysis.CreateDbStmt; +import org.apache.doris.analysis.CreateMaterializedViewStmt; +import org.apache.doris.analysis.CreatePolicyStmt; +import org.apache.doris.analysis.CreateResourceStmt; +import org.apache.doris.analysis.CreateTableStmt; +import org.apache.doris.analysis.DateLiteral; +import org.apache.doris.analysis.DropResourceStmt; +import org.apache.doris.analysis.ShowCreateMaterializedViewStmt; +import org.apache.doris.catalog.ColocateGroupSchema; +import org.apache.doris.catalog.ColocateTableIndex.GroupId; +import org.apache.doris.catalog.Column; +import org.apache.doris.catalog.DataProperty; +import org.apache.doris.catalog.Database; +import org.apache.doris.catalog.Env; +import org.apache.doris.catalog.MaterializedIndex; +import org.apache.doris.catalog.MysqlTable; +import org.apache.doris.catalog.OdbcTable; +import org.apache.doris.catalog.OlapTable; +import org.apache.doris.catalog.Partition; +import org.apache.doris.catalog.PrimitiveType; +import org.apache.doris.catalog.Replica; +import org.apache.doris.catalog.ReplicaAllocation; +import org.apache.doris.catalog.Table; +import org.apache.doris.catalog.Tablet; +import org.apache.doris.catalog.TabletInvertedIndex; +import org.apache.doris.catalog.Type; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.Config; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.ExceptionChecker; +import org.apache.doris.common.FeConstants; +import org.apache.doris.common.util.TimeUtils; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.DdlExecutor; +import org.apache.doris.qe.ShowExecutor; +import org.apache.doris.resource.Tag; +import org.apache.doris.system.Backend; +import org.apache.doris.thrift.TStorageMedium; +import org.apache.doris.utframe.UtFrameUtils; + +import 
com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +public class AlterTest { + + private static String runningDir = "fe/mocked/AlterTest/" + UUID.randomUUID().toString() + "/"; + + private static ConnectContext connectContext; + + private static Map backendTags; + + @BeforeClass + public static void beforeClass() throws Exception { + FeConstants.runningUnitTest = true; + FeConstants.default_scheduler_interval_millisecond = 100; + FeConstants.tablet_checker_interval_ms = 100; + Config.tablet_checker_interval_ms = 100; + Config.dynamic_partition_check_interval_seconds = 1; + Config.disable_storage_medium_check = true; + Config.enable_storage_policy = true; + Config.disable_balance = true; + Config.schedule_batch_size = 200; + UtFrameUtils.createDorisClusterWithMultiTag(runningDir, 5); + + List backends = Env.getCurrentSystemInfo().getIdToBackend().values().asList(); + + Map tagMap = Maps.newHashMap(); + tagMap.put(Tag.TYPE_LOCATION, "group_a"); + backends.get(2).setTagMap(tagMap); + backends.get(3).setTagMap(tagMap); + + tagMap = Maps.newHashMap(); + tagMap.put(Tag.TYPE_LOCATION, "group_b"); + backends.get(4).setTagMap(tagMap); + + backendTags = Maps.newHashMap(); + backends.forEach(be -> backendTags.put(be.getId(), be.getLocationTag())); + + // create connect context + connectContext = UtFrameUtils.createDefaultCtx(); + // create database + String createDbStmtStr = "create database test;"; + CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, connectContext); + Env.getCurrentEnv().createDb(createDbStmt); + + createTable("CREATE TABLE test.tbl1\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int sum\n" + ")\n" + + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" 
+ + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + createTable("CREATE TABLE test.tbl2\n" + "(\n" + " k1 date,\n" + " v1 int sum\n" + ")\n" + + "DISTRIBUTED BY HASH (k1) BUCKETS 3\n" + "PROPERTIES('replication_num' = '1');"); + + createTable("CREATE TABLE test.tbl3\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int sum\n" + ")\n" + + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + createTable("CREATE TABLE test.tbl4\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int sum\n" + ")\n" + + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01'),\n" + + " PARTITION p3 values less than('2020-04-01'),\n" + + " PARTITION p4 values less than('2020-05-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES" + "(" + " 'replication_num' = '1',\n" + " 'in_memory' = 'false',\n" + + " 'storage_medium' = 'SSD',\n" + " 'storage_cooldown_time' = '2999-12-31 00:00:00'\n" + ");"); + + createTable("CREATE TABLE test.tbl5\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int \n" + + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" + "PARTITION BY RANGE(k1)\n" + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + createTable( + "CREATE TABLE test.tbl6\n" + "(\n" + " k1 datetime(3),\n" + " k2 datetime(3),\n" + + " v1 int \n," + + " v2 datetime(3)\n" + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" + + "PARTITION BY RANGE(k1)\n" + "(\n" + + " PARTITION p1 values less than('2020-02-01 00:00:00'),\n" + + " PARTITION p2 values less than('2020-03-01 00:00:00')\n" + 
")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + "PROPERTIES('replication_num' = '1','enable_unique_key_merge_on_write' = 'false');"); + + createTable("create external table test.odbc_table\n" + "( `k1` bigint(20) COMMENT \"\",\n" + + " `k2` datetime COMMENT \"\",\n" + " `k3` varchar(20) COMMENT \"\",\n" + + " `k4` varchar(100) COMMENT \"\",\n" + " `k5` float COMMENT \"\"\n" + ")ENGINE=ODBC\n" + + "PROPERTIES (\n" + "\"host\" = \"127.0.0.1\",\n" + "\"port\" = \"3306\",\n" + "\"user\" = \"root\",\n" + + "\"password\" = \"123\",\n" + "\"database\" = \"db1\",\n" + "\"table\" = \"tbl1\",\n" + + "\"driver\" = \"Oracle Driver\",\n" + "\"odbc_type\" = \"oracle\"\n" + ");"); + + // s3 resource + createRemoteStorageResource( + "create resource \"remote_s3\"\n" + "properties\n" + "(\n" + " \"type\" = \"s3\", \n" + + " \"AWS_ENDPOINT\" = \"bj\",\n" + " \"AWS_REGION\" = \"bj\",\n" + + " \"AWS_ROOT_PATH\" = \"/path/to/root\",\n" + " \"AWS_ACCESS_KEY\" = \"bbb\",\n" + + " \"AWS_SECRET_KEY\" = \"aaaa\",\n" + " \"AWS_MAX_CONNECTIONS\" = \"50\",\n" + + " \"AWS_REQUEST_TIMEOUT_MS\" = \"3000\",\n" + " \"AWS_CONNECTION_TIMEOUT_MS\" = \"1000\",\n" + + " \"AWS_BUCKET\" = \"test-bucket\", \"s3_validity_check\" = \"false\"\n" + + ");"); + + createRemoteStorageResource( + "create resource \"remote_s3_1\"\n" + "properties\n" + "(\n" + " \"type\" = \"s3\", \n" + + " \"AWS_ENDPOINT\" = \"bj\",\n" + " \"AWS_REGION\" = \"bj\",\n" + + " \"AWS_ROOT_PATH\" = \"/path/to/root\",\n" + " \"AWS_ACCESS_KEY\" = \"bbb\",\n" + + " \"AWS_SECRET_KEY\" = \"aaaa\",\n" + " \"AWS_MAX_CONNECTIONS\" = \"50\",\n" + + " \"AWS_REQUEST_TIMEOUT_MS\" = \"3000\",\n" + " \"AWS_CONNECTION_TIMEOUT_MS\" = \"1000\",\n" + + " \"AWS_BUCKET\" = \"test-bucket\", \"s3_validity_check\" = \"false\"\n" + + ");"); + + createRemoteStoragePolicy( + "CREATE STORAGE POLICY testPolicy\n" + "PROPERTIES(\n" + " \"storage_resource\" = \"remote_s3\",\n" + + " \"cooldown_datetime\" = \"2100-05-10 00:00:00\"\n" + ");"); + + 
createRemoteStoragePolicy( + "CREATE STORAGE POLICY testPolicy2\n" + "PROPERTIES(\n" + " \"storage_resource\" = \"remote_s3\",\n" + + " \"cooldown_ttl\" = \"1\"\n" + ");"); + + createRemoteStoragePolicy( + "CREATE STORAGE POLICY testPolicyAnotherResource\n" + "PROPERTIES(\n" + " \"storage_resource\" = \"remote_s3_1\",\n" + + " \"cooldown_ttl\" = \"1\"\n" + ");"); + + createTable("CREATE TABLE test.tbl_remote\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int sum\n" + + ")\n" + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01'),\n" + + " PARTITION p3 values less than('2020-04-01'),\n" + + " PARTITION p4 values less than('2020-05-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES" + "(" + " 'replication_num' = '1',\n" + " 'in_memory' = 'false',\n" + + " 'storage_medium' = 'SSD',\n" + " 'storage_cooldown_time' = '2100-05-09 00:00:00',\n" + + " 'storage_policy' = 'testPolicy'\n" + ");"); + + createTable("create table test.show_test (k1 int, k2 int) distributed by hash(k1) " + + "buckets 1 properties(\"replication_num\" = \"1\");"); + + createTable("create table test.unique_sequence_col (k1 int, v1 int, v2 date) ENGINE=OLAP " + + " UNIQUE KEY(`k1`) DISTRIBUTED BY HASH(`k1`) BUCKETS 1" + + " PROPERTIES (\"replication_num\" = \"1\", \"function_column.sequence_col\" = \"v1\");"); + } + + @AfterClass + public static void tearDown() { + File file = new File(runningDir); + file.delete(); + } + + private static void createTable(String sql) throws Exception { + Config.enable_odbc_table = true; + CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + Env.getCurrentEnv().createTable(createTableStmt); + } + + private static void createRemoteStorageResource(String sql) throws Exception { + CreateResourceStmt stmt = (CreateResourceStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + 
Env.getCurrentEnv().getResourceMgr().createResource(stmt); + } + + private static void createRemoteStoragePolicy(String sql) throws Exception { + CreatePolicyStmt stmt = (CreatePolicyStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + Env.getCurrentEnv().getPolicyMgr().createPolicy(stmt); + } + + private static void alterTable(String sql, boolean expectedException) { + try { + AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + Env.getCurrentEnv().alterTable(alterTableStmt); + if (expectedException) { + Assert.fail(); + } + } catch (Exception e) { + e.printStackTrace(); + if (!expectedException) { + Assert.fail(); + } + } + } + + private static void createMV(String sql, boolean expectedException) { + try { + CreateMaterializedViewStmt createMaterializedViewStmt + = (CreateMaterializedViewStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + Env.getCurrentEnv().createMaterializedView(createMaterializedViewStmt); + if (expectedException) { + Assert.fail(); + } + } catch (Exception e) { + e.printStackTrace(); + if (!expectedException) { + Assert.fail(); + } + } + } + + private static void alterTableWithExceptionMsg(String sql, String msg) throws Exception { + try { + AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + Env.getCurrentEnv().alterTable(alterTableStmt); + } catch (Exception e) { + Assert.assertEquals(msg, e.getMessage()); + } + } + + @Test + public void alterTableWithEnableFeature() throws Exception { + String stmt = "alter table test.tbl5 enable feature \"SEQUENCE_LOAD\" with properties (\"function_column.sequence_type\" = \"int\") "; + alterTable(stmt, false); + + stmt = "alter table test.tbl5 enable feature \"SEQUENCE_LOAD\" with properties (\"function_column.sequence_type\" = \"double\") "; + alterTable(stmt, true); + } + + @Test + public void alterTableModifyComment() throws Exception { + Database db = 
Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + Table tbl = db.getTableOrMetaException("tbl5"); + + // table comment + String stmt = "alter table test.tbl5 modify comment 'comment1'"; + alterTable(stmt, false); + Assert.assertEquals("comment1", tbl.getComment()); + + // column comment + stmt = "alter table test.tbl5 modify column k1 comment 'k1'"; + alterTable(stmt, false); + Assert.assertEquals("k1", tbl.getColumn("k1").getComment()); + + // columns comment + stmt = "alter table test.tbl5 modify column k1 comment 'k11', modify column v1 comment 'v11'"; + alterTable(stmt, false); + Assert.assertEquals("k11", tbl.getColumn("k1").getComment()); + Assert.assertEquals("v11", tbl.getColumn("v1").getComment()); + + // empty comment + stmt = "alter table test.tbl5 modify comment ''"; + alterTable(stmt, false); + Assert.assertEquals("OLAP", tbl.getComment()); + + // empty column comment + stmt = "alter table test.tbl5 modify column k1 comment '', modify column v1 comment 'v111'"; + alterTable(stmt, false); + Assert.assertEquals("", tbl.getColumn("k1").getComment()); + Assert.assertEquals("v111", tbl.getColumn("v1").getComment()); + + // unknown column + stmt = "alter table test.tbl5 modify column x comment '', modify column v1 comment 'v111'"; + alterTable(stmt, true); + Assert.assertEquals("", tbl.getColumn("k1").getComment()); + Assert.assertEquals("v111", tbl.getColumn("v1").getComment()); + + // duplicate column + stmt = "alter table test.tbl5 modify column k1 comment '', modify column k1 comment 'v111'"; + alterTable(stmt, true); + Assert.assertEquals("", tbl.getColumn("k1").getComment()); + Assert.assertEquals("v111", tbl.getColumn("v1").getComment()); + } + + @Test + public void testConflictAlterOperations() throws Exception { + String stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01'), add partition p4 values less than('2020-05-01')"; + alterTable(stmt, true); + + stmt = "alter table test.tbl1 add partition 
p3 values less than('2020-04-01'), drop partition p4"; + alterTable(stmt, true); + + stmt = "alter table test.tbl1 drop partition p3, drop partition p4"; + alterTable(stmt, true); + + stmt = "alter table test.tbl1 drop partition p3, add column k3 int"; + alterTable(stmt, true); + + // no conflict + stmt = "alter table test.tbl1 add column k3 int, add column k4 int"; + alterTable(stmt, false); + waitSchemaChangeJobDone(false); + + stmt = "alter table test.tbl1 add rollup r1 (k1)"; + alterTable(stmt, false); + waitSchemaChangeJobDone(true); + + stmt = "alter table test.tbl1 add rollup r2 (k1), r3 (k1)"; + alterTable(stmt, false); + waitSchemaChangeJobDone(true); + + // enable dynamic partition + // not adding the `start` property so that it won't drop the origin partition p1, p2 and p3 + stmt = "alter table test.tbl1 set (\n" + + "'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; + alterTable(stmt, false); + Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + OlapTable tbl = (OlapTable) db.getTableOrMetaException("tbl1"); + Assert.assertTrue(tbl.getTableProperty().getDynamicPartitionProperty().getEnable()); + Assert.assertEquals(4, tbl.getIndexIdToSchema().size()); + + // add partition when dynamic partition is enable + stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; + alterTable(stmt, true); + + // add temp partition when dynamic partition is enable + stmt = "alter table test.tbl1 add temporary partition tp3 values less than('2020-04-01') distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; + alterTable(stmt, false); + Assert.assertEquals(1, tbl.getTempPartitions().size()); + + // disable the dynamic partition + stmt = "alter table 
test.tbl1 set ('dynamic_partition.enable' = 'false')"; + alterTable(stmt, false); + Assert.assertFalse(tbl.getTableProperty().getDynamicPartitionProperty().getEnable()); + + // add partition when dynamic partition is disable + stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') distributed by hash(k2) buckets 4"; + alterTable(stmt, false); + + // set table's default replication num + Assert.assertEquals((short) 1, tbl.getDefaultReplicaAllocation().getTotalReplicaNum()); + stmt = "alter table test.tbl1 set ('default.replication_num' = '3');"; + alterTable(stmt, false); + Assert.assertEquals((short) 3, tbl.getDefaultReplicaAllocation().getTotalReplicaNum()); + + // set range table's real replication num + Partition p1 = tbl.getPartition("p1"); + Assert.assertEquals(Short.valueOf("1"), + Short.valueOf(tbl.getPartitionInfo().getReplicaAllocation(p1.getId()).getTotalReplicaNum())); + stmt = "alter table test.tbl1 set ('replication_num' = '3');"; + alterTable(stmt, true); + Assert.assertEquals(Short.valueOf("1"), + Short.valueOf(tbl.getPartitionInfo().getReplicaAllocation(p1.getId()).getTotalReplicaNum())); + + // set un-partitioned table's real replication num + // first we need to change be's tag + OlapTable tbl2 = (OlapTable) db.getTableOrMetaException("tbl2"); + Partition partition = tbl2.getPartition(tbl2.getName()); + Assert.assertEquals(Short.valueOf("1"), + Short.valueOf(tbl2.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); + stmt = "alter table test.tbl2 set ('replication_allocation' = 'tag.location.group_a:1');"; + alterTable(stmt, false); + Assert.assertEquals((short) 1, (short) tbl2.getPartitionInfo().getReplicaAllocation(partition.getId()) + .getReplicaNumByTag(Tag.createNotCheck(Tag.TYPE_LOCATION, "group_a"))); + Assert.assertEquals((short) 1, (short) tbl2.getTableProperty().getReplicaAllocation() + .getReplicaNumByTag(Tag.createNotCheck(Tag.TYPE_LOCATION, "group_a"))); + + Thread.sleep(5000); // 
sleep to wait dynamic partition scheduler run + // add partition without set replication num, and default num is 3. + stmt = "alter table test.tbl1 add partition p4 values less than('2020-04-10')"; + alterTable(stmt, true); + + // add partition when dynamic partition is disable + stmt = "alter table test.tbl1 add partition p4 values less than('2020-04-10') ('replication_num' = '1')"; + alterTable(stmt, false); + } + + @Test + public void testAlterDateV2Operations() throws Exception { + String stmt = "alter table test.tbl6 add partition p3 values less than('2020-04-01 00:00:00')," + + "add partition p4 values less than('2020-05-01 00:00:00')"; + alterTable(stmt, true); + + stmt = "alter table test.tbl6 add partition p3 values less than('2020-04-01 00:00:00'), drop partition p4"; + alterTable(stmt, true); + + stmt = "alter table test.tbl6 drop partition p3, drop partition p4"; + alterTable(stmt, true); + + stmt = "alter table test.tbl6 drop partition p3, add column k3 datetime(6)"; + alterTable(stmt, true); + + // no conflict + stmt = "alter table test.tbl6 add column k3 int, add column k4 datetime(6)"; + alterTable(stmt, false); + waitSchemaChangeJobDone(false); + + stmt = "alter table test.tbl6 add rollup r1 (k2, k1)"; + alterTable(stmt, false); + waitSchemaChangeJobDone(true); + + stmt = "alter table test.tbl6 add rollup r2 (k2, k1), r3 (k2, k1)"; + alterTable(stmt, false); + waitSchemaChangeJobDone(true); + + // enable dynamic partition + // not adding the `start` property so that it won't drop the origin partition p1, p2 and p3 + stmt = "alter table test.tbl6 set (\n" + + "'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; + alterTable(stmt, false); + Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + OlapTable tbl = (OlapTable) 
db.getTableOrMetaException("tbl6"); + Assert.assertTrue(tbl.getTableProperty().getDynamicPartitionProperty().getEnable()); + Assert.assertEquals(4, tbl.getIndexIdToSchema().size()); + + // add partition when dynamic partition is enable + stmt = "alter table test.tbl6 add partition p3 values less than('2020-04-01 00:00:00') distributed by" + + " hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; + alterTable(stmt, true); + + // add temp partition when dynamic partition is enable + stmt = "alter table test.tbl6 add temporary partition tp3 values less than('2020-04-01 00:00:00') distributed" + + " by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; + alterTable(stmt, false); + Assert.assertEquals(1, tbl.getTempPartitions().size()); + + // disable the dynamic partition + stmt = "alter table test.tbl6 set ('dynamic_partition.enable' = 'false')"; + alterTable(stmt, false); + Assert.assertFalse(tbl.getTableProperty().getDynamicPartitionProperty().getEnable()); + + String alterStmt = "alter table test.tbl6 set ('in_memory' = 'true')"; + String errorMsg = "errCode = 2, detailMessage = Not support set 'in_memory'='true' now!"; + alterTableWithExceptionMsg(alterStmt, errorMsg); + + // add partition when dynamic partition is disable + stmt = "alter table test.tbl6 add partition p3 values less than('2020-04-01 00:00:00') distributed" + + " by hash(k2) buckets 4"; + alterTable(stmt, false); + + // set table's default replication num + Assert.assertEquals((short) 1, tbl.getDefaultReplicaAllocation().getTotalReplicaNum()); + stmt = "alter table test.tbl6 set ('default.replication_num' = '3');"; + alterTable(stmt, false); + Assert.assertEquals((short) 3, tbl.getDefaultReplicaAllocation().getTotalReplicaNum()); + + // set range table's real replication num + Partition p1 = tbl.getPartition("p1"); + Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicaAllocation(p1.getId()) + .getTotalReplicaNum())); + stmt = "alter table test.tbl6 
set ('replication_num' = '3');"; + alterTable(stmt, true); + Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicaAllocation(p1.getId()) + .getTotalReplicaNum())); + } + + // test batch update range partitions' properties + @Test + public void testBatchUpdatePartitionProperties() throws Exception { + Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + OlapTable tbl4 = (OlapTable) db.getTableOrMetaException("tbl4"); + Partition p1 = tbl4.getPartition("p1"); + Partition p2 = tbl4.getPartition("p2"); + Partition p3 = tbl4.getPartition("p3"); + Partition p4 = tbl4.getPartition("p4"); + + // batch update replication_num property + String stmt = "alter table test.tbl4 modify partition (p1, p2, p4) set ('replication_num' = '1')"; + List partitionList = Lists.newArrayList(p1, p2, p4); + for (Partition partition : partitionList) { + Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl4.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); + } + alterTable(stmt, false); + for (Partition partition : partitionList) { + Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl4.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); + } + Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl4.getPartitionInfo().getReplicaAllocation(p3.getId()).getTotalReplicaNum())); + + // batch update in_memory property + stmt = "alter table test.tbl4 modify partition (p1, p2, p3) set ('in_memory' = 'false')"; + partitionList = Lists.newArrayList(p1, p2, p3); + for (Partition partition : partitionList) { + Assert.assertEquals(false, tbl4.getPartitionInfo().getIsInMemory(partition.getId())); + } + alterTable(stmt, false); + for (Partition partition : partitionList) { + Assert.assertEquals(false, tbl4.getPartitionInfo().getIsInMemory(partition.getId())); + } + Assert.assertEquals(false, tbl4.getPartitionInfo().getIsInMemory(p4.getId())); + + String 
alterStmt = "alter table test.tbl4 modify partition (p1, p2, p3) set ('in_memory' = 'true')"; + String errorMsg = "errCode = 2, detailMessage = Not support set 'in_memory'='true' now!"; + alterTableWithExceptionMsg(alterStmt, errorMsg); + + // batch update storage_medium and storage_cooldown properties + // alter storage_medium + stmt = "alter table test.tbl4 modify partition (p3, p4) set ('storage_medium' = 'HDD')"; + DateLiteral dateLiteral = new DateLiteral("2999-12-31 00:00:00", Type.DATETIME); + long cooldownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); + DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, cooldownTimeMs, ""); + partitionList = Lists.newArrayList(p3, p4); + for (Partition partition : partitionList) { + Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(partition.getId())); + } + alterTable(stmt, false); + DataProperty newDataProperty = new DataProperty(TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS, ""); + for (Partition partition : partitionList) { + Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(p1.getId())); + Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(p2.getId())); + + // alter cooldown_time + stmt = "alter table test.tbl4 modify partition (p1, p2) set ('storage_cooldown_time' = '2100-12-31 00:00:00')"; + alterTable(stmt, false); + + dateLiteral = new DateLiteral("2100-12-31 00:00:00", Type.DATETIME); + cooldownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); + DataProperty newDataProperty1 = new DataProperty(TStorageMedium.SSD, cooldownTimeMs, ""); + partitionList = Lists.newArrayList(p1, p2); + for (Partition partition : partitionList) { + Assert.assertEquals(newDataProperty1, tbl4.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(newDataProperty, 
tbl4.getPartitionInfo().getDataProperty(p3.getId())); + Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(p4.getId())); + + // batch update range partitions' properties with * + stmt = "alter table test.tbl4 modify partition (*) set ('replication_num' = '1')"; + partitionList = Lists.newArrayList(p1, p2, p3, p4); + alterTable(stmt, false); + for (Partition partition : partitionList) { + Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl4.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); + } + } + + @Test + public void testAlterRemoteStorageTableDataProperties() throws Exception { + Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + OlapTable tblRemote = (OlapTable) db.getTableOrMetaException("tbl_remote"); + Partition p1 = tblRemote.getPartition("p1"); + Partition p2 = tblRemote.getPartition("p2"); + Partition p3 = tblRemote.getPartition("p3"); + Partition p4 = tblRemote.getPartition("p4"); + + DateLiteral dateLiteral = new DateLiteral("2100-05-09 00:00:00", Type.DATETIME); + long cooldownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); + DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, cooldownTimeMs, "testPolicy"); + List partitionList = Lists.newArrayList(p2, p3, p4); + for (Partition partition : partitionList) { + Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); + } + + // alter cooldown_time + String stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_cooldown_time' = '2100-04-01 22:22:22')"; + alterTable(stmt, false); + DateLiteral newDateLiteral = new DateLiteral("2100-04-01 22:22:22", Type.DATETIME); + long newCooldownTimeMs = newDateLiteral.unixTimestamp(TimeUtils.getTimeZone()); + DataProperty dataProperty2 = new DataProperty(TStorageMedium.SSD, newCooldownTimeMs, "testPolicy"); + for (Partition partition : partitionList) { + 
Assert.assertEquals(dataProperty2, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); + + // alter storage_medium + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_medium' = 'HDD')"; + alterTable(stmt, false); + DataProperty dataProperty1 = new DataProperty( + TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS, "testPolicy"); + for (Partition partition : partitionList) { + Assert.assertEquals(dataProperty1, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); + + // alter remote_storage to one not exist policy + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_policy' = 'testPolicy3')"; + alterTable(stmt, true); + Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); + + // alter remote_storage to one another one which points to another resource + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_policy' = 'testPolicyAnotherResource')"; + alterTable(stmt, true); + Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); + + // alter recover to old state + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set (" + + "'storage_medium' = 'SSD', " + + "'storage_cooldown_time' = '2100-05-09 00:00:00'" + + ")"; + alterTable(stmt, false); + for (Partition partition : partitionList) { + Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); + + } + + @Test + public void testDynamicPartitionDropAndAdd() throws Exception { + // test day range + String stmt = "alter table test.tbl3 set (\n" + + 
"'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.start' = '-3',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; + alterTable(stmt, false); + Thread.sleep(5000); // sleep to wait dynamic partition scheduler run + + Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + OlapTable tbl = (OlapTable) db.getTableOrMetaException("tbl3"); + Assert.assertEquals(4, tbl.getPartitionNames().size()); + Assert.assertNull(tbl.getPartition("p1")); + Assert.assertNull(tbl.getPartition("p2")); + } + + private void waitSchemaChangeJobDone(boolean rollupJob) throws Exception { + Map alterJobs = Env.getCurrentEnv().getSchemaChangeHandler().getAlterJobsV2(); + if (rollupJob) { + alterJobs = Env.getCurrentEnv().getMaterializedViewHandler().getAlterJobsV2(); + } + for (AlterJobV2 alterJobV2 : alterJobs.values()) { + while (!alterJobV2.getJobState().isFinalState()) { + System.out.println("alter job " + alterJobV2.getJobId() + " is running. state: " + alterJobV2.getJobState()); + Thread.sleep(1000); + } + System.out.println(alterJobV2.getType() + " alter job " + alterJobV2.getJobId() + " is done. 
state: " + alterJobV2.getJobState()); + Assert.assertEquals(AlterJobV2.JobState.FINISHED, alterJobV2.getJobState()); + Database db = + Env.getCurrentInternalCatalog().getDbOrMetaException(alterJobV2.getDbId()); + OlapTable tbl = (OlapTable) db.getTableOrMetaException(alterJobV2.getTableId()); + while (tbl.getState() != OlapTable.OlapTableState.NORMAL) { + Thread.sleep(1000); + } + } + } + + @Test + public void testSetDynamicPropertiesInNormalTable() throws Exception { + String tableName = "no_dynamic_table"; + String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"; + createTable(createOlapTblStmt); + String alterStmt = "alter table test." + tableName + " set (\"dynamic_partition.enable\" = \"true\");"; + String errorMsg = "errCode = 2, detailMessage = Table default_cluster:test.no_dynamic_table is not a dynamic partition table. " + + "Use command `HELP ALTER TABLE` to see how to change a normal table to a dynamic partition table."; + alterTableWithExceptionMsg(alterStmt, errorMsg); + // test set dynamic properties in a no dynamic partition table + String stmt = "alter table test." 
+ tableName + " set (\n" + + "'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.start' = '-3',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; + alterTable(stmt, false); + } + + @Test + public void testSetDynamicPropertiesInDynamicPartitionTable() throws Exception { + String tableName = "dynamic_table"; + String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; + + createTable(createOlapTblStmt); + String alterStmt1 = "alter table test." + tableName + " set (\"dynamic_partition.enable\" = \"false\");"; + alterTable(alterStmt1, false); + String alterStmt2 = "alter table test." + tableName + " set (\"dynamic_partition.time_unit\" = \"week\");"; + alterTable(alterStmt2, false); + String alterStmt3 = "alter table test." + tableName + " set (\"dynamic_partition.start\" = \"-10\");"; + alterTable(alterStmt3, false); + String alterStmt4 = "alter table test." 
+ tableName + " set (\"dynamic_partition.end\" = \"10\");"; + alterTable(alterStmt4, false); + String alterStmt5 = "alter table test." + tableName + " set (\"dynamic_partition.prefix\" = \"pp\");"; + alterTable(alterStmt5, false); + String alterStmt6 = "alter table test." + tableName + " set (\"dynamic_partition.buckets\" = \"5\");"; + alterTable(alterStmt6, false); + } + + @Test + public void testReplaceTable() throws Exception { + String stmt1 = "CREATE TABLE test.replace1\n" + + "(\n" + + " k1 int, k2 int, k3 int sum\n" + + ")\n" + + "AGGREGATE KEY(k1, k2)\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + + "rollup (\n" + + "r1(k1),\n" + + "r2(k2, k3)\n" + + ")\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; + + + String stmt2 = "CREATE TABLE test.r1\n" + + "(\n" + + " k1 int, k2 int\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; + + String stmt3 = "CREATE TABLE test.replace2\n" + + "(\n" + + " k1 int, k2 int\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; + + String stmt4 = "CREATE TABLE test.replace3\n" + + "(\n" + + " k1 int, k2 int, k3 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + "\tPARTITION p1 values less than(\"100\"),\n" + + "\tPARTITION p2 values less than(\"200\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 1\n" + + "rollup (\n" + + "r3(k1),\n" + + "r4(k2, k3)\n" + + ")\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; + + createTable(stmt1); + createTable(stmt2); + createTable(stmt3); + createTable(stmt4); + Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + + // table name -> tabletIds + Map> tblNameToTabletIds = Maps.newHashMap(); + OlapTable replace1Tbl = (OlapTable) db.getTableOrMetaException("replace1"); + OlapTable r1Tbl = (OlapTable) db.getTableOrMetaException("r1"); + OlapTable replace2Tbl = (OlapTable) db.getTableOrMetaException("replace2"); + OlapTable 
replace3Tbl = (OlapTable) db.getTableOrMetaException("replace3"); + + tblNameToTabletIds.put("replace1", Lists.newArrayList()); + for (Partition partition : replace1Tbl.getAllPartitions()) { + for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (Tablet tablet : index.getTablets()) { + tblNameToTabletIds.get("replace1").add(tablet.getId()); + } + } + } + + tblNameToTabletIds.put("r1", Lists.newArrayList()); + for (Partition partition : r1Tbl.getAllPartitions()) { + for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (Tablet tablet : index.getTablets()) { + tblNameToTabletIds.get("r1").add(tablet.getId()); + } + } + } + + tblNameToTabletIds.put("replace2", Lists.newArrayList()); + for (Partition partition : replace2Tbl.getAllPartitions()) { + for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (Tablet tablet : index.getTablets()) { + tblNameToTabletIds.get("replace2").add(tablet.getId()); + } + } + } + + tblNameToTabletIds.put("replace3", Lists.newArrayList()); + for (Partition partition : replace3Tbl.getAllPartitions()) { + for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (Tablet tablet : index.getTablets()) { + tblNameToTabletIds.get("replace3").add(tablet.getId()); + } + } + } + + // name conflict + String replaceStmt = "ALTER TABLE test.replace1 REPLACE WITH TABLE r1"; + alterTable(replaceStmt, true); + + // replace1 with replace2 + replaceStmt = "ALTER TABLE test.replace1 REPLACE WITH TABLE replace2"; + OlapTable replace1 = (OlapTable) db.getTableOrMetaException("replace1"); + OlapTable replace2 = (OlapTable) db.getTableOrMetaException("replace2"); + Assert.assertEquals(3, replace1.getPartition("replace1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + Assert.assertEquals(1, 
replace2.getPartition("replace2").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + + alterTable(replaceStmt, false); + Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace1"))); + Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace2"))); + + replace1 = (OlapTable) db.getTableOrMetaException("replace1"); + replace2 = (OlapTable) db.getTableOrMetaException("replace2"); + Assert.assertEquals(1, replace1.getPartition("replace1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + Assert.assertEquals(3, replace2.getPartition("replace2").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + Assert.assertEquals("replace1", replace1.getIndexNameById(replace1.getBaseIndexId())); + Assert.assertEquals("replace2", replace2.getIndexNameById(replace2.getBaseIndexId())); + + // replace with no swap + replaceStmt = "ALTER TABLE test.replace1 REPLACE WITH TABLE replace2 properties('swap' = 'false')"; + alterTable(replaceStmt, false); + replace1 = (OlapTable) db.getTableNullable("replace1"); + replace2 = (OlapTable) db.getTableNullable("replace2"); + Assert.assertNull(replace2); + Assert.assertEquals(3, replace1.getPartition("replace1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + Assert.assertEquals("replace1", replace1.getIndexNameById(replace1.getBaseIndexId())); + Assert.assertTrue(checkAllTabletsNotExists(tblNameToTabletIds.get("replace2"))); + Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace1"))); + + replaceStmt = "ALTER TABLE test.replace1 REPLACE WITH TABLE replace3 properties('swap' = 'true')"; + alterTable(replaceStmt, false); + replace1 = (OlapTable) db.getTableOrMetaException("replace1"); + OlapTable replace3 = (OlapTable) db.getTableOrMetaException("replace3"); + Assert.assertEquals(3, replace1.getPartition("p1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + Assert.assertEquals(3, 
replace1.getPartition("p2").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + Assert.assertNotNull(replace1.getIndexIdByName("r3")); + Assert.assertNotNull(replace1.getIndexIdByName("r4")); + + Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace1"))); + Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace3"))); + + Assert.assertEquals(3, replace3.getPartition("replace3").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + Assert.assertNotNull(replace3.getIndexIdByName("r1")); + Assert.assertNotNull(replace3.getIndexIdByName("r2")); + } + + @Test + public void testModifyBucketNum() throws Exception { + String stmt = "CREATE TABLE test.bucket\n" + + "(\n" + + " k1 int, k2 int, k3 int sum\n" + + ")\n" + + "ENGINE = OLAP\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"100000\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"200000\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"300000\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; + + createTable(stmt); + Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + + String modifyBucketNumStmt = "ALTER TABLE test.bucket MODIFY DISTRIBUTION DISTRIBUTED BY HASH(k1) BUCKETS 1;"; + alterTable(modifyBucketNumStmt, false); + OlapTable bucket = (OlapTable) db.getTableOrMetaException("bucket"); + Assert.assertEquals(1, bucket.getDefaultDistributionInfo().getBucketNum()); + + modifyBucketNumStmt = "ALTER TABLE test.bucket MODIFY DISTRIBUTION DISTRIBUTED BY HASH(k1) BUCKETS 30;"; + alterTable(modifyBucketNumStmt, false); + bucket = (OlapTable) db.getTableOrMetaException("bucket"); + Assert.assertEquals(30, bucket.getDefaultDistributionInfo().getBucketNum()); + + } + + @Test + public void testChangeOrder() throws Exception { + createTable("CREATE TABLE test.change_order\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 
int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + String changeOrderStmt = "ALTER TABLE test.change_order ORDER BY (k2, k1, v1);;"; + alterTable(changeOrderStmt, false); + } + + @Test + public void testAlterUniqueTablePartitionColumn() throws Exception { + createTable("CREATE TABLE test.unique_partition\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int\n" + + ")\n" + + "UNIQUE KEY(k1, k2)\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + + // partition key can not be changed. + // this test is also for validating a bug fix about invisible columns(delete flag column) + String changeOrderStmt = "ALTER TABLE test.unique_partition modify column k1 int key null"; + alterTable(changeOrderStmt, true); + } + + @Test + public void testAlterDateV2Schema() throws Exception { + createTable("CREATE TABLE test.unique_partition_datev2\n" + "(\n" + " k1 date,\n" + " k2 datetime(3),\n" + + " k3 datetime,\n" + " v1 date,\n" + " v2 datetime(3),\n" + " v3 datetime,\n" + " v4 int\n" + + ")\n" + "UNIQUE KEY(k1, k2, k3)\n" + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k1) BUCKETS 3\n" + "PROPERTIES('replication_num' = '1');"); + + // partition key can not be changed. 
+ String changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k1 int key null"; + alterTable(changeOrderStmt, true); + changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k2 int key null"; + alterTable(changeOrderStmt, true); + changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k3 int key null"; + alterTable(changeOrderStmt, true); + + // partition keys which are date type should be changed between each other. + changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k2 datetime key null"; + alterTable(changeOrderStmt, false); + waitSchemaChangeJobDone(false); + changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k3 datetime(3) key null"; + alterTable(changeOrderStmt, false); + waitSchemaChangeJobDone(false); + // Change to another precision datetime + changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k3 datetime(6) key null"; + alterTable(changeOrderStmt, false); + waitSchemaChangeJobDone(false); + } + + private boolean checkAllTabletsExists(List tabletIds) { + TabletInvertedIndex invertedIndex = Env.getCurrentEnv().getTabletInvertedIndex(); + for (long tabletId : tabletIds) { + if (invertedIndex.getTabletMeta(tabletId) == null) { + return false; + } + if (invertedIndex.getReplicasByTabletId(tabletId).isEmpty()) { + return false; + } + } + return true; + } + + private boolean checkAllTabletsNotExists(List tabletIds) { + TabletInvertedIndex invertedIndex = Env.getCurrentEnv().getTabletInvertedIndex(); + for (long tabletId : tabletIds) { + if (invertedIndex.getTabletMeta(tabletId) != null) { + return false; + } + + if (!invertedIndex.getReplicasByTabletId(tabletId).isEmpty()) { + return false; + } + } + return true; + } + + @Test + public void testExternalTableAlterOperations() throws Exception { + // external table do not support partition operation + String stmt = "alter table test.odbc_table add partition p3 values less 
than('2020-04-01'), add partition p4 values less than('2020-05-01')"; + alterTable(stmt, true); + + // external table do not support rollup + stmt = "alter table test.odbc_table add rollup r1 (k1)"; + alterTable(stmt, true); + + // external table support add column + stmt = "alter table test.odbc_table add column k6 INT KEY after k1, add column k7 TINYINT KEY after k6"; + alterTable(stmt, false); + Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + Table odbcTable = db.getTableOrMetaException("odbc_table"); + Assert.assertEquals(odbcTable.getBaseSchema().size(), 7); + Assert.assertEquals(odbcTable.getBaseSchema().get(1).getDataType(), PrimitiveType.INT); + Assert.assertEquals(odbcTable.getBaseSchema().get(2).getDataType(), PrimitiveType.TINYINT); + + // external table support drop column + stmt = "alter table test.odbc_table drop column k7"; + alterTable(stmt, false); + db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + odbcTable = db.getTableOrMetaException("odbc_table"); + Assert.assertEquals(odbcTable.getBaseSchema().size(), 6); + + // external table support modify column + stmt = "alter table test.odbc_table modify column k6 bigint after k5"; + alterTable(stmt, false); + db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + odbcTable = db.getTableOrMetaException("odbc_table"); + Assert.assertEquals(odbcTable.getBaseSchema().size(), 6); + Assert.assertEquals(odbcTable.getBaseSchema().get(5).getDataType(), PrimitiveType.BIGINT); + + // external table support reorder column + db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + odbcTable = db.getTableOrMetaException("odbc_table"); + Assert.assertEquals(odbcTable.getBaseSchema().stream() + .map(column -> column.getName()) + .reduce("", (totalName, columnName) -> totalName + columnName), "k1k2k3k4k5k6"); + stmt = "alter table test.odbc_table order by (k6, k5, k4, k3, k2, k1)"; + 
alterTable(stmt, false); + Assert.assertEquals(odbcTable.getBaseSchema().stream() + .map(column -> column.getName()) + .reduce("", (totalName, columnName) -> totalName + columnName), "k6k5k4k3k2k1"); + + // external table support drop column + stmt = "alter table test.odbc_table drop column k6"; + alterTable(stmt, false); + stmt = "alter table test.odbc_table drop column k5"; + alterTable(stmt, false); + stmt = "alter table test.odbc_table drop column k4"; + alterTable(stmt, false); + stmt = "alter table test.odbc_table drop column k3"; + alterTable(stmt, false); + stmt = "alter table test.odbc_table drop column k2"; + alterTable(stmt, false); + // do not allow drop last column + Assert.assertEquals(odbcTable.getBaseSchema().size(), 1); + stmt = "alter table test.odbc_table drop column k1"; + alterTable(stmt, true); + Assert.assertEquals(odbcTable.getBaseSchema().size(), 1); + + // external table support rename operation + stmt = "alter table test.odbc_table rename oracle_table"; + alterTable(stmt, false); + db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); + odbcTable = db.getTableNullable("oracle_table"); + Assert.assertNotNull(odbcTable); + odbcTable = db.getTableNullable("odbc_table"); + Assert.assertNull(odbcTable); + } + + @Test + public void testModifyTableEngine() throws Exception { + String createOlapTblStmt = "CREATE TABLE test.mysql_table (\n" + + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=MYSQL\n" + + "PROPERTIES (\n" + + "\"host\" = \"172.16.0.1\",\n" + + "\"port\" = \"3306\",\n" + + "\"user\" = \"cmy\",\n" + + "\"password\" = \"abc\",\n" + + "\"database\" = \"db1\",\n" + + "\"table\" = \"tbl1\"" + + ");"; + createTable(createOlapTblStmt); + + Database db = Env.getCurrentInternalCatalog().getDbNullable("default_cluster:test"); + MysqlTable mysqlTable 
= (MysqlTable) db.getTableOrMetaException("mysql_table", Table.TableType.MYSQL); + + String alterEngineStmt = "alter table test.mysql_table modify engine to odbc"; + alterTable(alterEngineStmt, true); + + alterEngineStmt = "alter table test.mysql_table modify engine to odbc properties(\"driver\" = \"MySQL\")"; + alterTable(alterEngineStmt, false); + + OdbcTable odbcTable = (OdbcTable) db.getTableNullable(mysqlTable.getId()); + Assert.assertEquals("mysql_table", odbcTable.getName()); + List schema = odbcTable.getBaseSchema(); + Assert.assertEquals(5, schema.size()); + Assert.assertEquals("172.16.0.1", odbcTable.getHost()); + Assert.assertEquals("3306", odbcTable.getPort()); + Assert.assertEquals("cmy", odbcTable.getUserName()); + Assert.assertEquals("abc", odbcTable.getPasswd()); + Assert.assertEquals("db1", odbcTable.getOdbcDatabaseName()); + Assert.assertEquals("tbl1", odbcTable.getOdbcTableName()); + Assert.assertEquals("MySQL", odbcTable.getOdbcDriver()); + } + + @Test(expected = DdlException.class) + public void testDropInUseResource() throws Exception { + String sql = "drop resource remote_s3"; + DropResourceStmt stmt = (DropResourceStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + Env.getCurrentEnv().getResourceMgr().dropResource(stmt); + } + + @Test + public void testModifyColocateGroupReplicaAlloc() throws Exception { + Config.enable_round_robin_create_tablet = true; + + createTable("CREATE TABLE test.col_tbl0\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int \n" + + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 4\n" + + "PROPERTIES('replication_num' = '2', 'colocate_with' = 'mod_group_0');"); + + createTable("CREATE TABLE test.col_tbl1\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int \n" + + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" + "PARTITION BY RANGE(k1)\n" + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY 
HASH(k2) BUCKETS 4\n" + + "PROPERTIES('replication_num' = '2', 'colocate_with' = 'mod_group_1');"); + + createTable("CREATE TABLE test.col_tbl2 (\n" + + "`uuid` varchar(255) NULL,\n" + + "`action_datetime` date NULL\n" + + ")\n" + + "DUPLICATE KEY(uuid)\n" + + "PARTITION BY RANGE(action_datetime)()\n" + + "DISTRIBUTED BY HASH(uuid) BUCKETS 4\n" + + "PROPERTIES\n" + + "(\n" + + "\"colocate_with\" = \"mod_group_2\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"DAY\",\n" + + "\"dynamic_partition.end\" = \"2\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"4\",\n" + + "\"dynamic_partition.replication_num\" = \"2\"\n" + + ");\n"); + + + Env env = Env.getCurrentEnv(); + ReplicaAllocation newReplicaAlloc = new ReplicaAllocation(); + newReplicaAlloc.put(Tag.DEFAULT_BACKEND_TAG, (short) 1); + newReplicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, "group_a"), (short) 1); + newReplicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, "group_b"), (short) 1); + + Database db = env.getInternalCatalog().getDbOrMetaException("default_cluster:test"); + for (int i = 0; i < 3; i++) { + String groupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); + String sql = "alter colocate group " + groupName + + " set ( 'replication_allocation' = '" + newReplicaAlloc.toCreateStmt() + "')"; + System.out.println(sql); + AlterColocateGroupStmt stmt = (AlterColocateGroupStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + DdlExecutor.execute(env, stmt); + + ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(groupName); + Assert.assertNotNull(groupSchema); + Assert.assertEquals(newReplicaAlloc, groupSchema.getReplicaAlloc()); + + OlapTable tbl = (OlapTable) db.getTableOrMetaException("col_tbl" + i); + Assert.assertEquals(newReplicaAlloc, tbl.getDefaultReplicaAllocation()); + if (i == 2) { + Assert.assertEquals(newReplicaAlloc, + 
tbl.getTableProperty().getDynamicPartitionProperty().getReplicaAllocation()); + } + for (Partition partition : tbl.getAllPartitions()) { + Assert.assertEquals(newReplicaAlloc, + tbl.getPartitionInfo().getReplicaAllocation(partition.getId())); + } + + if (i == 2) { + Assert.assertEquals(newReplicaAlloc, + tbl.getTableProperty().getDynamicPartitionProperty().getReplicaAllocation()); + for (int j = 0; true; j++) { + Thread.sleep(2000); + if (tbl.getAllPartitions().size() > 0) { + break; + } + if (j >= 5) { + Assert.assertTrue("dynamic table not create partition", false); + } + } + } + } + + Config.enable_round_robin_create_tablet = false; + + for (int k = 0; true; k++) { + Thread.sleep(1000); // sleep to wait dynamic partition scheduler run + boolean allStable = true; + for (int i = 0; i < 3; i++) { + String groupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); + ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(groupName); + Assert.assertNotNull(groupSchema); + + if (env.getColocateTableIndex().isGroupUnstable(groupSchema.getGroupId())) { + allStable = false; + if (k >= 120) { + Assert.assertTrue(groupName + " is unstable" , false); + } + System.out.println("xxxxxxxxxx unstable: " + i); + continue; + } + + Map backendReplicaNum = Maps.newHashMap(); + OlapTable tbl = (OlapTable) db.getTableOrMetaException("col_tbl" + i); + int tabletNum = 0; + for (Partition partition : tbl.getAllPartitions()) { + for (MaterializedIndex idx : partition.getMaterializedIndices( + MaterializedIndex.IndexExtState.VISIBLE)) { + for (Tablet tablet : idx.getTablets()) { + Map allocMap = Maps.newHashMap(); + tabletNum++; + for (Replica replica : tablet.getReplicas()) { + long backendId = replica.getBackendId(); + Tag tag = backendTags.get(backendId); + Assert.assertNotNull(tag); + short oldNum = allocMap.getOrDefault(tag, (short) 0); + allocMap.put(tag, (short) (oldNum + 1)); + backendReplicaNum.put(backendId, backendReplicaNum.getOrDefault(backendId, 
0) + 1); + } + Assert.assertEquals(newReplicaAlloc.getAllocMap(), allocMap); + } + } + } + + Assert.assertTrue(tabletNum > 0); + + for (Map.Entry entry : backendReplicaNum.entrySet()) { + long backendId = entry.getKey(); + int replicaNum = entry.getValue(); + Tag tag = backendTags.get(backendId); + int sameTagReplicaNum = tabletNum * newReplicaAlloc.getAllocMap().getOrDefault(tag, (short) 0); + int sameTagBeNum = (int) (backendTags.values().stream().filter(t -> t.equals(tag)).count()); + System.out.println("xx i " + i); + System.out.println("xx backend " + backendId); + System.out.println("xx sameTagReplicaNum " + sameTagReplicaNum); + System.out.println("xx sameTagBeNum " + sameTagBeNum); + System.out.println("xx tabletNum " + tabletNum); + Assert.assertEquals("backend " + backendId + " failed: " + " all backend replica num: " + + backendReplicaNum + ", all backend tag: " + backendTags, + sameTagReplicaNum / sameTagBeNum, replicaNum); + } + } + + if (allStable) { + break; + } + } + } + + @Test + public void testShowMV() throws Exception { + createMV("CREATE MATERIALIZED VIEW test_mv as select k1 from test.show_test group by k1;", false); + waitSchemaChangeJobDone(true); + + String showMvSql = "SHOW CREATE MATERIALIZED VIEW test_mv on test.show_test;"; + ShowCreateMaterializedViewStmt showStmt = (ShowCreateMaterializedViewStmt) UtFrameUtils.parseAndAnalyzeStmt( + showMvSql, connectContext); + ShowExecutor executor = new ShowExecutor(connectContext, showStmt); + Assert.assertEquals(executor.execute().getResultRows().get(0).get(2), + "CREATE MATERIALIZED VIEW test_mv as select k1 from test.show_test group by k1;"); + + showMvSql = "SHOW CREATE MATERIALIZED VIEW test_mv_empty on test.show_test;"; + showStmt = (ShowCreateMaterializedViewStmt) UtFrameUtils.parseAndAnalyzeStmt(showMvSql, connectContext); + executor = new ShowExecutor(connectContext, showStmt); + Assert.assertTrue(executor.execute().getResultRows().isEmpty()); + + showMvSql = "SHOW CREATE MATERIALIZED 
VIEW test_mv on test.table1_error;"; + showStmt = (ShowCreateMaterializedViewStmt) UtFrameUtils.parseAndAnalyzeStmt(showMvSql, connectContext); + executor = new ShowExecutor(connectContext, showStmt); + ExceptionChecker.expectThrowsWithMsg(AnalysisException.class, "Unknown table 'table1_error'", + executor::execute); + } + + @Test + public void testModifySequenceCol() { + String stmt = "alter table test.unique_sequence_col modify column v1 Date"; + alterTable(stmt, true); + } +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java index a3ddd9991a89e3..df69a41a29587c 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java @@ -17,6 +17,7 @@ package org.apache.doris.alter; +import org.apache.doris.analysis.AlterColocateGroupStmt; import org.apache.doris.analysis.AlterTableStmt; import org.apache.doris.analysis.CreateDbStmt; import org.apache.doris.analysis.CreateMaterializedViewStmt; @@ -26,6 +27,8 @@ import org.apache.doris.analysis.DateLiteral; import org.apache.doris.analysis.DropResourceStmt; import org.apache.doris.analysis.ShowCreateMaterializedViewStmt; +import org.apache.doris.catalog.ColocateGroupSchema; +import org.apache.doris.catalog.ColocateTableIndex.GroupId; import org.apache.doris.catalog.Column; import org.apache.doris.catalog.DataProperty; import org.apache.doris.catalog.Database; @@ -36,10 +39,13 @@ import org.apache.doris.catalog.OlapTable; import org.apache.doris.catalog.Partition; import org.apache.doris.catalog.PrimitiveType; +import org.apache.doris.catalog.Replica; +import org.apache.doris.catalog.ReplicaAllocation; import org.apache.doris.catalog.Table; import org.apache.doris.catalog.Tablet; import org.apache.doris.catalog.TabletInvertedIndex; import org.apache.doris.catalog.Type; +import org.apache.doris.clone.RebalancerTestUtil; import 
org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; @@ -47,6 +53,7 @@ import org.apache.doris.common.FeConstants; import org.apache.doris.common.util.TimeUtils; import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.DdlExecutor; import org.apache.doris.qe.ShowExecutor; import org.apache.doris.resource.Tag; import org.apache.doris.system.Backend; @@ -70,18 +77,36 @@ public class AlterTest { private static String runningDir = "fe/mocked/AlterTest/" + UUID.randomUUID().toString() + "/"; private static ConnectContext connectContext; - private static Backend be; + + private static Map backendTags; @BeforeClass public static void beforeClass() throws Exception { FeConstants.runningUnitTest = true; FeConstants.default_scheduler_interval_millisecond = 100; + FeConstants.tablet_checker_interval_ms = 100; + Config.tablet_checker_interval_ms = 100; Config.dynamic_partition_check_interval_seconds = 1; Config.disable_storage_medium_check = true; Config.enable_storage_policy = true; - UtFrameUtils.createDorisCluster(runningDir); + Config.disable_balance = true; + Config.schedule_batch_size = 400; + Config.schedule_slot_num_per_hdd_path = 100; + UtFrameUtils.createDorisClusterWithMultiTag(runningDir, 5); + + List backends = Env.getCurrentSystemInfo().getIdToBackend().values().asList(); + + Map tagMap = Maps.newHashMap(); + tagMap.put(Tag.TYPE_LOCATION, "group_a"); + backends.get(2).setTagMap(tagMap); + backends.get(3).setTagMap(tagMap); + + tagMap = Maps.newHashMap(); + tagMap.put(Tag.TYPE_LOCATION, "group_b"); + backends.get(4).setTagMap(tagMap); - be = Env.getCurrentSystemInfo().getIdToBackend().values().asList().get(0); + backendTags = Maps.newHashMap(); + backends.forEach(be -> backendTags.put(be.getId(), be.getLocationTag())); // create connect context connectContext = UtFrameUtils.createDefaultCtx(); @@ -435,21 +460,16 @@ public void testConflictAlterOperations() throws Exception
{ // set un-partitioned table's real replication num // first we need to change be's tag - Map originTagMap = be.getTagMap(); - Map tagMap = Maps.newHashMap(); - tagMap.put(Tag.TYPE_LOCATION, "group1"); - be.setTagMap(tagMap); OlapTable tbl2 = (OlapTable) db.getTableOrMetaException("tbl2"); Partition partition = tbl2.getPartition(tbl2.getName()); Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl2.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); - stmt = "alter table test.tbl2 set ('replication_allocation' = 'tag.location.group1:1');"; + stmt = "alter table test.tbl2 set ('replication_allocation' = 'tag.location.group_a:1');"; alterTable(stmt, false); Assert.assertEquals((short) 1, (short) tbl2.getPartitionInfo().getReplicaAllocation(partition.getId()) - .getReplicaNumByTag(Tag.createNotCheck(Tag.TYPE_LOCATION, "group1"))); + .getReplicaNumByTag(Tag.createNotCheck(Tag.TYPE_LOCATION, "group_a"))); Assert.assertEquals((short) 1, (short) tbl2.getTableProperty().getReplicaAllocation() - .getReplicaNumByTag(Tag.createNotCheck(Tag.TYPE_LOCATION, "group1"))); - be.setTagMap(originTagMap); + .getReplicaNumByTag(Tag.createNotCheck(Tag.TYPE_LOCATION, "group_a"))); Thread.sleep(5000); // sleep to wait dynamic partition scheduler run // add partition without set replication num, and default num is 3. 
@@ -1202,6 +1222,148 @@ public void testDropInUseResource() throws Exception { Env.getCurrentEnv().getResourceMgr().dropResource(stmt); } + @Test + public void testModifyColocateGroupReplicaAlloc() throws Exception { + Config.enable_round_robin_create_tablet = true; + + createTable("CREATE TABLE test.col_tbl0\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int \n" + + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 4\n" + + "PROPERTIES('replication_num' = '2', 'colocate_with' = 'mod_group_0');"); + + createTable("CREATE TABLE test.col_tbl1\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int \n" + + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" + "PARTITION BY RANGE(k1)\n" + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 4\n" + + "PROPERTIES('replication_num' = '2', 'colocate_with' = 'mod_group_1');"); + + createTable("CREATE TABLE test.col_tbl2 (\n" + + "`uuid` varchar(255) NULL,\n" + + "`action_datetime` date NULL\n" + + ")\n" + + "DUPLICATE KEY(uuid)\n" + + "PARTITION BY RANGE(action_datetime)()\n" + + "DISTRIBUTED BY HASH(uuid) BUCKETS 4\n" + + "PROPERTIES\n" + + "(\n" + + "\"replication_num\" = \"2\",\n" + + "\"colocate_with\" = \"mod_group_2\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.time_unit\" = \"DAY\",\n" + + "\"dynamic_partition.end\" = \"2\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"4\",\n" + + "\"dynamic_partition.replication_num\" = \"2\"\n" + + ");\n"); + + Env env = Env.getCurrentEnv(); + Database db = env.getInternalCatalog().getDbOrMetaException("default_cluster:test"); + OlapTable tbl2 = (OlapTable) db.getTableOrMetaException("col_tbl2"); + for (int j = 0; true; j++) { + Thread.sleep(2000); + if (tbl2.getAllPartitions().size() > 0) { + break; + } + if (j >= 5) { + Assert.assertTrue("dynamic table not create partition", false); 
+ } + } + + RebalancerTestUtil.updateReplicaPathHash(); + + ReplicaAllocation newReplicaAlloc = new ReplicaAllocation(); + newReplicaAlloc.put(Tag.DEFAULT_BACKEND_TAG, (short) 1); + newReplicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, "group_a"), (short) 1); + newReplicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, "group_b"), (short) 1); + + for (int i = 0; i < 3; i++) { + String groupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); + String sql = "alter colocate group " + groupName + + " set ( 'replication_allocation' = '" + newReplicaAlloc.toCreateStmt() + "')"; + AlterColocateGroupStmt stmt = (AlterColocateGroupStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + DdlExecutor.execute(env, stmt); + + ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(groupName); + Assert.assertNotNull(groupSchema); + Assert.assertEquals(newReplicaAlloc, groupSchema.getReplicaAlloc()); + + OlapTable tbl = (OlapTable) db.getTableOrMetaException("col_tbl" + i); + Assert.assertEquals(newReplicaAlloc, tbl.getDefaultReplicaAllocation()); + if (i == 2) { + Assert.assertEquals(newReplicaAlloc, + tbl.getTableProperty().getDynamicPartitionProperty().getReplicaAllocation()); + } + for (Partition partition : tbl.getAllPartitions()) { + Assert.assertEquals(newReplicaAlloc, + tbl.getPartitionInfo().getReplicaAllocation(partition.getId())); + } + + if (i == 2) { + Assert.assertEquals(newReplicaAlloc, + tbl.getTableProperty().getDynamicPartitionProperty().getReplicaAllocation()); + } + } + + Config.enable_round_robin_create_tablet = false; + + for (int k = 0; true; k++) { + Thread.sleep(1000); // sleep to wait dynamic partition scheduler run + boolean allStable = true; + for (int i = 0; i < 3; i++) { + String groupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); + ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(groupName); + Assert.assertNotNull(groupSchema); + + if 
(env.getColocateTableIndex().isGroupUnstable(groupSchema.getGroupId())) { + allStable = false; + if (k >= 120) { + Assert.assertTrue(groupName + " is unstable" , false); + } + continue; + } + + Map backendReplicaNum = Maps.newHashMap(); + OlapTable tbl = (OlapTable) db.getTableOrMetaException("col_tbl" + i); + int tabletNum = 0; + for (Partition partition : tbl.getAllPartitions()) { + for (MaterializedIndex idx : partition.getMaterializedIndices( + MaterializedIndex.IndexExtState.VISIBLE)) { + for (Tablet tablet : idx.getTablets()) { + Map allocMap = Maps.newHashMap(); + tabletNum++; + for (Replica replica : tablet.getReplicas()) { + long backendId = replica.getBackendId(); + Tag tag = backendTags.get(backendId); + Assert.assertNotNull(tag); + short oldNum = allocMap.getOrDefault(tag, (short) 0); + allocMap.put(tag, (short) (oldNum + 1)); + backendReplicaNum.put(backendId, backendReplicaNum.getOrDefault(backendId, 0) + 1); + } + Assert.assertEquals(newReplicaAlloc.getAllocMap(), allocMap); + } + } + } + + Assert.assertTrue(tabletNum > 0); + + for (Map.Entry entry : backendReplicaNum.entrySet()) { + long backendId = entry.getKey(); + int replicaNum = entry.getValue(); + Tag tag = backendTags.get(backendId); + int sameTagReplicaNum = tabletNum * newReplicaAlloc.getAllocMap().getOrDefault(tag, (short) 0); + int sameTagBeNum = (int) (backendTags.values().stream().filter(t -> t.equals(tag)).count()); + Assert.assertEquals("backend " + backendId + " failed: " + " all backend replica num: " + + backendReplicaNum + ", all backend tag: " + backendTags, + sameTagReplicaNum / sameTagBeNum, replicaNum); + } + } + + if (allStable) { + break; + } + } + } + @Test public void testShowMV() throws Exception { createMV("CREATE MATERIALIZED VIEW test_mv as select k1 from test.show_test group by k1;", false); diff --git a/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java b/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java index 
407171a69c7ec9..8bb8581fd85220 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java +++ b/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java @@ -255,12 +255,16 @@ public static void createDorisClusterWithMultiTag(String runningDir, int backend FeConstants.runningUnitTest = true; FeConstants.enableInternalSchemaDb = false; int feRpcPort = startFEServer(runningDir); + List bes = Lists.newArrayList(); for (int i = 0; i < backendNum; i++) { String host = "127.0.0." + (i + 1); bes.add(createBackend(host, feRpcPort)); } + System.out.println("after create backend"); + checkBEHeartbeat(bes); // sleep to wait first heartbeat - Thread.sleep(6000); + // Thread.sleep(6000); + System.out.println("after create backend2"); } public static Backend createBackend(String beHost, int feRpcPort) throws IOException, InterruptedException { @@ -296,6 +300,7 @@ beHttpPort, beArrowFlightSqlPort, new DefaultHeartbeatServiceImpl(beThriftPort, diskInfo1.setTotalCapacityB(1000000); diskInfo1.setAvailableCapacityB(500000); diskInfo1.setDataUsedCapacityB(480000); + diskInfo1.setPathHash(be.getId()); disks.put(diskInfo1.getRootPath(), diskInfo1); be.setDisks(ImmutableMap.copyOf(disks)); be.setAlive(true); From 1178e8c590189e3bc2f4848398d3e9426cf2a1e2 Mon Sep 17 00:00:00 2001 From: yujun777 Date: Tue, 22 Aug 2023 16:39:06 +0800 Subject: [PATCH 4/7] remove wrong add file --- fe/fe-core/src/test/java/org/apache/doris/:w | 1336 ------------------ 1 file changed, 1336 deletions(-) delete mode 100644 fe/fe-core/src/test/java/org/apache/doris/:w diff --git a/fe/fe-core/src/test/java/org/apache/doris/:w b/fe/fe-core/src/test/java/org/apache/doris/:w deleted file mode 100644 index 5e10fa2cb53d1f..00000000000000 --- a/fe/fe-core/src/test/java/org/apache/doris/:w +++ /dev/null @@ -1,1336 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements.
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package org.apache.doris.alter; - -import org.apache.doris.analysis.AlterColocateGroupStmt; -import org.apache.doris.analysis.AlterTableStmt; -import org.apache.doris.analysis.CreateDbStmt; -import org.apache.doris.analysis.CreateMaterializedViewStmt; -import org.apache.doris.analysis.CreatePolicyStmt; -import org.apache.doris.analysis.CreateResourceStmt; -import org.apache.doris.analysis.CreateTableStmt; -import org.apache.doris.analysis.DateLiteral; -import org.apache.doris.analysis.DropResourceStmt; -import org.apache.doris.analysis.ShowCreateMaterializedViewStmt; -import org.apache.doris.catalog.ColocateGroupSchema; -import org.apache.doris.catalog.ColocateTableIndex.GroupId; -import org.apache.doris.catalog.Column; -import org.apache.doris.catalog.DataProperty; -import org.apache.doris.catalog.Database; -import org.apache.doris.catalog.Env; -import org.apache.doris.catalog.MaterializedIndex; -import org.apache.doris.catalog.MysqlTable; -import org.apache.doris.catalog.OdbcTable; -import org.apache.doris.catalog.OlapTable; -import org.apache.doris.catalog.Partition; -import org.apache.doris.catalog.PrimitiveType; -import org.apache.doris.catalog.Replica; -import org.apache.doris.catalog.ReplicaAllocation; -import org.apache.doris.catalog.Table; -import 
org.apache.doris.catalog.Tablet; -import org.apache.doris.catalog.TabletInvertedIndex; -import org.apache.doris.catalog.Type; -import org.apache.doris.common.AnalysisException; -import org.apache.doris.common.Config; -import org.apache.doris.common.DdlException; -import org.apache.doris.common.ExceptionChecker; -import org.apache.doris.common.FeConstants; -import org.apache.doris.common.util.TimeUtils; -import org.apache.doris.qe.ConnectContext; -import org.apache.doris.qe.DdlExecutor; -import org.apache.doris.qe.ShowExecutor; -import org.apache.doris.resource.Tag; -import org.apache.doris.system.Backend; -import org.apache.doris.thrift.TStorageMedium; -import org.apache.doris.utframe.UtFrameUtils; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -public class AlterTest { - - private static String runningDir = "fe/mocked/AlterTest/" + UUID.randomUUID().toString() + "/"; - - private static ConnectContext connectContext; - - private static Map backendTags; - - @BeforeClass - public static void beforeClass() throws Exception { - FeConstants.runningUnitTest = true; - FeConstants.default_scheduler_interval_millisecond = 100; - FeConstants.tablet_checker_interval_ms = 100; - Config.tablet_checker_interval_ms = 100; - Config.dynamic_partition_check_interval_seconds = 1; - Config.disable_storage_medium_check = true; - Config.enable_storage_policy = true; - Config.disable_balance = true; - Config.schedule_batch_size = 200; - UtFrameUtils.createDorisClusterWithMultiTag(runningDir, 5); - - List backends = Env.getCurrentSystemInfo().getIdToBackend().values().asList(); - - Map tagMap = Maps.newHashMap(); - tagMap.put(Tag.TYPE_LOCATION, "group_a"); - backends.get(2).setTagMap(tagMap); - backends.get(3).setTagMap(tagMap); - - tagMap = 
Maps.newHashMap(); - tagMap.put(Tag.TYPE_LOCATION, "group_b"); - backends.get(4).setTagMap(tagMap); - - backendTags = Maps.newHashMap(); - backends.forEach(be -> backendTags.put(be.getId(), be.getLocationTag())); - - // create connect context - connectContext = UtFrameUtils.createDefaultCtx(); - // create database - String createDbStmtStr = "create database test;"; - CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, connectContext); - Env.getCurrentEnv().createDb(createDbStmt); - - createTable("CREATE TABLE test.tbl1\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int sum\n" + ")\n" - + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" - + "PROPERTIES('replication_num' = '1');"); - - createTable("CREATE TABLE test.tbl2\n" + "(\n" + " k1 date,\n" + " v1 int sum\n" + ")\n" - + "DISTRIBUTED BY HASH (k1) BUCKETS 3\n" + "PROPERTIES('replication_num' = '1');"); - - createTable("CREATE TABLE test.tbl3\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int sum\n" + ")\n" - + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" - + "PROPERTIES('replication_num' = '1');"); - - createTable("CREATE TABLE test.tbl4\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int sum\n" + ")\n" - + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01'),\n" - + " PARTITION p3 values less than('2020-04-01'),\n" - + " PARTITION p4 values less than('2020-05-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" - + "PROPERTIES" + "(" + " 'replication_num' = '1',\n" + " 'in_memory' = 'false',\n" - + " 'storage_medium' = 'SSD',\n" + " 'storage_cooldown_time' = '2999-12-31 00:00:00'\n" + ");"); - - 
createTable("CREATE TABLE test.tbl5\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int \n" - + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" + "PARTITION BY RANGE(k1)\n" + "(\n" - + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" - + "PROPERTIES('replication_num' = '1');"); - - createTable( - "CREATE TABLE test.tbl6\n" + "(\n" + " k1 datetime(3),\n" + " k2 datetime(3),\n" - + " v1 int \n," - + " v2 datetime(3)\n" + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" - + "PARTITION BY RANGE(k1)\n" + "(\n" - + " PARTITION p1 values less than('2020-02-01 00:00:00'),\n" - + " PARTITION p2 values less than('2020-03-01 00:00:00')\n" + ")\n" - + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + "PROPERTIES('replication_num' = '1','enable_unique_key_merge_on_write' = 'false');"); - - createTable("create external table test.odbc_table\n" + "( `k1` bigint(20) COMMENT \"\",\n" - + " `k2` datetime COMMENT \"\",\n" + " `k3` varchar(20) COMMENT \"\",\n" - + " `k4` varchar(100) COMMENT \"\",\n" + " `k5` float COMMENT \"\"\n" + ")ENGINE=ODBC\n" - + "PROPERTIES (\n" + "\"host\" = \"127.0.0.1\",\n" + "\"port\" = \"3306\",\n" + "\"user\" = \"root\",\n" - + "\"password\" = \"123\",\n" + "\"database\" = \"db1\",\n" + "\"table\" = \"tbl1\",\n" - + "\"driver\" = \"Oracle Driver\",\n" + "\"odbc_type\" = \"oracle\"\n" + ");"); - - // s3 resource - createRemoteStorageResource( - "create resource \"remote_s3\"\n" + "properties\n" + "(\n" + " \"type\" = \"s3\", \n" - + " \"AWS_ENDPOINT\" = \"bj\",\n" + " \"AWS_REGION\" = \"bj\",\n" - + " \"AWS_ROOT_PATH\" = \"/path/to/root\",\n" + " \"AWS_ACCESS_KEY\" = \"bbb\",\n" - + " \"AWS_SECRET_KEY\" = \"aaaa\",\n" + " \"AWS_MAX_CONNECTIONS\" = \"50\",\n" - + " \"AWS_REQUEST_TIMEOUT_MS\" = \"3000\",\n" + " \"AWS_CONNECTION_TIMEOUT_MS\" = \"1000\",\n" - + " \"AWS_BUCKET\" = \"test-bucket\", \"s3_validity_check\" = \"false\"\n" - + ");"); - - createRemoteStorageResource( - 
"create resource \"remote_s3_1\"\n" + "properties\n" + "(\n" + " \"type\" = \"s3\", \n" - + " \"AWS_ENDPOINT\" = \"bj\",\n" + " \"AWS_REGION\" = \"bj\",\n" - + " \"AWS_ROOT_PATH\" = \"/path/to/root\",\n" + " \"AWS_ACCESS_KEY\" = \"bbb\",\n" - + " \"AWS_SECRET_KEY\" = \"aaaa\",\n" + " \"AWS_MAX_CONNECTIONS\" = \"50\",\n" - + " \"AWS_REQUEST_TIMEOUT_MS\" = \"3000\",\n" + " \"AWS_CONNECTION_TIMEOUT_MS\" = \"1000\",\n" - + " \"AWS_BUCKET\" = \"test-bucket\", \"s3_validity_check\" = \"false\"\n" - + ");"); - - createRemoteStoragePolicy( - "CREATE STORAGE POLICY testPolicy\n" + "PROPERTIES(\n" + " \"storage_resource\" = \"remote_s3\",\n" - + " \"cooldown_datetime\" = \"2100-05-10 00:00:00\"\n" + ");"); - - createRemoteStoragePolicy( - "CREATE STORAGE POLICY testPolicy2\n" + "PROPERTIES(\n" + " \"storage_resource\" = \"remote_s3\",\n" - + " \"cooldown_ttl\" = \"1\"\n" + ");"); - - createRemoteStoragePolicy( - "CREATE STORAGE POLICY testPolicyAnotherResource\n" + "PROPERTIES(\n" + " \"storage_resource\" = \"remote_s3_1\",\n" - + " \"cooldown_ttl\" = \"1\"\n" + ");"); - - createTable("CREATE TABLE test.tbl_remote\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int sum\n" - + ")\n" + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01'),\n" - + " PARTITION p3 values less than('2020-04-01'),\n" - + " PARTITION p4 values less than('2020-05-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" - + "PROPERTIES" + "(" + " 'replication_num' = '1',\n" + " 'in_memory' = 'false',\n" - + " 'storage_medium' = 'SSD',\n" + " 'storage_cooldown_time' = '2100-05-09 00:00:00',\n" - + " 'storage_policy' = 'testPolicy'\n" + ");"); - - createTable("create table test.show_test (k1 int, k2 int) distributed by hash(k1) " - + "buckets 1 properties(\"replication_num\" = \"1\");"); - - createTable("create table test.unique_sequence_col (k1 int, v1 int, v2 date) ENGINE=OLAP " - + " UNIQUE KEY(`k1`) DISTRIBUTED BY 
HASH(`k1`) BUCKETS 1" - + " PROPERTIES (\"replication_num\" = \"1\", \"function_column.sequence_col\" = \"v1\");"); - } - - @AfterClass - public static void tearDown() { - File file = new File(runningDir); - file.delete(); - } - - private static void createTable(String sql) throws Exception { - Config.enable_odbc_table = true; - CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); - Env.getCurrentEnv().createTable(createTableStmt); - } - - private static void createRemoteStorageResource(String sql) throws Exception { - CreateResourceStmt stmt = (CreateResourceStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); - Env.getCurrentEnv().getResourceMgr().createResource(stmt); - } - - private static void createRemoteStoragePolicy(String sql) throws Exception { - CreatePolicyStmt stmt = (CreatePolicyStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); - Env.getCurrentEnv().getPolicyMgr().createPolicy(stmt); - } - - private static void alterTable(String sql, boolean expectedException) { - try { - AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); - Env.getCurrentEnv().alterTable(alterTableStmt); - if (expectedException) { - Assert.fail(); - } - } catch (Exception e) { - e.printStackTrace(); - if (!expectedException) { - Assert.fail(); - } - } - } - - private static void createMV(String sql, boolean expectedException) { - try { - CreateMaterializedViewStmt createMaterializedViewStmt - = (CreateMaterializedViewStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); - Env.getCurrentEnv().createMaterializedView(createMaterializedViewStmt); - if (expectedException) { - Assert.fail(); - } - } catch (Exception e) { - e.printStackTrace(); - if (!expectedException) { - Assert.fail(); - } - } - } - - private static void alterTableWithExceptionMsg(String sql, String msg) throws Exception { - try { - AlterTableStmt alterTableStmt = (AlterTableStmt) 
UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); - Env.getCurrentEnv().alterTable(alterTableStmt); - } catch (Exception e) { - Assert.assertEquals(msg, e.getMessage()); - } - } - - @Test - public void alterTableWithEnableFeature() throws Exception { - String stmt = "alter table test.tbl5 enable feature \"SEQUENCE_LOAD\" with properties (\"function_column.sequence_type\" = \"int\") "; - alterTable(stmt, false); - - stmt = "alter table test.tbl5 enable feature \"SEQUENCE_LOAD\" with properties (\"function_column.sequence_type\" = \"double\") "; - alterTable(stmt, true); - } - - @Test - public void alterTableModifyComment() throws Exception { - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - Table tbl = db.getTableOrMetaException("tbl5"); - - // table comment - String stmt = "alter table test.tbl5 modify comment 'comment1'"; - alterTable(stmt, false); - Assert.assertEquals("comment1", tbl.getComment()); - - // column comment - stmt = "alter table test.tbl5 modify column k1 comment 'k1'"; - alterTable(stmt, false); - Assert.assertEquals("k1", tbl.getColumn("k1").getComment()); - - // columns comment - stmt = "alter table test.tbl5 modify column k1 comment 'k11', modify column v1 comment 'v11'"; - alterTable(stmt, false); - Assert.assertEquals("k11", tbl.getColumn("k1").getComment()); - Assert.assertEquals("v11", tbl.getColumn("v1").getComment()); - - // empty comment - stmt = "alter table test.tbl5 modify comment ''"; - alterTable(stmt, false); - Assert.assertEquals("OLAP", tbl.getComment()); - - // empty column comment - stmt = "alter table test.tbl5 modify column k1 comment '', modify column v1 comment 'v111'"; - alterTable(stmt, false); - Assert.assertEquals("", tbl.getColumn("k1").getComment()); - Assert.assertEquals("v111", tbl.getColumn("v1").getComment()); - - // unknown column - stmt = "alter table test.tbl5 modify column x comment '', modify column v1 comment 'v111'"; - alterTable(stmt, true); - 
Assert.assertEquals("", tbl.getColumn("k1").getComment()); - Assert.assertEquals("v111", tbl.getColumn("v1").getComment()); - - // duplicate column - stmt = "alter table test.tbl5 modify column k1 comment '', modify column k1 comment 'v111'"; - alterTable(stmt, true); - Assert.assertEquals("", tbl.getColumn("k1").getComment()); - Assert.assertEquals("v111", tbl.getColumn("v1").getComment()); - } - - @Test - public void testConflictAlterOperations() throws Exception { - String stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01'), add partition p4 values less than('2020-05-01')"; - alterTable(stmt, true); - - stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01'), drop partition p4"; - alterTable(stmt, true); - - stmt = "alter table test.tbl1 drop partition p3, drop partition p4"; - alterTable(stmt, true); - - stmt = "alter table test.tbl1 drop partition p3, add column k3 int"; - alterTable(stmt, true); - - // no conflict - stmt = "alter table test.tbl1 add column k3 int, add column k4 int"; - alterTable(stmt, false); - waitSchemaChangeJobDone(false); - - stmt = "alter table test.tbl1 add rollup r1 (k1)"; - alterTable(stmt, false); - waitSchemaChangeJobDone(true); - - stmt = "alter table test.tbl1 add rollup r2 (k1), r3 (k1)"; - alterTable(stmt, false); - waitSchemaChangeJobDone(true); - - // enable dynamic partition - // not adding the `start` property so that it won't drop the origin partition p1, p2 and p3 - stmt = "alter table test.tbl1 set (\n" - + "'dynamic_partition.enable' = 'true',\n" - + "'dynamic_partition.time_unit' = 'DAY',\n" - + "'dynamic_partition.end' = '3',\n" - + "'dynamic_partition.prefix' = 'p',\n" - + "'dynamic_partition.buckets' = '3'\n" - + " );"; - alterTable(stmt, false); - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - OlapTable tbl = (OlapTable) db.getTableOrMetaException("tbl1"); - 
Assert.assertTrue(tbl.getTableProperty().getDynamicPartitionProperty().getEnable()); - Assert.assertEquals(4, tbl.getIndexIdToSchema().size()); - - // add partition when dynamic partition is enable - stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; - alterTable(stmt, true); - - // add temp partition when dynamic partition is enable - stmt = "alter table test.tbl1 add temporary partition tp3 values less than('2020-04-01') distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; - alterTable(stmt, false); - Assert.assertEquals(1, tbl.getTempPartitions().size()); - - // disable the dynamic partition - stmt = "alter table test.tbl1 set ('dynamic_partition.enable' = 'false')"; - alterTable(stmt, false); - Assert.assertFalse(tbl.getTableProperty().getDynamicPartitionProperty().getEnable()); - - // add partition when dynamic partition is disable - stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') distributed by hash(k2) buckets 4"; - alterTable(stmt, false); - - // set table's default replication num - Assert.assertEquals((short) 1, tbl.getDefaultReplicaAllocation().getTotalReplicaNum()); - stmt = "alter table test.tbl1 set ('default.replication_num' = '3');"; - alterTable(stmt, false); - Assert.assertEquals((short) 3, tbl.getDefaultReplicaAllocation().getTotalReplicaNum()); - - // set range table's real replication num - Partition p1 = tbl.getPartition("p1"); - Assert.assertEquals(Short.valueOf("1"), - Short.valueOf(tbl.getPartitionInfo().getReplicaAllocation(p1.getId()).getTotalReplicaNum())); - stmt = "alter table test.tbl1 set ('replication_num' = '3');"; - alterTable(stmt, true); - Assert.assertEquals(Short.valueOf("1"), - Short.valueOf(tbl.getPartitionInfo().getReplicaAllocation(p1.getId()).getTotalReplicaNum())); - - // set un-partitioned table's real replication num - // first we need to change be's tag - OlapTable tbl2 
= (OlapTable) db.getTableOrMetaException("tbl2"); - Partition partition = tbl2.getPartition(tbl2.getName()); - Assert.assertEquals(Short.valueOf("1"), - Short.valueOf(tbl2.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); - stmt = "alter table test.tbl2 set ('replication_allocation' = 'tag.location.group_a:1');"; - alterTable(stmt, false); - Assert.assertEquals((short) 1, (short) tbl2.getPartitionInfo().getReplicaAllocation(partition.getId()) - .getReplicaNumByTag(Tag.createNotCheck(Tag.TYPE_LOCATION, "group_a"))); - Assert.assertEquals((short) 1, (short) tbl2.getTableProperty().getReplicaAllocation() - .getReplicaNumByTag(Tag.createNotCheck(Tag.TYPE_LOCATION, "group_a"))); - - Thread.sleep(5000); // sleep to wait dynamic partition scheduler run - // add partition without set replication num, and default num is 3. - stmt = "alter table test.tbl1 add partition p4 values less than('2020-04-10')"; - alterTable(stmt, true); - - // add partition when dynamic partition is disable - stmt = "alter table test.tbl1 add partition p4 values less than('2020-04-10') ('replication_num' = '1')"; - alterTable(stmt, false); - } - - @Test - public void testAlterDateV2Operations() throws Exception { - String stmt = "alter table test.tbl6 add partition p3 values less than('2020-04-01 00:00:00')," - + "add partition p4 values less than('2020-05-01 00:00:00')"; - alterTable(stmt, true); - - stmt = "alter table test.tbl6 add partition p3 values less than('2020-04-01 00:00:00'), drop partition p4"; - alterTable(stmt, true); - - stmt = "alter table test.tbl6 drop partition p3, drop partition p4"; - alterTable(stmt, true); - - stmt = "alter table test.tbl6 drop partition p3, add column k3 datetime(6)"; - alterTable(stmt, true); - - // no conflict - stmt = "alter table test.tbl6 add column k3 int, add column k4 datetime(6)"; - alterTable(stmt, false); - waitSchemaChangeJobDone(false); - - stmt = "alter table test.tbl6 add rollup r1 (k2, k1)"; - 
alterTable(stmt, false); - waitSchemaChangeJobDone(true); - - stmt = "alter table test.tbl6 add rollup r2 (k2, k1), r3 (k2, k1)"; - alterTable(stmt, false); - waitSchemaChangeJobDone(true); - - // enable dynamic partition - // not adding the `start` property so that it won't drop the origin partition p1, p2 and p3 - stmt = "alter table test.tbl6 set (\n" - + "'dynamic_partition.enable' = 'true',\n" - + "'dynamic_partition.time_unit' = 'DAY',\n" - + "'dynamic_partition.end' = '3',\n" - + "'dynamic_partition.prefix' = 'p',\n" - + "'dynamic_partition.buckets' = '3'\n" - + " );"; - alterTable(stmt, false); - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - OlapTable tbl = (OlapTable) db.getTableOrMetaException("tbl6"); - Assert.assertTrue(tbl.getTableProperty().getDynamicPartitionProperty().getEnable()); - Assert.assertEquals(4, tbl.getIndexIdToSchema().size()); - - // add partition when dynamic partition is enable - stmt = "alter table test.tbl6 add partition p3 values less than('2020-04-01 00:00:00') distributed by" - + " hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; - alterTable(stmt, true); - - // add temp partition when dynamic partition is enable - stmt = "alter table test.tbl6 add temporary partition tp3 values less than('2020-04-01 00:00:00') distributed" - + " by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; - alterTable(stmt, false); - Assert.assertEquals(1, tbl.getTempPartitions().size()); - - // disable the dynamic partition - stmt = "alter table test.tbl6 set ('dynamic_partition.enable' = 'false')"; - alterTable(stmt, false); - Assert.assertFalse(tbl.getTableProperty().getDynamicPartitionProperty().getEnable()); - - String alterStmt = "alter table test.tbl6 set ('in_memory' = 'true')"; - String errorMsg = "errCode = 2, detailMessage = Not support set 'in_memory'='true' now!"; - alterTableWithExceptionMsg(alterStmt, errorMsg); - - // add partition when dynamic partition is disable - stmt = 
"alter table test.tbl6 add partition p3 values less than('2020-04-01 00:00:00') distributed" - + " by hash(k2) buckets 4"; - alterTable(stmt, false); - - // set table's default replication num - Assert.assertEquals((short) 1, tbl.getDefaultReplicaAllocation().getTotalReplicaNum()); - stmt = "alter table test.tbl6 set ('default.replication_num' = '3');"; - alterTable(stmt, false); - Assert.assertEquals((short) 3, tbl.getDefaultReplicaAllocation().getTotalReplicaNum()); - - // set range table's real replication num - Partition p1 = tbl.getPartition("p1"); - Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicaAllocation(p1.getId()) - .getTotalReplicaNum())); - stmt = "alter table test.tbl6 set ('replication_num' = '3');"; - alterTable(stmt, true); - Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicaAllocation(p1.getId()) - .getTotalReplicaNum())); - } - - // test batch update range partitions' properties - @Test - public void testBatchUpdatePartitionProperties() throws Exception { - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - OlapTable tbl4 = (OlapTable) db.getTableOrMetaException("tbl4"); - Partition p1 = tbl4.getPartition("p1"); - Partition p2 = tbl4.getPartition("p2"); - Partition p3 = tbl4.getPartition("p3"); - Partition p4 = tbl4.getPartition("p4"); - - // batch update replication_num property - String stmt = "alter table test.tbl4 modify partition (p1, p2, p4) set ('replication_num' = '1')"; - List partitionList = Lists.newArrayList(p1, p2, p4); - for (Partition partition : partitionList) { - Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl4.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); - } - alterTable(stmt, false); - for (Partition partition : partitionList) { - Assert.assertEquals(Short.valueOf("1"), 
Short.valueOf(tbl4.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); - } - Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl4.getPartitionInfo().getReplicaAllocation(p3.getId()).getTotalReplicaNum())); - - // batch update in_memory property - stmt = "alter table test.tbl4 modify partition (p1, p2, p3) set ('in_memory' = 'false')"; - partitionList = Lists.newArrayList(p1, p2, p3); - for (Partition partition : partitionList) { - Assert.assertEquals(false, tbl4.getPartitionInfo().getIsInMemory(partition.getId())); - } - alterTable(stmt, false); - for (Partition partition : partitionList) { - Assert.assertEquals(false, tbl4.getPartitionInfo().getIsInMemory(partition.getId())); - } - Assert.assertEquals(false, tbl4.getPartitionInfo().getIsInMemory(p4.getId())); - - String alterStmt = "alter table test.tbl4 modify partition (p1, p2, p3) set ('in_memory' = 'true')"; - String errorMsg = "errCode = 2, detailMessage = Not support set 'in_memory'='true' now!"; - alterTableWithExceptionMsg(alterStmt, errorMsg); - - // batch update storage_medium and storage_cooldown properties - // alter storage_medium - stmt = "alter table test.tbl4 modify partition (p3, p4) set ('storage_medium' = 'HDD')"; - DateLiteral dateLiteral = new DateLiteral("2999-12-31 00:00:00", Type.DATETIME); - long cooldownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); - DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, cooldownTimeMs, ""); - partitionList = Lists.newArrayList(p3, p4); - for (Partition partition : partitionList) { - Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(partition.getId())); - } - alterTable(stmt, false); - DataProperty newDataProperty = new DataProperty(TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS, ""); - for (Partition partition : partitionList) { - Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(partition.getId())); - } - 
Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(p1.getId())); - Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(p2.getId())); - - // alter cooldown_time - stmt = "alter table test.tbl4 modify partition (p1, p2) set ('storage_cooldown_time' = '2100-12-31 00:00:00')"; - alterTable(stmt, false); - - dateLiteral = new DateLiteral("2100-12-31 00:00:00", Type.DATETIME); - cooldownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); - DataProperty newDataProperty1 = new DataProperty(TStorageMedium.SSD, cooldownTimeMs, ""); - partitionList = Lists.newArrayList(p1, p2); - for (Partition partition : partitionList) { - Assert.assertEquals(newDataProperty1, tbl4.getPartitionInfo().getDataProperty(partition.getId())); - } - Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(p3.getId())); - Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(p4.getId())); - - // batch update range partitions' properties with * - stmt = "alter table test.tbl4 modify partition (*) set ('replication_num' = '1')"; - partitionList = Lists.newArrayList(p1, p2, p3, p4); - alterTable(stmt, false); - for (Partition partition : partitionList) { - Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl4.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum())); - } - } - - @Test - public void testAlterRemoteStorageTableDataProperties() throws Exception { - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - OlapTable tblRemote = (OlapTable) db.getTableOrMetaException("tbl_remote"); - Partition p1 = tblRemote.getPartition("p1"); - Partition p2 = tblRemote.getPartition("p2"); - Partition p3 = tblRemote.getPartition("p3"); - Partition p4 = tblRemote.getPartition("p4"); - - DateLiteral dateLiteral = new DateLiteral("2100-05-09 00:00:00", Type.DATETIME); - long cooldownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); 
- DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, cooldownTimeMs, "testPolicy"); - List partitionList = Lists.newArrayList(p2, p3, p4); - for (Partition partition : partitionList) { - Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); - } - - // alter cooldown_time - String stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_cooldown_time' = '2100-04-01 22:22:22')"; - alterTable(stmt, false); - DateLiteral newDateLiteral = new DateLiteral("2100-04-01 22:22:22", Type.DATETIME); - long newCooldownTimeMs = newDateLiteral.unixTimestamp(TimeUtils.getTimeZone()); - DataProperty dataProperty2 = new DataProperty(TStorageMedium.SSD, newCooldownTimeMs, "testPolicy"); - for (Partition partition : partitionList) { - Assert.assertEquals(dataProperty2, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); - } - Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); - - // alter storage_medium - stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_medium' = 'HDD')"; - alterTable(stmt, false); - DataProperty dataProperty1 = new DataProperty( - TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS, "testPolicy"); - for (Partition partition : partitionList) { - Assert.assertEquals(dataProperty1, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); - } - Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); - - // alter remote_storage to one not exist policy - stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_policy' = 'testPolicy3')"; - alterTable(stmt, true); - Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); - - // alter remote_storage to one another one which points to another resource - stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_policy' 
= 'testPolicyAnotherResource')"; - alterTable(stmt, true); - Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); - - // alter recover to old state - stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set (" - + "'storage_medium' = 'SSD', " - + "'storage_cooldown_time' = '2100-05-09 00:00:00'" - + ")"; - alterTable(stmt, false); - for (Partition partition : partitionList) { - Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(partition.getId())); - } - Assert.assertEquals(oldDataProperty, tblRemote.getPartitionInfo().getDataProperty(p1.getId())); - - } - - @Test - public void testDynamicPartitionDropAndAdd() throws Exception { - // test day range - String stmt = "alter table test.tbl3 set (\n" - + "'dynamic_partition.enable' = 'true',\n" - + "'dynamic_partition.time_unit' = 'DAY',\n" - + "'dynamic_partition.start' = '-3',\n" - + "'dynamic_partition.end' = '3',\n" - + "'dynamic_partition.prefix' = 'p',\n" - + "'dynamic_partition.buckets' = '3'\n" - + " );"; - alterTable(stmt, false); - Thread.sleep(5000); // sleep to wait dynamic partition scheduler run - - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - OlapTable tbl = (OlapTable) db.getTableOrMetaException("tbl3"); - Assert.assertEquals(4, tbl.getPartitionNames().size()); - Assert.assertNull(tbl.getPartition("p1")); - Assert.assertNull(tbl.getPartition("p2")); - } - - private void waitSchemaChangeJobDone(boolean rollupJob) throws Exception { - Map alterJobs = Env.getCurrentEnv().getSchemaChangeHandler().getAlterJobsV2(); - if (rollupJob) { - alterJobs = Env.getCurrentEnv().getMaterializedViewHandler().getAlterJobsV2(); - } - for (AlterJobV2 alterJobV2 : alterJobs.values()) { - while (!alterJobV2.getJobState().isFinalState()) { - System.out.println("alter job " + alterJobV2.getJobId() + " is running. 
state: " + alterJobV2.getJobState()); - Thread.sleep(1000); - } - System.out.println(alterJobV2.getType() + " alter job " + alterJobV2.getJobId() + " is done. state: " + alterJobV2.getJobState()); - Assert.assertEquals(AlterJobV2.JobState.FINISHED, alterJobV2.getJobState()); - Database db = - Env.getCurrentInternalCatalog().getDbOrMetaException(alterJobV2.getDbId()); - OlapTable tbl = (OlapTable) db.getTableOrMetaException(alterJobV2.getTableId()); - while (tbl.getState() != OlapTable.OlapTableState.NORMAL) { - Thread.sleep(1000); - } - } - } - - @Test - public void testSetDynamicPropertiesInNormalTable() throws Exception { - String tableName = "no_dynamic_table"; - String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" - + " `k1` date NULL COMMENT \"\",\n" - + " `k2` int NULL COMMENT \"\",\n" - + " `k3` smallint NULL COMMENT \"\",\n" - + " `v1` varchar(2048) NULL COMMENT \"\",\n" - + " `v2` datetime NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" - + "COMMENT \"OLAP\"\n" - + "PARTITION BY RANGE (k1)\n" - + "(\n" - + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" - + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" - + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" - + ")\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\"\n" - + ");"; - createTable(createOlapTblStmt); - String alterStmt = "alter table test." + tableName + " set (\"dynamic_partition.enable\" = \"true\");"; - String errorMsg = "errCode = 2, detailMessage = Table default_cluster:test.no_dynamic_table is not a dynamic partition table. " - + "Use command `HELP ALTER TABLE` to see how to change a normal table to a dynamic partition table."; - alterTableWithExceptionMsg(alterStmt, errorMsg); - // test set dynamic properties in a no dynamic partition table - String stmt = "alter table test." 
+ tableName + " set (\n" - + "'dynamic_partition.enable' = 'true',\n" - + "'dynamic_partition.time_unit' = 'DAY',\n" - + "'dynamic_partition.start' = '-3',\n" - + "'dynamic_partition.end' = '3',\n" - + "'dynamic_partition.prefix' = 'p',\n" - + "'dynamic_partition.buckets' = '3'\n" - + " );"; - alterTable(stmt, false); - } - - @Test - public void testSetDynamicPropertiesInDynamicPartitionTable() throws Exception { - String tableName = "dynamic_table"; - String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" - + " `k1` date NULL COMMENT \"\",\n" - + " `k2` int NULL COMMENT \"\",\n" - + " `k3` smallint NULL COMMENT \"\",\n" - + " `v1` varchar(2048) NULL COMMENT \"\",\n" - + " `v2` datetime NULL COMMENT \"\"\n" - + ") ENGINE=OLAP\n" - + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" - + "COMMENT \"OLAP\"\n" - + "PARTITION BY RANGE (k1)\n" - + "(\n" - + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" - + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" - + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" - + ")\n" - + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" - + "PROPERTIES (\n" - + "\"replication_num\" = \"1\",\n" - + "\"dynamic_partition.enable\" = \"true\",\n" - + "\"dynamic_partition.start\" = \"-3\",\n" - + "\"dynamic_partition.end\" = \"3\",\n" - + "\"dynamic_partition.time_unit\" = \"day\",\n" - + "\"dynamic_partition.prefix\" = \"p\",\n" - + "\"dynamic_partition.buckets\" = \"1\"\n" - + ");"; - - createTable(createOlapTblStmt); - String alterStmt1 = "alter table test." + tableName + " set (\"dynamic_partition.enable\" = \"false\");"; - alterTable(alterStmt1, false); - String alterStmt2 = "alter table test." + tableName + " set (\"dynamic_partition.time_unit\" = \"week\");"; - alterTable(alterStmt2, false); - String alterStmt3 = "alter table test." + tableName + " set (\"dynamic_partition.start\" = \"-10\");"; - alterTable(alterStmt3, false); - String alterStmt4 = "alter table test." 
+ tableName + " set (\"dynamic_partition.end\" = \"10\");"; - alterTable(alterStmt4, false); - String alterStmt5 = "alter table test." + tableName + " set (\"dynamic_partition.prefix\" = \"pp\");"; - alterTable(alterStmt5, false); - String alterStmt6 = "alter table test." + tableName + " set (\"dynamic_partition.buckets\" = \"5\");"; - alterTable(alterStmt6, false); - } - - @Test - public void testReplaceTable() throws Exception { - String stmt1 = "CREATE TABLE test.replace1\n" - + "(\n" - + " k1 int, k2 int, k3 int sum\n" - + ")\n" - + "AGGREGATE KEY(k1, k2)\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" - + "rollup (\n" - + "r1(k1),\n" - + "r2(k2, k3)\n" - + ")\n" - + "PROPERTIES(\"replication_num\" = \"1\");"; - - - String stmt2 = "CREATE TABLE test.r1\n" - + "(\n" - + " k1 int, k2 int\n" - + ")\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" - + "PROPERTIES(\"replication_num\" = \"1\");"; - - String stmt3 = "CREATE TABLE test.replace2\n" - + "(\n" - + " k1 int, k2 int\n" - + ")\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" - + "PROPERTIES(\"replication_num\" = \"1\");"; - - String stmt4 = "CREATE TABLE test.replace3\n" - + "(\n" - + " k1 int, k2 int, k3 int sum\n" - + ")\n" - + "PARTITION BY RANGE(k1)\n" - + "(\n" - + "\tPARTITION p1 values less than(\"100\"),\n" - + "\tPARTITION p2 values less than(\"200\")\n" - + ")\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 1\n" - + "rollup (\n" - + "r3(k1),\n" - + "r4(k2, k3)\n" - + ")\n" - + "PROPERTIES(\"replication_num\" = \"1\");"; - - createTable(stmt1); - createTable(stmt2); - createTable(stmt3); - createTable(stmt4); - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - - // table name -> tabletIds - Map> tblNameToTabletIds = Maps.newHashMap(); - OlapTable replace1Tbl = (OlapTable) db.getTableOrMetaException("replace1"); - OlapTable r1Tbl = (OlapTable) db.getTableOrMetaException("r1"); - OlapTable replace2Tbl = (OlapTable) db.getTableOrMetaException("replace2"); - OlapTable 
replace3Tbl = (OlapTable) db.getTableOrMetaException("replace3"); - - tblNameToTabletIds.put("replace1", Lists.newArrayList()); - for (Partition partition : replace1Tbl.getAllPartitions()) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { - for (Tablet tablet : index.getTablets()) { - tblNameToTabletIds.get("replace1").add(tablet.getId()); - } - } - } - - tblNameToTabletIds.put("r1", Lists.newArrayList()); - for (Partition partition : r1Tbl.getAllPartitions()) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { - for (Tablet tablet : index.getTablets()) { - tblNameToTabletIds.get("r1").add(tablet.getId()); - } - } - } - - tblNameToTabletIds.put("replace2", Lists.newArrayList()); - for (Partition partition : replace2Tbl.getAllPartitions()) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { - for (Tablet tablet : index.getTablets()) { - tblNameToTabletIds.get("replace2").add(tablet.getId()); - } - } - } - - tblNameToTabletIds.put("replace3", Lists.newArrayList()); - for (Partition partition : replace3Tbl.getAllPartitions()) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { - for (Tablet tablet : index.getTablets()) { - tblNameToTabletIds.get("replace3").add(tablet.getId()); - } - } - } - - // name conflict - String replaceStmt = "ALTER TABLE test.replace1 REPLACE WITH TABLE r1"; - alterTable(replaceStmt, true); - - // replace1 with replace2 - replaceStmt = "ALTER TABLE test.replace1 REPLACE WITH TABLE replace2"; - OlapTable replace1 = (OlapTable) db.getTableOrMetaException("replace1"); - OlapTable replace2 = (OlapTable) db.getTableOrMetaException("replace2"); - Assert.assertEquals(3, replace1.getPartition("replace1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - Assert.assertEquals(1, 
replace2.getPartition("replace2").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - - alterTable(replaceStmt, false); - Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace1"))); - Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace2"))); - - replace1 = (OlapTable) db.getTableOrMetaException("replace1"); - replace2 = (OlapTable) db.getTableOrMetaException("replace2"); - Assert.assertEquals(1, replace1.getPartition("replace1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - Assert.assertEquals(3, replace2.getPartition("replace2").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - Assert.assertEquals("replace1", replace1.getIndexNameById(replace1.getBaseIndexId())); - Assert.assertEquals("replace2", replace2.getIndexNameById(replace2.getBaseIndexId())); - - // replace with no swap - replaceStmt = "ALTER TABLE test.replace1 REPLACE WITH TABLE replace2 properties('swap' = 'false')"; - alterTable(replaceStmt, false); - replace1 = (OlapTable) db.getTableNullable("replace1"); - replace2 = (OlapTable) db.getTableNullable("replace2"); - Assert.assertNull(replace2); - Assert.assertEquals(3, replace1.getPartition("replace1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - Assert.assertEquals("replace1", replace1.getIndexNameById(replace1.getBaseIndexId())); - Assert.assertTrue(checkAllTabletsNotExists(tblNameToTabletIds.get("replace2"))); - Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace1"))); - - replaceStmt = "ALTER TABLE test.replace1 REPLACE WITH TABLE replace3 properties('swap' = 'true')"; - alterTable(replaceStmt, false); - replace1 = (OlapTable) db.getTableOrMetaException("replace1"); - OlapTable replace3 = (OlapTable) db.getTableOrMetaException("replace3"); - Assert.assertEquals(3, replace1.getPartition("p1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - Assert.assertEquals(3, 
replace1.getPartition("p2").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - Assert.assertNotNull(replace1.getIndexIdByName("r3")); - Assert.assertNotNull(replace1.getIndexIdByName("r4")); - - Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace1"))); - Assert.assertTrue(checkAllTabletsExists(tblNameToTabletIds.get("replace3"))); - - Assert.assertEquals(3, replace3.getPartition("replace3").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - Assert.assertNotNull(replace3.getIndexIdByName("r1")); - Assert.assertNotNull(replace3.getIndexIdByName("r2")); - } - - @Test - public void testModifyBucketNum() throws Exception { - String stmt = "CREATE TABLE test.bucket\n" - + "(\n" - + " k1 int, k2 int, k3 int sum\n" - + ")\n" - + "ENGINE = OLAP\n" - + "PARTITION BY RANGE(k1)\n" - + "(\n" - + "PARTITION p1 VALUES LESS THAN (\"100000\"),\n" - + "PARTITION p2 VALUES LESS THAN (\"200000\"),\n" - + "PARTITION p3 VALUES LESS THAN (\"300000\")\n" - + ")\n" - + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" - + "PROPERTIES(\"replication_num\" = \"1\");"; - - createTable(stmt); - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - - String modifyBucketNumStmt = "ALTER TABLE test.bucket MODIFY DISTRIBUTION DISTRIBUTED BY HASH(k1) BUCKETS 1;"; - alterTable(modifyBucketNumStmt, false); - OlapTable bucket = (OlapTable) db.getTableOrMetaException("bucket"); - Assert.assertEquals(1, bucket.getDefaultDistributionInfo().getBucketNum()); - - modifyBucketNumStmt = "ALTER TABLE test.bucket MODIFY DISTRIBUTION DISTRIBUTED BY HASH(k1) BUCKETS 30;"; - alterTable(modifyBucketNumStmt, false); - bucket = (OlapTable) db.getTableOrMetaException("bucket"); - Assert.assertEquals(30, bucket.getDefaultDistributionInfo().getBucketNum()); - - } - - @Test - public void testChangeOrder() throws Exception { - createTable("CREATE TABLE test.change_order\n" - + "(\n" - + " k1 date,\n" - + " k2 int,\n" - + " v1 
int sum\n" - + ")\n" - + "PARTITION BY RANGE(k1)\n" - + "(\n" - + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01')\n" - + ")\n" - + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" - + "PROPERTIES('replication_num' = '1');"); - - String changeOrderStmt = "ALTER TABLE test.change_order ORDER BY (k2, k1, v1);;"; - alterTable(changeOrderStmt, false); - } - - @Test - public void testAlterUniqueTablePartitionColumn() throws Exception { - createTable("CREATE TABLE test.unique_partition\n" - + "(\n" - + " k1 date,\n" - + " k2 int,\n" - + " v1 int\n" - + ")\n" - + "UNIQUE KEY(k1, k2)\n" - + "PARTITION BY RANGE(k1)\n" - + "(\n" - + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" - + "PROPERTIES('replication_num' = '1');"); - - // partition key can not be changed. - // this test is also for validating a bug fix about invisible columns(delete flag column) - String changeOrderStmt = "ALTER TABLE test.unique_partition modify column k1 int key null"; - alterTable(changeOrderStmt, true); - } - - @Test - public void testAlterDateV2Schema() throws Exception { - createTable("CREATE TABLE test.unique_partition_datev2\n" + "(\n" + " k1 date,\n" + " k2 datetime(3),\n" - + " k3 datetime,\n" + " v1 date,\n" + " v2 datetime(3),\n" + " v3 datetime,\n" + " v4 int\n" - + ")\n" + "UNIQUE KEY(k1, k2, k3)\n" + "PARTITION BY RANGE(k1)\n" + "(\n" + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY HASH(k1) BUCKETS 3\n" + "PROPERTIES('replication_num' = '1');"); - - // partition key can not be changed. 
- String changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k1 int key null"; - alterTable(changeOrderStmt, true); - changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k2 int key null"; - alterTable(changeOrderStmt, true); - changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k3 int key null"; - alterTable(changeOrderStmt, true); - - // partition keys which are date type should be changed between each other. - changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k2 datetime key null"; - alterTable(changeOrderStmt, false); - waitSchemaChangeJobDone(false); - changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k3 datetime(3) key null"; - alterTable(changeOrderStmt, false); - waitSchemaChangeJobDone(false); - // Change to another precision datetime - changeOrderStmt = "ALTER TABLE test.unique_partition_datev2 modify column k3 datetime(6) key null"; - alterTable(changeOrderStmt, false); - waitSchemaChangeJobDone(false); - } - - private boolean checkAllTabletsExists(List tabletIds) { - TabletInvertedIndex invertedIndex = Env.getCurrentEnv().getTabletInvertedIndex(); - for (long tabletId : tabletIds) { - if (invertedIndex.getTabletMeta(tabletId) == null) { - return false; - } - if (invertedIndex.getReplicasByTabletId(tabletId).isEmpty()) { - return false; - } - } - return true; - } - - private boolean checkAllTabletsNotExists(List tabletIds) { - TabletInvertedIndex invertedIndex = Env.getCurrentEnv().getTabletInvertedIndex(); - for (long tabletId : tabletIds) { - if (invertedIndex.getTabletMeta(tabletId) != null) { - return false; - } - - if (!invertedIndex.getReplicasByTabletId(tabletId).isEmpty()) { - return false; - } - } - return true; - } - - @Test - public void testExternalTableAlterOperations() throws Exception { - // external table do not support partition operation - String stmt = "alter table test.odbc_table add partition p3 values less 
than('2020-04-01'), add partition p4 values less than('2020-05-01')"; - alterTable(stmt, true); - - // external table do not support rollup - stmt = "alter table test.odbc_table add rollup r1 (k1)"; - alterTable(stmt, true); - - // external table support add column - stmt = "alter table test.odbc_table add column k6 INT KEY after k1, add column k7 TINYINT KEY after k6"; - alterTable(stmt, false); - Database db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - Table odbcTable = db.getTableOrMetaException("odbc_table"); - Assert.assertEquals(odbcTable.getBaseSchema().size(), 7); - Assert.assertEquals(odbcTable.getBaseSchema().get(1).getDataType(), PrimitiveType.INT); - Assert.assertEquals(odbcTable.getBaseSchema().get(2).getDataType(), PrimitiveType.TINYINT); - - // external table support drop column - stmt = "alter table test.odbc_table drop column k7"; - alterTable(stmt, false); - db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - odbcTable = db.getTableOrMetaException("odbc_table"); - Assert.assertEquals(odbcTable.getBaseSchema().size(), 6); - - // external table support modify column - stmt = "alter table test.odbc_table modify column k6 bigint after k5"; - alterTable(stmt, false); - db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - odbcTable = db.getTableOrMetaException("odbc_table"); - Assert.assertEquals(odbcTable.getBaseSchema().size(), 6); - Assert.assertEquals(odbcTable.getBaseSchema().get(5).getDataType(), PrimitiveType.BIGINT); - - // external table support reorder column - db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - odbcTable = db.getTableOrMetaException("odbc_table"); - Assert.assertEquals(odbcTable.getBaseSchema().stream() - .map(column -> column.getName()) - .reduce("", (totalName, columnName) -> totalName + columnName), "k1k2k3k4k5k6"); - stmt = "alter table test.odbc_table order by (k6, k5, k4, k3, k2, k1)"; - 
alterTable(stmt, false); - Assert.assertEquals(odbcTable.getBaseSchema().stream() - .map(column -> column.getName()) - .reduce("", (totalName, columnName) -> totalName + columnName), "k6k5k4k3k2k1"); - - // external table support drop column - stmt = "alter table test.odbc_table drop column k6"; - alterTable(stmt, false); - stmt = "alter table test.odbc_table drop column k5"; - alterTable(stmt, false); - stmt = "alter table test.odbc_table drop column k4"; - alterTable(stmt, false); - stmt = "alter table test.odbc_table drop column k3"; - alterTable(stmt, false); - stmt = "alter table test.odbc_table drop column k2"; - alterTable(stmt, false); - // do not allow drop last column - Assert.assertEquals(odbcTable.getBaseSchema().size(), 1); - stmt = "alter table test.odbc_table drop column k1"; - alterTable(stmt, true); - Assert.assertEquals(odbcTable.getBaseSchema().size(), 1); - - // external table support rename operation - stmt = "alter table test.odbc_table rename oracle_table"; - alterTable(stmt, false); - db = Env.getCurrentInternalCatalog().getDbOrMetaException("default_cluster:test"); - odbcTable = db.getTableNullable("oracle_table"); - Assert.assertNotNull(odbcTable); - odbcTable = db.getTableNullable("odbc_table"); - Assert.assertNull(odbcTable); - } - - @Test - public void testModifyTableEngine() throws Exception { - String createOlapTblStmt = "CREATE TABLE test.mysql_table (\n" - + " `k1` date NULL COMMENT \"\",\n" - + " `k2` int NULL COMMENT \"\",\n" - + " `k3` smallint NULL COMMENT \"\",\n" - + " `v1` varchar(2048) NULL COMMENT \"\",\n" - + " `v2` datetime NULL COMMENT \"\"\n" - + ") ENGINE=MYSQL\n" - + "PROPERTIES (\n" - + "\"host\" = \"172.16.0.1\",\n" - + "\"port\" = \"3306\",\n" - + "\"user\" = \"cmy\",\n" - + "\"password\" = \"abc\",\n" - + "\"database\" = \"db1\",\n" - + "\"table\" = \"tbl1\"" - + ");"; - createTable(createOlapTblStmt); - - Database db = Env.getCurrentInternalCatalog().getDbNullable("default_cluster:test"); - MysqlTable mysqlTable 
= (MysqlTable) db.getTableOrMetaException("mysql_table", Table.TableType.MYSQL); - - String alterEngineStmt = "alter table test.mysql_table modify engine to odbc"; - alterTable(alterEngineStmt, true); - - alterEngineStmt = "alter table test.mysql_table modify engine to odbc properties(\"driver\" = \"MySQL\")"; - alterTable(alterEngineStmt, false); - - OdbcTable odbcTable = (OdbcTable) db.getTableNullable(mysqlTable.getId()); - Assert.assertEquals("mysql_table", odbcTable.getName()); - List schema = odbcTable.getBaseSchema(); - Assert.assertEquals(5, schema.size()); - Assert.assertEquals("172.16.0.1", odbcTable.getHost()); - Assert.assertEquals("3306", odbcTable.getPort()); - Assert.assertEquals("cmy", odbcTable.getUserName()); - Assert.assertEquals("abc", odbcTable.getPasswd()); - Assert.assertEquals("db1", odbcTable.getOdbcDatabaseName()); - Assert.assertEquals("tbl1", odbcTable.getOdbcTableName()); - Assert.assertEquals("MySQL", odbcTable.getOdbcDriver()); - } - - @Test(expected = DdlException.class) - public void testDropInUseResource() throws Exception { - String sql = "drop resource remote_s3"; - DropResourceStmt stmt = (DropResourceStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); - Env.getCurrentEnv().getResourceMgr().dropResource(stmt); - } - - @Test - public void testModifyColocateGroupReplicaAlloc() throws Exception { - Config.enable_round_robin_create_tablet = true; - - createTable("CREATE TABLE test.col_tbl0\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int \n" - + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" - + "DISTRIBUTED BY HASH(k2) BUCKETS 4\n" - + "PROPERTIES('replication_num' = '2', 'colocate_with' = 'mod_group_0');"); - - createTable("CREATE TABLE test.col_tbl1\n" + "(\n" + " k1 date,\n" + " k2 int,\n" + " v1 int \n" - + ") ENGINE=OLAP\n" + "UNIQUE KEY (k1,k2)\n" + "PARTITION BY RANGE(k1)\n" + "(\n" - + " PARTITION p1 values less than('2020-02-01'),\n" - + " PARTITION p2 values less than('2020-03-01')\n" + ")\n" + "DISTRIBUTED BY 
HASH(k2) BUCKETS 4\n" - + "PROPERTIES('replication_num' = '2', 'colocate_with' = 'mod_group_1');"); - - createTable("CREATE TABLE test.col_tbl2 (\n" - + "`uuid` varchar(255) NULL,\n" - + "`action_datetime` date NULL\n" - + ")\n" - + "DUPLICATE KEY(uuid)\n" - + "PARTITION BY RANGE(action_datetime)()\n" - + "DISTRIBUTED BY HASH(uuid) BUCKETS 4\n" - + "PROPERTIES\n" - + "(\n" - + "\"colocate_with\" = \"mod_group_2\",\n" - + "\"dynamic_partition.enable\" = \"true\",\n" - + "\"dynamic_partition.time_unit\" = \"DAY\",\n" - + "\"dynamic_partition.end\" = \"2\",\n" - + "\"dynamic_partition.prefix\" = \"p\",\n" - + "\"dynamic_partition.buckets\" = \"4\",\n" - + "\"dynamic_partition.replication_num\" = \"2\"\n" - + ");\n"); - - - Env env = Env.getCurrentEnv(); - ReplicaAllocation newReplicaAlloc = new ReplicaAllocation(); - newReplicaAlloc.put(Tag.DEFAULT_BACKEND_TAG, (short) 1); - newReplicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, "group_a"), (short) 1); - newReplicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, "group_b"), (short) 1); - - Database db = env.getInternalCatalog().getDbOrMetaException("default_cluster:test"); - for (int i = 0; i < 3; i++) { - String groupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); - String sql = "alter colocate group " + groupName - + " set ( 'replication_allocation' = '" + newReplicaAlloc.toCreateStmt() + "')"; - System.out.println(sql); - AlterColocateGroupStmt stmt = (AlterColocateGroupStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); - DdlExecutor.execute(env, stmt); - - ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(groupName); - Assert.assertNotNull(groupSchema); - Assert.assertEquals(newReplicaAlloc, groupSchema.getReplicaAlloc()); - - OlapTable tbl = (OlapTable) db.getTableOrMetaException("col_tbl" + i); - Assert.assertEquals(newReplicaAlloc, tbl.getDefaultReplicaAllocation()); - if (i == 2) { - Assert.assertEquals(newReplicaAlloc, - 
tbl.getTableProperty().getDynamicPartitionProperty().getReplicaAllocation()); - } - for (Partition partition : tbl.getAllPartitions()) { - Assert.assertEquals(newReplicaAlloc, - tbl.getPartitionInfo().getReplicaAllocation(partition.getId())); - } - - if (i == 2) { - Assert.assertEquals(newReplicaAlloc, - tbl.getTableProperty().getDynamicPartitionProperty().getReplicaAllocation()); - for (int j = 0; true; j++) { - Thread.sleep(2000); - if (tbl.getAllPartitions().size() > 0) { - break; - } - if (j >= 5) { - Assert.assertTrue("dynamic table not create partition", false); - } - } - } - } - - Config.enable_round_robin_create_tablet = false; - - for (int k = 0; true; k++) { - Thread.sleep(1000); // sleep to wait dynamic partition scheduler run - boolean allStable = true; - for (int i = 0; i < 3; i++) { - String groupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); - ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(groupName); - Assert.assertNotNull(groupSchema); - - if (env.getColocateTableIndex().isGroupUnstable(groupSchema.getGroupId())) { - allStable = false; - if (k >= 120) { - Assert.assertTrue(groupName + " is unstable" , false); - } - System.out.println("xxxxxxxxxx unstable: " + i); - continue; - } - - Map backendReplicaNum = Maps.newHashMap(); - OlapTable tbl = (OlapTable) db.getTableOrMetaException("col_tbl" + i); - int tabletNum = 0; - for (Partition partition : tbl.getAllPartitions()) { - for (MaterializedIndex idx : partition.getMaterializedIndices( - MaterializedIndex.IndexExtState.VISIBLE)) { - for (Tablet tablet : idx.getTablets()) { - Map allocMap = Maps.newHashMap(); - tabletNum++; - for (Replica replica : tablet.getReplicas()) { - long backendId = replica.getBackendId(); - Tag tag = backendTags.get(backendId); - Assert.assertNotNull(tag); - short oldNum = allocMap.getOrDefault(tag, (short) 0); - allocMap.put(tag, (short) (oldNum + 1)); - backendReplicaNum.put(backendId, backendReplicaNum.getOrDefault(backendId, 
0) + 1); - } - Assert.assertEquals(newReplicaAlloc.getAllocMap(), allocMap); - } - } - } - - Assert.assertTrue(tabletNum > 0); - - for (Map.Entry entry : backendReplicaNum.entrySet()) { - long backendId = entry.getKey(); - int replicaNum = entry.getValue(); - Tag tag = backendTags.get(backendId); - int sameTagReplicaNum = tabletNum * newReplicaAlloc.getAllocMap().getOrDefault(tag, (short) 0); - int sameTagBeNum = (int) (backendTags.values().stream().filter(t -> t.equals(tag)).count()); - System.out.println("xx i " + i); - System.out.println("xx backend " + backendId); - System.out.println("xx sameTagReplicaNum " + sameTagReplicaNum); - System.out.println("xx sameTagBeNum " + sameTagBeNum); - System.out.println("xx tabletNum " + tabletNum); - Assert.assertEquals("backend " + backendId + " failed: " + " all backend replica num: " - + backendReplicaNum + ", all backend tag: " + backendTags, - sameTagReplicaNum / sameTagBeNum, replicaNum); - } - } - - if (allStable) { - break; - } - } - } - - @Test - public void testShowMV() throws Exception { - createMV("CREATE MATERIALIZED VIEW test_mv as select k1 from test.show_test group by k1;", false); - waitSchemaChangeJobDone(true); - - String showMvSql = "SHOW CREATE MATERIALIZED VIEW test_mv on test.show_test;"; - ShowCreateMaterializedViewStmt showStmt = (ShowCreateMaterializedViewStmt) UtFrameUtils.parseAndAnalyzeStmt( - showMvSql, connectContext); - ShowExecutor executor = new ShowExecutor(connectContext, showStmt); - Assert.assertEquals(executor.execute().getResultRows().get(0).get(2), - "CREATE MATERIALIZED VIEW test_mv as select k1 from test.show_test group by k1;"); - - showMvSql = "SHOW CREATE MATERIALIZED VIEW test_mv_empty on test.show_test;"; - showStmt = (ShowCreateMaterializedViewStmt) UtFrameUtils.parseAndAnalyzeStmt(showMvSql, connectContext); - executor = new ShowExecutor(connectContext, showStmt); - Assert.assertTrue(executor.execute().getResultRows().isEmpty()); - - showMvSql = "SHOW CREATE MATERIALIZED 
VIEW test_mv on test.table1_error;"; - showStmt = (ShowCreateMaterializedViewStmt) UtFrameUtils.parseAndAnalyzeStmt(showMvSql, connectContext); - executor = new ShowExecutor(connectContext, showStmt); - ExceptionChecker.expectThrowsWithMsg(AnalysisException.class, "Unknown table 'table1_error'", - executor::execute); - } - - @Test - public void testModifySequenceCol() { - String stmt = "alter table test.unique_sequence_col modify column v1 Date"; - alterTable(stmt, true); - } -} From b813b2269b6a0dbf4effeda1cc1d644abc582bc8 Mon Sep 17 00:00:00 2001 From: yujun777 Date: Tue, 5 Sep 2023 00:53:04 +0800 Subject: [PATCH 5/7] change colocate group name --- .../Alter/ALTER-COLOCATE-GROUP.md | 15 ++-- .../Alter/ALTER-COLOCATE-GROUP.md | 17 ++--- fe/fe-core/src/main/cup/sql_parser.cup | 14 +++- .../analysis/AlterColocateGroupStmt.java | 26 +++++-- .../doris/analysis/ColocateGroupName.java | 70 +++++++++++++++++++ .../doris/catalog/ColocateTableIndex.java | 17 ++++- .../common/proc/ColocationGroupProcDir.java | 2 +- .../org/apache/doris/alter/AlterTest.java | 13 ++-- 8 files changed, 136 insertions(+), 38 deletions(-) create mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/ColocateGroupName.java diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md index ecdecdd48a1646..54c87c05e67e70 100644 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md +++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md @@ -39,7 +39,7 @@ This statement is used to modify the colocation group. Syntax: ```sql -ALTER COLOCATE GROUP "full_group_name" +ALTER COLOCATE GROUP [database.]group SET ( property_list ); @@ -47,14 +47,9 @@ SET ( NOTE: -1. 
`full_group_name` is the full name of the colocation group, which can be divided into two cases: - - If the group is global, that is, its name starts with `__global__`, then `full_group_name` is equal to `group_name`; - - If the group is not global, that is, its name does not start with `__global__`, then it belongs to a certain Database, `full_group_name` is equal to `dbId` + `_` + `group_name` - -2. `full_group_name` can also be viewed through the command `show proc '/proc/colocation_group'`; - +1. If the colocate group is global, that is, its name starts with `__global__`, then it does not belong to any database; -3. property_list is a colocation group attribute, currently only supports modifying `replication_num` and `replication_allocation`. After modifying these two attributes of the colocation group, at the same time, change the attribute `default.replication_allocation`, the attribute `dynamic.replication_allocation` of the table of the group, and the `replication_allocation` of the existing partition to be the same as it. +2. property_list is a colocation group attribute, currently only supports modifying `replication_num` and `replication_allocation`. After modifying these two attributes of the colocation group, at the same time, change the attribute `default.replication_allocation`, the attribute `dynamic.replication_allocation` of the table of the group, and the `replication_allocation` of the existing partition to be the same as it. ### Example @@ -72,9 +67,9 @@ NOTE: 2. 
Modify the number of copies of a non-global group ```sql - # Set "colocate_with" = "bar" when creating the table, and the dbId of the Database where the table is located is 10231 + # Set "colocate_with" = "bar" when creating the table, and the Database is "example_db" - ALTER COLOCATE GROUP 10231_bar + ALTER COLOCATE GROUP example_db.bar SET ( "replication_num"="1" ); diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md index de8123b44d6802..2b5ca2cc727296 100644 --- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md +++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md @@ -39,7 +39,7 @@ ALTER COLOCATE GROUP 语法: ```sql -ALTER COLOCATE GROUP "full_group_name" +ALTER COLOCATE GROUP [database.]group SET ( property_list ); @@ -47,14 +47,11 @@ SET ( 注意: -1. `full_group_name`是colocation group名称全称,其分为两种情况: - - 如果group是全局的,即它的名称是以`__global__` 开头的,那么`full_group_name`即等于`group_name`; - - 如果group不是全局的,即它的名称不是以`__global__ `开头的,那么它是属于某个Database的,`full_group_name ` 等于 `dbId` + `_` + `group_name`。 - -2. `full_group_name` 也可以通过命令 `show proc '/proc/colocation_group'` 来查看。 - +1. 如果colocate group是全局的,即它的名称是以 `__global__` 开头的,那它不属于任何一个Database; -3. property_list 是colocation group属性,目前只支持修改`replication_num` 和 `replication_allocation`。修改colocation group的这两个属性修改之后,同时把该group的表的属性`default.replication_allocation` 、属性`dynamic.replication_allocation `、以及已有分区的`replication_allocation`改成跟它一样。 +2. property_list 是colocation group属性,目前只支持修改`replication_num` 和 `replication_allocation`。 + 修改colocation group的这两个属性修改之后,同时把该group的表的属性`default.replication_allocation` 、 + 属性`dynamic.replication_allocation `、以及已有分区的`replication_allocation`改成跟它一样。 @@ -74,9 +71,9 @@ SET ( 2. 
修改一个非全局group的副本数 ```sql - # 建表时设置 "colocate_with" = "bar",且表所在Database的dbId为10231 + # 建表时设置 "colocate_with" = "bar",且表属于Database example_db - ALTER COLOCATE GROUP 10231_bar + ALTER COLOCATE GROUP example_db.bar SET ( "replication_num"="1" ); diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index 92c8f8d02478bc..eb26ec2992630e 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -813,6 +813,7 @@ nonterminal ArrayList opt_common_hints; nonterminal String optional_on_ident; nonterminal String opt_job_starts; nonterminal String opt_job_ends; +nonterminal ColocateGroupName colocate_group_name; nonterminal LoadTask.MergeType opt_merge_type, opt_with_merge_type; @@ -1348,7 +1349,7 @@ alter_stmt ::= {: RESULT = new AlterResourceStmt(resourceName, properties); :} - | KW_ALTER KW_COLOCATE KW_GROUP ident_or_text:colocateGroupName KW_SET LPAREN key_value_map:properties RPAREN + | KW_ALTER KW_COLOCATE KW_GROUP colocate_group_name:colocateGroupName KW_SET LPAREN key_value_map:properties RPAREN {: RESULT = new AlterColocateGroupStmt(colocateGroupName, properties); :} @@ -5647,6 +5648,17 @@ table_name ::= {: RESULT = new TableName(ctl, db, tbl); :} ; +colocate_group_name ::= + ident:group + {: + RESULT = new ColocateGroupName(null, group); + :} + | ident:db DOT ident:group + {: + RESULT = new ColocateGroupName(db, group); + :} + ; + encryptkey_name ::= ident:name {: diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColocateGroupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColocateGroupStmt.java index 02468110a4b6b5..e268322dcc8f9f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColocateGroupStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColocateGroupStmt.java @@ -26,18 +26,20 @@ import org.apache.doris.mysql.privilege.PrivPredicate; import org.apache.doris.qe.ConnectContext; +import 
com.google.common.base.Strings; + import java.util.Map; public class AlterColocateGroupStmt extends DdlStmt { - private final String colocateGroupName; + private final ColocateGroupName colocateGroupName; private final Map properties; - public AlterColocateGroupStmt(String colocateGroupName, Map properties) { + public AlterColocateGroupStmt(ColocateGroupName colocateGroupName, Map properties) { this.colocateGroupName = colocateGroupName; this.properties = properties; } - public String getColocateGroupName() { + public ColocateGroupName getColocateGroupName() { return colocateGroupName; } @@ -48,10 +50,20 @@ public Map getProperties() { @Override public void analyze(Analyzer analyzer) throws UserException { super.analyze(analyzer); + colocateGroupName.analyze(analyzer); - if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv( - ConnectContext.get(), PrivPredicate.ADMIN)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); + String dbName = colocateGroupName.getDb(); + if (Strings.isNullOrEmpty(dbName)) { + if (!Env.getCurrentEnv().getAccessManager().checkGlobalPriv( + ConnectContext.get(), PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN"); + } + } else { + if (!Env.getCurrentEnv().getAccessManager().checkDbPriv( + ConnectContext.get(), dbName, PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, + ConnectContext.get().getQualifiedUser(), dbName); + } } if (properties == null || properties.isEmpty()) { @@ -62,7 +74,7 @@ public void analyze(Analyzer analyzer) throws UserException { @Override public String toSql() { StringBuilder sb = new StringBuilder(); - sb.append("ALTER COLOCATE GROUP '").append(colocateGroupName).append("' "); + sb.append("ALTER COLOCATE GROUP ").append(colocateGroupName.toSql()).append(" "); sb.append("PROPERTIES(").append(new PrintableMap<>(properties, " = ", true, false)).append(")"); 
return sb.toString(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColocateGroupName.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColocateGroupName.java new file mode 100644 index 00000000000000..c2019067ac4eec --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColocateGroupName.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.analysis; + +import org.apache.doris.catalog.ColocateTableIndex.GroupId; +import org.apache.doris.cluster.ClusterNamespace; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; + +import com.google.common.base.Strings; + +public class ColocateGroupName { + private String db; + private String group; + + public ColocateGroupName(String db, String group) { + this.db = db; + this.group = group; + } + + public String getDb() { + return db; + } + + public String getGroup() { + return group; + } + + public void analyze(Analyzer analyzer) throws AnalysisException { + if (GroupId.isGlobalGroupName(group)) { + if (!Strings.isNullOrEmpty(db)) { + throw new AnalysisException("group that name starts with `" + GroupId.GLOBAL_COLOCATE_PREFIX + "`" + + " is a global group, it doesn't belong to any specific database"); + } + } else { + if (Strings.isNullOrEmpty(db)) { + if (Strings.isNullOrEmpty(analyzer.getDefaultDb())) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR); + } + db = analyzer.getDefaultDb(); + } + db = ClusterNamespace.getFullName(analyzer.getClusterName(), db); + } + } + + public String toSql() { + StringBuilder sb = new StringBuilder(); + if (!Strings.isNullOrEmpty(db)) { + sb.append("`").append(db).append("`"); + } + sb.append("`").append(group).append("`"); + return sb.toString(); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java index 14d004ce40e9b0..b4260f026f4fb5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java @@ -689,7 +689,11 @@ public List> getInfos() { } } info.add(dbName); - info.add(entry.getKey()); + String groupName = entry.getKey(); + if (!GroupId.isGlobalGroupName(groupName)) { + 
groupName = groupName.substring(groupName.indexOf(".") + 1); + } + info.add(groupName); info.add(Joiner.on(", ").join(group2Tables.get(groupId))); ColocateGroupSchema groupSchema = group2Schema.get(groupId); info.add(String.valueOf(groupSchema.getBucketsNum())); @@ -817,10 +821,17 @@ public void alterColocateGroup(AlterColocateGroupStmt stmt) throws UserException writeLock(); try { Map properties = stmt.getProperties(); - String fullGroupName = stmt.getColocateGroupName(); + String dbName = stmt.getColocateGroupName().getDb(); + String groupName = stmt.getColocateGroupName().getGroup(); + long dbId = 0; + if (!GroupId.isGlobalGroupName(groupName)) { + Database db = (Database) Env.getCurrentInternalCatalog().getDbOrMetaException(dbName); + dbId = db.getId(); + } + String fullGroupName = GroupId.getFullGroupName(dbId, groupName); ColocateGroupSchema groupSchema = getGroupSchema(fullGroupName); if (groupSchema == null) { - throw new DdlException("Not found colocate group [" + fullGroupName + "]"); + throw new DdlException("Not found colocate group " + stmt.getColocateGroupName().toSql()); } GroupId groupId = groupSchema.getGroupId(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java index f8d3cd17cd22c9..fc42b6ef521d93 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java @@ -33,7 +33,7 @@ */ public class ColocationGroupProcDir implements ProcDirInterface { public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("GroupId").add("Database").add("FullGroupName").add("TableIds") + .add("GroupId").add("Database").add("GroupName").add("TableIds") .add("BucketsNum").add("ReplicaAllocation").add("DistCols").add("IsStable") .add("ErrorMsg").build(); diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java index df69a41a29587c..1dfd5d0bdc0a75 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java @@ -1277,13 +1277,14 @@ public void testModifyColocateGroupReplicaAlloc() throws Exception { newReplicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, "group_b"), (short) 1); for (int i = 0; i < 3; i++) { - String groupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); - String sql = "alter colocate group " + groupName + String groupName = "mod_group_" + i; + String sql = "alter colocate group test." + groupName + " set ( 'replication_allocation' = '" + newReplicaAlloc.toCreateStmt() + "')"; + String fullGroupName = GroupId.getFullGroupName(db.getId(), groupName); AlterColocateGroupStmt stmt = (AlterColocateGroupStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); DdlExecutor.execute(env, stmt); - ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(groupName); + ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(fullGroupName); Assert.assertNotNull(groupSchema); Assert.assertEquals(newReplicaAlloc, groupSchema.getReplicaAlloc()); @@ -1310,14 +1311,14 @@ public void testModifyColocateGroupReplicaAlloc() throws Exception { Thread.sleep(1000); // sleep to wait dynamic partition scheduler run boolean allStable = true; for (int i = 0; i < 3; i++) { - String groupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); - ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(groupName); + String fullGroupName = GroupId.getFullGroupName(db.getId(), "mod_group_" + i); + ColocateGroupSchema groupSchema = env.getColocateTableIndex().getGroupSchema(fullGroupName); Assert.assertNotNull(groupSchema); if (env.getColocateTableIndex().isGroupUnstable(groupSchema.getGroupId())) { 
allStable = false; if (k >= 120) { - Assert.assertTrue(groupName + " is unstable" , false); + Assert.assertTrue(fullGroupName + " is unstable" , false); } continue; } From 682dbb501ce816f948bdb4d767cf48cd221ad55e Mon Sep 17 00:00:00 2001 From: yujun777 Date: Sat, 23 Sep 2023 13:15:40 +0800 Subject: [PATCH 6/7] update test --- .../doris/analysis/ColocateGroupName.java | 2 +- .../doris/catalog/ColocateTableIndex.java | 7 +- .../common/proc/ColocationGroupProcDir.java | 2 +- .../org/apache/doris/alter/AlterTest.java | 6 +- .../alter_p2/test_alter_colocate_group.groovy | 170 ++++++++++++++++++ 5 files changed, 178 insertions(+), 9 deletions(-) create mode 100644 regression-test/suites/alter_p2/test_alter_colocate_group.groovy diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColocateGroupName.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColocateGroupName.java index c2019067ac4eec..b7f0c0afd34a53 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColocateGroupName.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColocateGroupName.java @@ -62,7 +62,7 @@ public void analyze(Analyzer analyzer) throws AnalysisException { public String toSql() { StringBuilder sb = new StringBuilder(); if (!Strings.isNullOrEmpty(db)) { - sb.append("`").append(db).append("`"); + sb.append("`").append(db).append("`."); } sb.append("`").append(group).append("`"); return sb.toString(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java index b4260f026f4fb5..fcefcff132a7a2 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateTableIndex.java @@ -686,12 +686,15 @@ public List> getInfos() { Database db = Env.getCurrentInternalCatalog().getDbNullable(groupId.dbId); if (db != null) { dbName = db.getFullName(); + int index = dbName.indexOf(":"); 
+ if (index > 0) { + dbName = dbName.substring(index + 1); //use short db name + } } } - info.add(dbName); String groupName = entry.getKey(); if (!GroupId.isGlobalGroupName(groupName)) { - groupName = groupName.substring(groupName.indexOf(".") + 1); + groupName = dbName + "." + groupName.substring(groupName.indexOf("_") + 1); } info.add(groupName); info.add(Joiner.on(", ").join(group2Tables.get(groupId))); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java index fc42b6ef521d93..85dd2c97be6590 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupProcDir.java @@ -33,7 +33,7 @@ */ public class ColocationGroupProcDir implements ProcDirInterface { public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("GroupId").add("Database").add("GroupName").add("TableIds") + .add("GroupId").add("GroupName").add("TableIds") .add("BucketsNum").add("ReplicaAllocation").add("DistCols").add("IsStable") .add("ErrorMsg").build(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java index 1dfd5d0bdc0a75..d681376d24061b 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java @@ -1290,10 +1290,6 @@ public void testModifyColocateGroupReplicaAlloc() throws Exception { OlapTable tbl = (OlapTable) db.getTableOrMetaException("col_tbl" + i); Assert.assertEquals(newReplicaAlloc, tbl.getDefaultReplicaAllocation()); - if (i == 2) { - Assert.assertEquals(newReplicaAlloc, - tbl.getTableProperty().getDynamicPartitionProperty().getReplicaAllocation()); - } for (Partition partition : tbl.getAllPartitions()) { Assert.assertEquals(newReplicaAlloc, 
tbl.getPartitionInfo().getReplicaAllocation(partition.getId())); @@ -1318,7 +1314,7 @@ public void testModifyColocateGroupReplicaAlloc() throws Exception { if (env.getColocateTableIndex().isGroupUnstable(groupSchema.getGroupId())) { allStable = false; if (k >= 120) { - Assert.assertTrue(fullGroupName + " is unstable" , false); + Assert.assertTrue(fullGroupName + " is unstable", false); } continue; } diff --git a/regression-test/suites/alter_p2/test_alter_colocate_group.groovy b/regression-test/suites/alter_p2/test_alter_colocate_group.groovy new file mode 100644 index 00000000000000..1f5b8496630b40 --- /dev/null +++ b/regression-test/suites/alter_p2/test_alter_colocate_group.groovy @@ -0,0 +1,170 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite ("test_alter_colocate_group") { + sql "DROP DATABASE IF EXISTS test_alter_colocate_group_db FORCE" + test { + sql """ + ALTER COLOCATE GROUP test_alter_colocate_group_db.bad_group_1 + SET ( "replication_num" = "1" ); + """ + + exception "unknown databases" + } + test { + sql """ + ALTER COLOCATE GROUP bad_group_2 + SET ( "replication_num" = "1" ); + """ + + exception "Not found colocate group `default_cluster:regression_test_alter_p2`.`bad_group_2`" + } + test { + sql """ + ALTER COLOCATE GROUP bad_db.__global__bad_group_3 + SET ( "replication_num" = "1" ); + """ + + exception "group that name starts with `__global__` is a global group, it doesn't belong to any specific database" + } + test { + sql """ + ALTER COLOCATE GROUP __global__bad_group_4 + SET ( "replication_num" = "1" ); + """ + + exception "Not found colocate group `__global__bad_group_4`" + } + + sql " DROP TABLE IF EXISTS tbl1 FORCE; " + sql " DROP TABLE IF EXISTS tbl2 FORCE; " + sql " DROP TABLE IF EXISTS tbl3 FORCE; " + + sql """ + CREATE TABLE tbl1 + ( + k1 int, + k2 int + ) + DISTRIBUTED BY HASH(k1) BUCKETS 6 + PROPERTIES + ( + "colocate_with" = "group_1", + "replication_num" = "1" + ); + """ + + sql """ + CREATE TABLE tbl2 + ( + k1 date, + k2 int + ) + PARTITION BY RANGE(k1) + ( + PARTITION p1 values less than('2020-02-01'), + PARTITION p2 values less than('2020-03-01') + ) + DISTRIBUTED BY HASH(k2) BUCKETS 5 + PROPERTIES + ( + "colocate_with" = "group_2", + "replication_num" = "1" + ); + """ + + sql """ + CREATE TABLE tbl3 + ( + `uuid` varchar(255) NULL, + `action_datetime` date NULL + ) + DUPLICATE KEY(uuid) + PARTITION BY RANGE(action_datetime)() + DISTRIBUTED BY HASH(uuid) BUCKETS 4 + PROPERTIES + ( + "colocate_with" = "group_3", + "replication_num" = "1", + "dynamic_partition.enable" = "true", + "dynamic_partition.time_unit" = "DAY", + "dynamic_partition.end" = "2", + "dynamic_partition.prefix" = "p", + "dynamic_partition.buckets" = "4", + "dynamic_partition.replication_num" = "1" + 
); + """ + + def checkGroupsReplicaAlloc = { groupName, replicaNum -> + // groupName -> replicaAlloc + def allocMap = [:] + def groups = sql """ show proc "/colocation_group" """ + for (def group : groups) { + allocMap[group[1]] = group[4] + } + + assertEquals("tag.location.default: ${replicaNum}".toString(), allocMap[groupName]) + } + + def checkTableReplicaAlloc = { tableName, hasDynamicPart, replicaNum -> + def result = sql """ show create table ${tableName} """ + def createTbl = result[0][1].toString() + assertTrue(createTbl.indexOf("\"replication_allocation\" = \"tag.location.default: ${replicaNum}\"") > 0) + if (hasDynamicPart) { + assertTrue(createTbl.indexOf( + "\"dynamic_partition.replication_allocation\" = \"tag.location.default: ${replicaNum}\"") > 0) + } + + result = sql """ show partitions from ${tableName} """ + assertTrue(result.size() > 0) + for (int i = 0; i < result.size(); i++) { + assertEquals("${replicaNum}".toString(), result[i][9].toString()) + } + } + + for (int i = 1; i <= 3; i++) { + def groupName = "regression_test_alter_p2.group_${i}" + checkGroupsReplicaAlloc(groupName, 1) + + def tableName = "tbl${i}" + def hasDynamicPart = i == 3 + checkTableReplicaAlloc(tableName, hasDynamicPart, 1) + + test { + sql """ + ALTER COLOCATE GROUP ${groupName} + SET ( "replication_num" = "100" ); + """ + + exception "Failed to find enough host" + } + + test { + sql """ + ALTER COLOCATE GROUP ${groupName} + SET ( "replication_num" = "3" ); + """ + } + + checkGroupsReplicaAlloc(groupName, 3) + checkTableReplicaAlloc(tableName, hasDynamicPart, 3) + } + + sql " DROP TABLE IF EXISTS tbl1 FORCE; " + sql " DROP TABLE IF EXISTS tbl2 FORCE; " + sql " DROP TABLE IF EXISTS tbl3 FORCE; " +} From a8227a20b0f347c7ed1952088c028c494f7670ba Mon Sep 17 00:00:00 2001 From: yujun777 Date: Tue, 26 Sep 2023 16:00:45 +0800 Subject: [PATCH 7/7] change code OP_COLOCATE_MOD_REPLICA_ALLOC --- .../src/main/java/org/apache/doris/persist/OperationType.java | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java b/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java index 0235407f359aa9..c5e784cb9d7eff 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java @@ -157,7 +157,6 @@ public class OperationType { public static final short OP_MODIFY_FRONTEND = 92; //colocate table - public static final short OP_COLOCATE_MOD_REPLICA_ALLOC = 93; public static final short OP_COLOCATE_ADD_TABLE = 94; public static final short OP_COLOCATE_REMOVE_TABLE = 95; public static final short OP_COLOCATE_BACKENDS_PER_BUCKETSEQ = 96; @@ -190,6 +189,7 @@ public class OperationType { // modify database/table/tablet/replica meta public static final short OP_SET_REPLICA_VERSION = 141; + public static final short OP_COLOCATE_MOD_REPLICA_ALLOC = 142; // routine load 200 public static final short OP_CREATE_ROUTINE_LOAD_JOB = 200;