From e257a1dbfc3f82a8ee480f41777a808bc1b7a494 Mon Sep 17 00:00:00 2001 From: qijianliang01 Date: Tue, 15 Mar 2022 18:59:16 +0800 Subject: [PATCH 1/6] support remote storage --- docs/.vuepress/sidebar/en.js | 1 + docs/.vuepress/sidebar/zh-CN.js | 1 + .../Administration/ALTER SYSTEM.md | 249 ++++++++++-------- .../Administration/SHOW REMOTE STORAGES.md | 42 +++ .../Data Definition/ALTER TABLE.md | 2 + .../Data Definition/CREATE TABLE.md | 59 +++-- .../Administration/ALTER SYSTEM.md | 38 ++- .../Administration/SHOW REMOTE STORAGES.md | 43 +++ .../Data Definition/ALTER TABLE.md | 2 + .../Data Definition/CREATE TABLE.md | 64 +++-- .../apache/doris/common/FeMetaVersion.java | 4 +- fe/fe-core/src/main/cup/sql_parser.cup | 21 +- .../org/apache/doris/alter/SystemHandler.java | 6 + .../analysis/AddRemoteStorageClause.java | 141 ++++++++++ .../doris/analysis/AlterSystemStmt.java | 15 +- .../analysis/DropRemoteStorageClause.java | 31 +++ .../doris/analysis/RemoteStorageClause.java | 55 ++++ .../analysis/ShowRemoteStoragesStmt.java | 64 +++++ .../org/apache/doris/catalog/Catalog.java | 31 ++- .../apache/doris/catalog/DataProperty.java | 41 ++- .../org/apache/doris/catalog/OlapTable.java | 2 +- .../apache/doris/catalog/PartitionInfo.java | 6 +- .../doris/catalog/RemoteStorageMgr.java | 222 ++++++++++++++++ .../doris/catalog/RemoteStorageProperty.java | 68 +++++ .../org/apache/doris/catalog/S3Property.java | 106 ++++++++ .../org/apache/doris/common/MetaReader.java | 1 + .../org/apache/doris/common/MetaWriter.java | 1 + .../doris/common/proc/PartitionsProcDir.java | 5 +- .../apache/doris/common/proc/ProcService.java | 1 + .../doris/common/util/PrintableMap.java | 3 + .../doris/common/util/PropertyAnalyzer.java | 44 +++- .../apache/doris/journal/JournalEntity.java | 8 + .../org/apache/doris/persist/EditLog.java | 21 ++ .../apache/doris/persist/OperationType.java | 2 + .../org/apache/doris/qe/ShowExecutor.java | 9 + fe/fe-core/src/main/jflex/sql_scanner.flex | 2 + .../org/apache/doris/alter/AlterTest.java | 4 +- .../doris/analysis/RemoteStorageTest.java | 151 +++++++++++ .../apache/doris/backup/CatalogMocker.java | 6 +- .../doris/catalog/DataPropertyTest.java | 6 +- .../org/apache/doris/clone/RebalanceTest.java | 2 +- .../doris/common/PropertyAnalyzerTest.java | 3 +- 42 files changed, 1405 insertions(+), 178 deletions(-) create mode 100644 docs/en/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md create mode 100644 docs/zh-CN/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md create mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/AddRemoteStorageClause.java create mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/DropRemoteStorageClause.java create mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/RemoteStorageClause.java create mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRemoteStoragesStmt.java create mode 100644 fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java create mode 100644 fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageProperty.java create mode 100644 fe/fe-core/src/main/java/org/apache/doris/catalog/S3Property.java create mode 100644 fe/fe-core/src/test/java/org/apache/doris/analysis/RemoteStorageTest.java diff --git a/docs/.vuepress/sidebar/en.js b/docs/.vuepress/sidebar/en.js index b96c51e7ac09b6..684e68a2d5a1f5 100644 --- a/docs/.vuepress/sidebar/en.js +++ b/docs/.vuepress/sidebar/en.js @@ -610,6 +610,7 @@ module.exports = [ "SHOW INDEX", "SHOW MIGRATIONS", 
"SHOW PLUGINS", + "SHOW REMOTE STORAGES", "SHOW TABLE STATUS", "SHOW TRASH", "UNINSTALL PLUGIN", diff --git a/docs/.vuepress/sidebar/zh-CN.js b/docs/.vuepress/sidebar/zh-CN.js index fc7c6bd9d43721..5f26ee2b8fbb60 100644 --- a/docs/.vuepress/sidebar/zh-CN.js +++ b/docs/.vuepress/sidebar/zh-CN.js @@ -623,6 +623,7 @@ module.exports = [ "SHOW INDEX", "SHOW MIGRATIONS", "SHOW PLUGINS", + "SHOW REMOTE STORAGES", "SHOW TABLE STATUS", "SHOW TRASH", "UNINSTALL PLUGIN", diff --git a/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md b/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md index 43f1dc284320b5..e6d2cc8c78ea57 100644 --- a/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md +++ b/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md @@ -27,112 +27,143 @@ under the License. # ALTER SYSTEM ## Description -This statement is used to operate on nodes in a system. (Administrator only!) -Grammar: -1) Adding nodes (without multi-tenant functionality, add in this way) -ALTER SYSTEM ADD BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; -2) Adding idle nodes (that is, adding BACKEND that does not belong to any cluster) -ALTER SYSTEM ADD FREE BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; -3) Adding nodes to a cluster -ALTER SYSTEM ADD BACKEND TO cluster_name "host:heartbeat_port"[,"host:heartbeat_port"...]; -4) Delete nodes -ALTER SYSTEM DROP BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; -5) Node offline -ALTER SYSTEM DECOMMISSION BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; -6)226;- 21152;-Broker -ALTER SYSTEM ADD BROKER broker_name "host:port"[,"host:port"...]; -(7) 20943;"23569;" Broker -ALTER SYSTEM DROP BROKER broker_name "host:port"[,"host:port"...]; -8) Delete all Brokers -ALTER SYSTEM DROP ALL BROKER broker_name -9) Set up a Load error hub for centralized display of import error information -ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES ("key" = "value"[, ...]); -10) Modify property of BE -ALTER SYSTEM MODIFY BACKEND "host:heartbeat_port" SET ("key" = "value"[, ...]); - -Explain: -1) Host can be hostname or IP address -2) heartbeat_port is the heartbeat port of the node -3) Adding and deleting nodes are synchronous operations. These two operations do not take into account the existing data on the node, the node is directly deleted from the metadata, please use cautiously. -4) Node offline operations are used to secure offline nodes. This operation is asynchronous. If successful, the node will eventually be removed from the metadata. If it fails, the offline will not be completed. -5) The offline operation of the node can be cancelled manually. See CANCEL DECOMMISSION for details -6) Load error hub: - Currently, two types of Hub are supported: Mysql and Broker. You need to specify "type" = "mysql" or "type" = "broker" in PROPERTIES. - If you need to delete the current load error hub, you can set type to null. - 1) When using the Mysql type, the error information generated when importing will be inserted into the specified MySQL library table, and then the error information can be viewed directly through the show load warnings statement. 
- - Hub of Mysql type needs to specify the following parameters: - host: mysql host - port: mysql port - user: mysql user - password: mysql password - database mysql database - table: mysql table - - 2) When the Broker type is used, the error information generated when importing will form a file and be written to the designated remote storage system through the broker. Make sure that the corresponding broker is deployed - Hub of Broker type needs to specify the following parameters: - Broker: Name of broker - Path: Remote Storage Path - Other properties: Other information necessary to access remote storage, such as authentication information. - -7) Modify BE node attributes currently supports the following attributes: - 1. tag.location:Resource tag - 2. disable_query: Query disabled attribute - 3. disable_load: Load disabled attribute - -## example - -1. Add a node -ALTER SYSTEM ADD BACKEND "host:port"; - -2. Adding an idle node -ALTER SYSTEM ADD FREE BACKEND "host:port"; - -3. Delete two nodes -ALTER SYSTEM DROP BACKEND "host1:port", "host2:port"; - -4. offline two nodes -ALTER SYSTEM DECOMMISSION BACKEND "host1:port", "host2:port"; - -5. Add two Hdfs Broker -ALTER SYSTEM ADD BROKER hdfs "host1:port", "host2:port"; - -6. Add a load error hub of Mysql type -ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES -("type"= "mysql", -"host" = "192.168.1.17" -"port" = "3306", -"User" = "my" name, -"password" = "my_passwd", -"database" = "doris_load", -"table" = "load_errors" -); - -7. 添加一个 Broker 类型的 load error hub -ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES -("type"= "broker", -"Name" = BOS, -"path" = "bos://backup-cmy/logs", -"bos_endpoint" ="http://gz.bcebos.com", -"bos_accesskey" = "069fc278xxxxxx24ddb522", -"bos_secret_accesskey"="700adb0c6xxxxxx74d59eaa980a" -); - -8. Delete the current load error hub -ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES -("type"= "null"); - -9. Modify BE resource tag - -ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("tag.location" = "group_a"); - -10. Modify the query disabled attribute of BE - -ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("disable_query" = "true"); - -11. Modify the load disabled attribute of BE - -ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("disable_load" = "true"); - -## keyword -AGE,SYSTEM,BACKGROUND,BROKER,FREE + This statement is used to operate on nodes in a system. (Administrator only!) 
+ Grammar: + 1) Adding nodes (without multi-tenant functionality, add in this way) + ALTER SYSTEM ADD BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; + 2) Adding idle nodes (that is, adding BACKEND that does not belong to any cluster) + ALTER SYSTEM ADD FREE BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; + 3) Adding nodes to a cluster + ALTER SYSTEM ADD BACKEND TO cluster_name "host:heartbeat_port"[,"host:heartbeat_port"...]; + 4) Delete nodes + ALTER SYSTEM DROP BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; + 5) Node offline + ALTER SYSTEM DECOMMISSION BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; + 6) Add Broker + ALTER SYSTEM ADD BROKER broker_name "host:port"[,"host:port"...]; + 7) Drop Broker + ALTER SYSTEM DROP BROKER broker_name "host:port"[,"host:port"...]; + 8) Delete all Brokers + ALTER SYSTEM DROP ALL BROKER broker_name + 9) Set up a Load error hub for centralized display of import error information + ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES ("key" = "value"[, ...]); + 10) Modify property of BE + ALTER SYSTEM MODIFY BACKEND "host:heartbeat_port" SET ("key" = "value"[, ...]); + 11) Add remote storage + ALTER SYSTEM ADD REMOTE STORAGE storage_name PROPERTIES ("key" = "value"[, ...]); + 12) Drop remote storage + ALTER SYSTEM DROP REMOTE STORAGE storage_name; + + Explain: + 1) Host can be hostname or IP address + 2) heartbeat_port is the heartbeat port of the node + 3) Adding and deleting nodes are synchronous operations. These two operations do not take into account the existing data on the node, the node is directly deleted from the metadata, please use cautiously. + 4) Node offline operations are used to secure offline nodes. This operation is asynchronous. If successful, the node will eventually be removed from the metadata. If it fails, the offline will not be completed. + 5) The offline operation of the node can be cancelled manually. See CANCEL DECOMMISSION for details + 6) Load error hub: + Currently, two types of Hub are supported: Mysql and Broker. You need to specify "type" = "mysql" or "type" = "broker" in PROPERTIES. + If you need to delete the current load error hub, you can set type to null. + 1) When using the Mysql type, the error information generated when importing will be inserted into the specified MySQL library table, and then the error information can be viewed directly through the show load warnings statement. + + Hub of Mysql type needs to specify the following parameters: + host: mysql host + port: mysql port + user: mysql user + password: mysql password + database mysql database + table: mysql table + + 2) When the Broker type is used, the error information generated when importing will form a file and be written to the designated remote storage system through the broker. Make sure that the corresponding broker is deployed + Hub of Broker type needs to specify the following parameters: + Broker: Name of broker + Path: Remote Storage Path + Other properties: Other information necessary to access remote storage, such as authentication information. + + 7) Modify BE node attributes currently supports the following attributes: + 1. tag.location:Resource tag + 2. disable_query: Query disabled attribute + 3. disable_load: Load disabled attribute + + 8) Remote storage + Currently supports adding object storage (S3, BOS) as remote storage. + `type=s3` needs to be specified in PROPERTIES. 
+        1) When using S3 as remote storage, the following parameters need to be set
+            s3_endpoint: s3 endpoint
+            s3_region: s3 region
+            s3_root_path: s3 root directory
+            s3_access_key: s3 access key
+            s3_secret_key: s3 secret key
+            s3_max_connections: the maximum number of s3 connections, the default is 50
+            s3_request_timeout_ms: s3 request timeout, in milliseconds, the default is 3000
+            s3_connection_timeout_ms: s3 connection timeout, in milliseconds, the default is 1000
+
+    ## example
+
+    1. Add a node
+        ALTER SYSTEM ADD BACKEND "host:port";
+
+    2. Add an idle node
+        ALTER SYSTEM ADD FREE BACKEND "host:port";
+
+    3. Delete two nodes
+        ALTER SYSTEM DROP BACKEND "host1:port", "host2:port";
+
+    4. Offline two nodes
+        ALTER SYSTEM DECOMMISSION BACKEND "host1:port", "host2:port";
+
+    5. Add two HDFS Brokers
+        ALTER SYSTEM ADD BROKER hdfs "host1:port", "host2:port";
+
+    6. Add a load error hub of Mysql type
+        ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES
+        ("type" = "mysql",
+        "host" = "192.168.1.17",
+        "port" = "3306",
+        "user" = "my_name",
+        "password" = "my_passwd",
+        "database" = "doris_load",
+        "table" = "load_errors"
+        );
+
+    7. Add a load error hub of Broker type
+        ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES
+        ("type" = "broker",
+        "name" = "bos",
+        "path" = "bos://backup-cmy/logs",
+        "bos_endpoint" = "http://gz.bcebos.com",
+        "bos_accesskey" = "069fc278xxxxxx24ddb522",
+        "bos_secret_accesskey" = "700adb0c6xxxxxx74d59eaa980a"
+        );
+
+    8. Delete the current load error hub
+        ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES
+        ("type" = "null");
+
+    9. Modify BE resource tag
+        ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("tag.location" = "group_a");
+
+    10. Modify the query disabled attribute of BE
+        ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("disable_query" = "true");
+
+    11. Modify the load disabled attribute of BE
+        ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("disable_load" = "true");
+
+    12. Add remote storage
+        ALTER SYSTEM ADD REMOTE STORAGE remote_s3 PROPERTIES
+        (
+        "type" = "s3",
+        "s3_endpoint" = "bj",
+        "s3_region" = "bj",
+        "s3_root_path" = "/path/to/root",
+        "s3_access_key" = "bbb",
+        "s3_secret_key" = "aaaa",
+        "s3_max_connections" = "50",
+        "s3_request_timeout_ms" = "3000",
+        "s3_connection_timeout_ms" = "1000"
+        );
+
+    13. Drop remote storage
+        ALTER SYSTEM DROP REMOTE STORAGE remote_s3;
+
+    ## keyword
+    ALTER,SYSTEM,BACKEND,BROKER,FREE,REMOTE STORAGE
diff --git a/docs/en/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md b/docs/en/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md
new file mode 100644
index 00000000000000..ae396b65735ba4
--- /dev/null
+++ b/docs/en/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md
@@ -0,0 +1,42 @@
+---
+{
+    "title": "SHOW REMOTE STORAGES",
+    "language": "en"
+}
+---
+
+
+
+# SHOW REMOTE STORAGES
+
+## Description
+
+    This statement is used to view the existing remote storages
+    Grammar:
+        SHOW REMOTE STORAGES;
+
+    Explain:
+        1. Name: name of remote storage
+        2. Type: type of remote storage
+        3. Properties: properties of remote storage
+
+## keyword
+
+    SHOW, REMOTE STORAGES
diff --git a/docs/en/sql-reference/sql-statements/Data Definition/ALTER TABLE.md b/docs/en/sql-reference/sql-statements/Data Definition/ALTER TABLE.md
index 984220a9c35b3d..10f29a5936720e 100644
--- a/docs/en/sql-reference/sql-statements/Data Definition/ALTER TABLE.md
+++ b/docs/en/sql-reference/sql-statements/Data Definition/ALTER TABLE.md
@@ -71,6 +71,8 @@ under the License. 
1) The following attributes of the modified partition are currently supported. - storage_medium - storage_cooldown_time + - storage_cold_medium + - remote_storage - replication_num — in_memory 2) For single-partition tables, partition_name is the same as the table name. diff --git a/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md b/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md index e89cbb7d2e54ff..849e25e7c761ab 100644 --- a/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md +++ b/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md @@ -296,6 +296,8 @@ Syntax: ``` PROPERTIES ( "storage_medium" = "[SSD|HDD]", + ["storage_cold_medium" = "[S3]"], + ["remote_storage" = "xxx"], ["storage_cooldown_time" = "yyyy-MM-dd HH:mm:ss"], ["replication_num" = "3"], ["replication_allocation" = "xxx"] @@ -304,6 +306,10 @@ Syntax: storage_medium: SSD or HDD, The default initial storage media can be specified by `default_storage_medium= XXX` in the fe configuration file `fe.conf`, or, if not, by default, HDD. Note: when FE configuration 'enable_strict_storage_medium_check' is' True ', if the corresponding storage medium is not set in the cluster, the construction clause 'Failed to find enough host in all backends with storage medium is SSD|HDD'. + storage_cold_medium: Used to specify the cold data storage medium for this partition, currently only S3 is + supported. Default is S3. + remote_storage: The remote storage name, which needs to be used in conjunction with the storage_cold_medium + parameter. storage_cooldown_time: If storage_medium is SSD, data will be automatically moved to HDD when timeout. Default is 30 days. Format: "yyyy-MM-dd HH:mm:ss" @@ -405,8 +411,28 @@ Syntax: "storage_cooldown_time" = "2015-06-04 00:00:00" ); ``` +3. Create an olap table, distributed by hash, with aggregation type. Also set storage medium and cooldown time. + Setting up remote storage and cold data storage media. + ``` + CREATE TABLE example_db.table_hash + ( + k1 BIGINT, + k2 LARGEINT, + v1 VARCHAR(2048) REPLACE, + v2 SMALLINT SUM DEFAULT "10" + ) + ENGINE=olap + AGGREGATE KEY(k1, k2) + DISTRIBUTED BY HASH (k1, k2) BUCKETS 32 + PROPERTIES( + "storage_medium" = "SSD", + "storage_cold_medium" = "S3", + "remote_storage" = "remote_s3", + "storage_cooldown_time" = "2015-06-04 00:00:00" + ); + ``` -3. Create an olap table, with range partitioned, distributed by hash. Records with the same key exist at the same time, set the initial storage medium and cooling time, use default column storage. +4. Create an olap table, with range partitioned, distributed by hash. Records with the same key exist at the same time, set the initial storage medium and cooling time, use default column storage. 1) LESS THAN @@ -466,7 +492,7 @@ Syntax: "storage_medium" = "SSD" ); ``` -4. Create an olap table, with list partitioned, distributed by hash. Records with the same key exist at the same time, set the initial storage medium and cooling time, use default column storage. +5. Create an olap table, with list partitioned, distributed by hash. Records with the same key exist at the same time, set the initial storage medium and cooling time, use default column storage. 1) Single column partition @@ -540,7 +566,7 @@ Syntax: Data that is not within these partition enumeration values will be filtered as illegal data -5. Create a mysql table +6. 
Create a mysql table 5.1 Create MySQL table directly from external table information ``` CREATE EXTERNAL TABLE example_db.table_mysql @@ -593,7 +619,7 @@ Syntax: ) ``` -6. Create a broker table, with file on HDFS, line delimit by "|", column separated by "\n" +7. Create a broker table, with file on HDFS, line delimit by "|", column separated by "\n" ``` CREATE EXTERNAL TABLE example_db.table_broker ( @@ -616,7 +642,7 @@ Syntax: ); ``` -7. Create table will HLL column +8. Create table will HLL column ``` CREATE TABLE example_db.example_table @@ -631,7 +657,7 @@ Syntax: DISTRIBUTED BY HASH(k1) BUCKETS 32; ``` -8. Create a table will BITMAP_UNION column +9. Create a table will BITMAP_UNION column ``` CREATE TABLE example_db.example_table @@ -645,7 +671,7 @@ Syntax: AGGREGATE KEY(k1, k2) DISTRIBUTED BY HASH(k1) BUCKETS 32; ``` -9. Create a table with QUANTILE_UNION column (the origin value of **v1** and **v2** columns must be **numeric** types) +10. Create a table with QUANTILE_UNION column (the origin value of **v1** and **v2** columns must be **numeric** types) ``` CREATE TABLE example_db.example_table @@ -659,7 +685,8 @@ Syntax: AGGREGATE KEY(k1, k2) DISTRIBUTED BY HASH(k1) BUCKETS 32; ``` -10. Create 2 colocate join table. + +11. Create 2 colocate join table. ``` CREATE TABLE `t1` ( @@ -682,7 +709,7 @@ Syntax: ); ``` -11. Create a broker table, with file on BOS. +12. Create a broker table, with file on BOS. ``` CREATE EXTERNAL TABLE example_db.table_broker ( @@ -700,7 +727,7 @@ Syntax: ); ``` -12. Create a table with a bitmap index +13. Create a table with a bitmap index ``` CREATE TABLE example_db.table_hash @@ -717,7 +744,7 @@ Syntax: DISTRIBUTED BY HASH(k1) BUCKETS 32; ``` -13. Create a dynamic partitioning table (dynamic partitioning needs to be enabled in FE configuration), which creates partitions 3 days in advance every day. For example, if today is' 2020-01-08 ', partitions named 'p20200108', 'p20200109', 'p20200110', 'p20200111' will be created. +14. Create a dynamic partitioning table (dynamic partitioning needs to be enabled in FE configuration), which creates partitions 3 days in advance every day. For example, if today is' 2020-01-08 ', partitions named 'p20200108', 'p20200109', 'p20200110', 'p20200111' will be created. ``` [types: [DATE]; keys: [2020-01-08]; ‥types: [DATE]; keys: [2020-01-09]; ) @@ -747,7 +774,7 @@ Syntax: "dynamic_partition.buckets" = "32" ); ``` -14. Create a table with rollup index +15. Create a table with rollup index ``` CREATE TABLE example_db.rolup_index_table ( @@ -767,7 +794,7 @@ Syntax: PROPERTIES("replication_num" = "3"); ``` -15. Create a inmemory table: +16. Create a inmemory table: ``` CREATE TABLE example_db.table_hash @@ -785,7 +812,7 @@ Syntax: PROPERTIES ("in_memory"="true"); ``` -16. Create a hive external table +17. Create a hive external table ``` CREATE TABLE example_db.table_hive ( @@ -802,7 +829,7 @@ Syntax: ); ``` -17. Specify the replica distribution of the table through replication_allocation +18. Specify the replica distribution of the table through replication_allocation ``` CREATE TABLE example_db.table_hash @@ -835,7 +862,7 @@ Syntax: ); ``` -17. Create an Iceberg external table +19. 
Create an Iceberg external table ``` CREATE TABLE example_db.t_iceberg diff --git a/docs/zh-CN/sql-reference/sql-statements/Administration/ALTER SYSTEM.md b/docs/zh-CN/sql-reference/sql-statements/Administration/ALTER SYSTEM.md index 8af7db553de4c6..bdb0f5a82d99c1 100644 --- a/docs/zh-CN/sql-reference/sql-statements/Administration/ALTER SYSTEM.md +++ b/docs/zh-CN/sql-reference/sql-statements/Administration/ALTER SYSTEM.md @@ -49,6 +49,10 @@ under the License. ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES ("key" = "value"[, ...]); 10) 修改一个 BE 节点的属性 ALTER SYSTEM MODIFY BACKEND "host:heartbeat_port" SET ("key" = "value"[, ...]); + 11)增加一个远端存储 + ALTER SYSTEM ADD REMOTE STORAGE storage_name PROPERTIES ("key" = "value"[, ...]); + 12) 删除一个远端存储 + ALTER SYSTEM DROP REMOTE STORAGE storage_name; 说明: 1) host 可以是主机名或者ip地址 @@ -80,6 +84,18 @@ under the License. 1. tag.location:资源标签 2. disable_query: 查询禁用属性 3. disable_load: 导入禁用属性 + 8)远端存储: + 当前支持添加对象存储(S3,BOS)作为远端存储。 + 需要在 PROPERTIES 中指定 `type = s3`。 + 1)当使用S3作为远端存储时,需要设置以下参数 + s3_endpoint:s3 endpoint + s3_region:s3 region + s3_root_path:s3 根目录 + s3_access_key:s3 access key + s3_secret_key:s3 secret key + s3_max_connections:s3 最大连接数量,默认为 50 + s3_request_timeout_ms:s3 请求超时时间,单位毫秒,默认为 3000 + s3_connection_timeout_ms:s3 连接超时时间,单位毫秒,默认为 1000 ## example @@ -124,17 +140,31 @@ under the License. ("type"= "null"); 9. 修改 BE 的资源标签 - ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("tag.location" = "group_a"); 10. 修改 BE 的查询禁用属性 - ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("disable_query" = "true"); 11. 修改 BE 的导入禁用属性 - ALTER SYSTEM MODIFY BACKEND "host1:9050" SET ("disable_load" = "true"); + 12. 增加远端存储 + ALTER SYSTEM ADD REMOTE STORAGE remote_s3 PROPERTIES + ( + "type" = "s3", + "s3_endpoint" = "bj", + "s3_region" = "bj", + "s3_root_path" = "/path/to/root", + "s3_access_key" = "bbb", + "s3_secret_key" = "aaaa", + "s3_max_connections" = "50", + "s3_request_timeout_ms" = "3000", + "s3_connection_timeout_ms" = "1000" + ); + + 13. 删除远端存储 + ALTER SYSTEM DROP REMOTE STORAGE remote_s3; + ## keyword - ALTER,SYSTEM,BACKEND,BROKER,FREE + ALTER,SYSTEM,BACKEND,BROKER,FREE,REMOTE STORAGE diff --git a/docs/zh-CN/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md b/docs/zh-CN/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md new file mode 100644 index 00000000000000..8db5f988a15ef4 --- /dev/null +++ b/docs/zh-CN/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md @@ -0,0 +1,43 @@ +--- +{ + "title": "SHOW REMOTE STORAGES", + "language": "zh-CN" +} +--- + + + +# SHOW REMOTE STORAGES + +## Description + + 该语句用于查看当前存在的远端存储 + 语法: + SHOW REMOTE STORAGES; + + 说明: + 1. Name:远端存储的名字 + 2. Type:远端存储的类型 + 3. Properties:远端存储的参数 + +## keyword + + SHOW, REMOTE STORAGES + diff --git a/docs/zh-CN/sql-reference/sql-statements/Data Definition/ALTER TABLE.md b/docs/zh-CN/sql-reference/sql-statements/Data Definition/ALTER TABLE.md index c5a4a3796b5bc1..fa7169e2576e45 100644 --- a/docs/zh-CN/sql-reference/sql-statements/Data Definition/ALTER TABLE.md +++ b/docs/zh-CN/sql-reference/sql-statements/Data Definition/ALTER TABLE.md @@ -71,6 +71,8 @@ under the License. 
1) 当前支持修改分区的下列属性: - storage_medium - storage_cooldown_time + - storage_cold_medium + - remote_storage - replication_num — in_memory 2) 对于单分区表,partition_name 同表名。 diff --git a/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE TABLE.md b/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE TABLE.md index ae51ab323a9455..2166a10d6ca33a 100644 --- a/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE TABLE.md +++ b/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE TABLE.md @@ -307,6 +307,8 @@ under the License. ``` PROPERTIES ( "storage_medium" = "[SSD|HDD]", + ["storage_cold_medium" = "[S3]"], + ["remote_storage" = "xxx"], ["storage_cooldown_time" = "yyyy-MM-dd HH:mm:ss"], ["replication_num" = "3"] ["replication_allocation" = "xxx"] @@ -314,7 +316,9 @@ under the License. ``` storage_medium: 用于指定该分区的初始存储介质,可选择 SSD 或 HDD。默认初始存储介质可通过fe的配置文件 `fe.conf` 中指定 `default_storage_medium=xxx`,如果没有指定,则默认为 HDD。 - 注意:当FE配置项 `enable_strict_storage_medium_check` 为 `True` 时,若集群中没有设置对应的存储介质时,建表语句会报错 `Failed to find enough host in all backends with storage medium is SSD|HDD`. + 注意:当FE配置项 `enable_strict_storage_medium_check` 为 `True` 时,若集群中没有设置对应的存储介质时,建表语句会报错 `Failed to find enough host in all backends with storage medium is SSD|HDD`. + storage_cold_medium: 用于指定该分区的冷数据存储介质,当前只支持 S3。默认为 S3。 + remote_storage: 远端存储名称,需要与 storage_cold_medium 参数搭配使用。 storage_cooldown_time: 当设置存储介质为 SSD 时,指定该分区在 SSD 上的存储到期时间。 默认存放 30 天。 格式为:"yyyy-MM-dd HH:mm:ss" @@ -429,7 +433,29 @@ under the License. ); ``` -3. 创建一个 olap 表,使用 Range 分区,使用Hash分桶,默认使用列存, +3. 创建一个 olap 表,使用 Hash 分桶,使用列存,相同key的记录进行覆盖,设置初始存储介质和冷却时间 + 设置远端存储和冷数据存储介质 + + ``` + CREATE TABLE example_db.table_hash + ( + k1 BIGINT, + k2 LARGEINT, + v1 VARCHAR(2048) REPLACE, + v2 SMALLINT SUM DEFAULT "10" + ) + ENGINE=olap + AGGREGATE KEY(k1, k2) + DISTRIBUTED BY HASH (k1, k2) BUCKETS 32 + PROPERTIES( + "storage_medium" = "SSD", + "storage_cold_medium" = "S3", + "remote_storage" = "remote_s3", + "storage_cooldown_time" = "2015-06-04 00:00:00" + ); + ``` + +4. 创建一个 olap 表,使用 Range 分区,使用Hash分桶,默认使用列存, 相同key的记录同时存在,设置初始存储介质和冷却时间 1)LESS THAN @@ -468,7 +494,7 @@ under the License. 不在这些分区范围内的数据将视为非法数据被过滤 - 2) Fixed Range + 1) Fixed Range ``` CREATE TABLE table_range @@ -492,7 +518,7 @@ under the License. ); ``` -4. 创建一个 olap 表,使用 List 分区,使用Hash分桶,默认使用列存, +5. 创建一个 olap 表,使用 List 分区,使用Hash分桶,默认使用列存, 相同key的记录同时存在,设置初始存储介质和冷却时间 1)单列分区 @@ -531,7 +557,7 @@ under the License. 不在这些分区枚举值内的数据将视为非法数据被过滤 - 2) 多列分区 + 1) 多列分区 ``` CREATE TABLE example_db.table_list @@ -567,7 +593,7 @@ under the License. 不在这些分区枚举值内的数据将视为非法数据被过滤 -5. 创建一个 mysql 表 +6. 创建一个 mysql 表 5.1 直接通过外表信息创建mysql表 ``` @@ -621,7 +647,7 @@ under the License. ) ``` -6. 创建一个数据文件存储在HDFS上的 broker 外部表, 数据使用 "|" 分割,"\n" 换行 +7. 创建一个数据文件存储在HDFS上的 broker 外部表, 数据使用 "|" 分割,"\n" 换行 ``` CREATE EXTERNAL TABLE example_db.table_broker ( @@ -644,7 +670,7 @@ under the License. ) ``` -7. 创建一张含有HLL列的表 +8. 创建一张含有HLL列的表 ``` CREATE TABLE example_db.example_table @@ -659,7 +685,7 @@ under the License. DISTRIBUTED BY HASH(k1) BUCKETS 32; ``` -8. 创建一张含有BITMAP_UNION聚合类型的表(v1和v2列的原始数据类型必须是TINYINT,SMALLINT,INT) +9. 创建一张含有BITMAP_UNION聚合类型的表(v1和v2列的原始数据类型必须是TINYINT,SMALLINT,INT) ``` CREATE TABLE example_db.example_table @@ -674,7 +700,7 @@ under the License. DISTRIBUTED BY HASH(k1) BUCKETS 32; ``` -1. 创建一张含有QUANTILE_UNION聚合类型的表(v1和v2列的原始数据类型必须是数值类型) +10. 创建一张含有QUANTILE_UNION聚合类型的表(v1和v2列的原始数据类型必须是数值类型) ``` CREATE TABLE example_db.example_table @@ -689,7 +715,7 @@ under the License. 
DISTRIBUTED BY HASH(k1) BUCKETS 32; ``` -10. 创建两张支持Colocate Join的表t1 和t2 +11. 创建两张支持Colocate Join的表t1 和t2 ``` CREATE TABLE `t1` ( @@ -713,7 +739,7 @@ under the License. ); ``` -11. 创建一个数据文件存储在BOS上的 broker 外部表 +12. 创建一个数据文件存储在BOS上的 broker 外部表 ``` CREATE EXTERNAL TABLE example_db.table_broker ( @@ -731,7 +757,7 @@ under the License. ) ``` -12. 创建一个带有bitmap 索引的表 +13. 创建一个带有bitmap 索引的表 ``` CREATE TABLE example_db.table_hash @@ -748,7 +774,7 @@ under the License. DISTRIBUTED BY HASH(k1) BUCKETS 32; ``` -13. 创建一个动态分区表(需要在FE配置中开启动态分区功能),该表每天提前创建3天的分区,并删除3天前的分区。例如今天为`2020-01-08`,则会创建分区名为`p20200108`, `p20200109`, `p20200110`, `p20200111`的分区. 分区范围分别为: +14. 创建一个动态分区表(需要在FE配置中开启动态分区功能),该表每天提前创建3天的分区,并删除3天前的分区。例如今天为`2020-01-08`,则会创建分区名为`p20200108`, `p20200109`, `p20200110`, `p20200111`的分区. 分区范围分别为: ``` [types: [DATE]; keys: [2020-01-08]; ‥types: [DATE]; keys: [2020-01-09]; ) @@ -780,7 +806,7 @@ under the License. ); ``` -14. 创建一个带有rollup索引的表 +15. 创建一个带有rollup索引的表 ``` CREATE TABLE example_db.rollup_index_table ( @@ -799,7 +825,7 @@ under the License. ) PROPERTIES("replication_num" = "3"); ``` -15. 创建一个内存表 +16. 创建一个内存表 ``` CREATE TABLE example_db.table_hash @@ -817,7 +843,7 @@ under the License. PROPERTIES ("in_memory"="true"); ``` -16. 创建一个hive外部表 +17. 创建一个hive外部表 ``` CREATE TABLE example_db.table_hive @@ -835,7 +861,7 @@ under the License. ); ``` -17. 通过 replication_allocation 指定表的副本分布 +18. 通过 replication_allocation 指定表的副本分布 ``` CREATE TABLE example_db.table_hash @@ -869,7 +895,7 @@ under the License. ); ``` -17. 创建一个 Iceberg 外表 +19. 创建一个 Iceberg 外表 ``` CREATE TABLE example_db.t_iceberg diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/FeMetaVersion.java b/fe/fe-common/src/main/java/org/apache/doris/common/FeMetaVersion.java index e5288a2c475985..a51034a697b11c 100644 --- a/fe/fe-common/src/main/java/org/apache/doris/common/FeMetaVersion.java +++ b/fe/fe-common/src/main/java/org/apache/doris/common/FeMetaVersion.java @@ -34,8 +34,10 @@ public final class FeMetaVersion { public static final int VERSION_106 = 106; // support stream load 2PC public static final int VERSION_107 = 107; + // support remote storage + public static final int VERSION_108 = 108; // note: when increment meta version, should assign the latest version to VERSION_CURRENT - public static final int VERSION_CURRENT = VERSION_107; + public static final int VERSION_CURRENT = VERSION_108; // all logs meta version should >= the minimum version, so that we could remove many if clause, for example // if (FE_METAVERSION < VERSION_94) ... 
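The `VERSION_108` bump above is what makes the new `DataProperty` fields readable across upgrades: the write path always emits them, and only the read path branches on the journal version of the image being loaded. A minimal sketch of that pattern follows (a hypothetical standalone class; `writeUTF`/`readUTF` stand in for Doris's `Text.writeString`/`Text.readString`, and plain strings for `TStorageMedium`):

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Version-gated (de)serialization, mirroring how DataProperty handles VERSION_108.
class DataPropertySketch {
    static final int VERSION_108 = 108;

    String storageMedium;      // stands in for TStorageMedium
    long cooldownTimeMs;
    String remoteStorageName;  // new field, only present since VERSION_108
    String storageColdMedium;  // new field, only present since VERSION_108

    // Writing always happens at the current version, so new fields are always emitted.
    void write(DataOutput out) throws IOException {
        out.writeUTF(storageMedium);
        out.writeLong(cooldownTimeMs);
        out.writeUTF(remoteStorageName);
        out.writeUTF(storageColdMedium);
    }

    // Reading branches on the version the image was written with: older images
    // do not carry the new fields, so replay falls back to the patch's defaults.
    void readFields(DataInput in, int journalVersion) throws IOException {
        storageMedium = in.readUTF();
        cooldownTimeMs = in.readLong();
        if (journalVersion >= VERSION_108) {
            remoteStorageName = in.readUTF();
            storageColdMedium = in.readUTF();
        } else {
            remoteStorageName = "";
            storageColdMedium = "S3";
        }
    }
}
```

This is also why the comment above insists that `VERSION_CURRENT` track the latest constant: an image written without the new fields but loaded by a reader expecting them would be unreadable.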
diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index 2bf9da4afc6512..93457d4dea54f4 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -260,12 +260,12 @@ terminal String KW_ADD, KW_ADMIN, KW_AFTER, KW_AGGREGATE, KW_ALIAS, KW_ALL, KW_A KW_PLUGIN, KW_PLUGINS, KW_PROC, KW_PROCEDURE, KW_PROCESSLIST, KW_PROFILE, KW_PROPERTIES, KW_PROPERTY, KW_QUERY, KW_QUOTA, - KW_RANDOM, KW_RANGE, KW_READ, KW_REBALANCE, KW_RECOVER, KW_REFRESH, KW_REGEXP, KW_RELEASE, KW_RENAME, + KW_RANDOM, KW_RANGE, KW_READ, KW_REBALANCE, KW_RECOVER, KW_REFRESH, KW_REGEXP, KW_RELEASE, KW_REMOTE, KW_RENAME, KW_REPAIR, KW_REPEATABLE, KW_REPOSITORY, KW_REPOSITORIES, KW_REPLACE, KW_REPLACE_IF_NOT_NULL, KW_REPLICA, KW_RESOURCE, KW_RESOURCES, KW_RESTORE, KW_RETURNS, KW_RESUME, KW_REVOKE, KW_RIGHT, KW_ROLE, KW_ROLES, KW_ROLLBACK, KW_ROLLUP, KW_ROUTINE, KW_ROW, KW_ROWS, KW_S3, KW_SCHEMA, KW_SCHEMAS, KW_SECOND, KW_SELECT, KW_SEMI, KW_SERIALIZABLE, KW_SESSION, KW_SET, KW_SETS, KW_SHOW, KW_SIGNED, KW_SKEW, - KW_SMALLINT, KW_SNAPSHOT, KW_SONAME, KW_SPLIT, KW_START, KW_STATUS, KW_STATS, KW_STOP, KW_STORAGE, KW_STREAM, KW_STRING, KW_STRUCT, + KW_SMALLINT, KW_SNAPSHOT, KW_SONAME, KW_SPLIT, KW_START, KW_STATUS, KW_STATS, KW_STOP, KW_STORAGE, KW_STORAGES, KW_STREAM, KW_STRING, KW_STRUCT, KW_SUM, KW_SUPERUSER, KW_SYNC, KW_SYSTEM, KW_TABLE, KW_TABLES, KW_TABLET, KW_TABLETS, KW_TASK, KW_TEMPORARY, KW_TERMINATED, KW_TEXT, KW_THAN, KW_TIME, KW_THEN, KW_TIMESTAMP, KW_TINYINT,KW_TRASH, KW_TO, KW_TRANSACTION, KW_TRIGGERS, KW_TRIM, KW_TRUE, KW_TRUNCATE, KW_TYPE, KW_TYPES, @@ -1185,6 +1185,15 @@ alter_system_clause ::= {: RESULT = new ModifyBackendClause(hostPorts, properties); :} + // remote storage + | KW_ADD KW_REMOTE KW_STORAGE ident_or_text:remoteStorageName opt_properties:properties + {: + RESULT = new AddRemoteStorageClause(remoteStorageName, properties); + :} + | KW_DROP KW_REMOTE KW_STORAGE ident_or_text:remoteStorageName + {: + RESULT = new DropRemoteStorageClause(remoteStorageName); + :} ; // Sync Stmt @@ -2783,6 +2792,10 @@ show_param ::= {: RESULT = new ShowBrokerStmt(); :} + | KW_REMOTE KW_STORAGES + {: + RESULT = new ShowRemoteStoragesStmt(); + :} | KW_RESOURCES opt_wild_where order_by_clause:orderByClause limit_clause:limitClause {: RESULT = new ShowResourcesStmt(parser.where, orderByClause, limitClause); @@ -5630,6 +5643,8 @@ keyword ::= {: RESULT = id; :} | KW_REFRESH:id {: RESULT = id; :} + | KW_REMOTE:id + {: RESULT = id; :} | KW_REPEATABLE:id {: RESULT = id; :} | KW_REPLACE:id @@ -5674,6 +5689,8 @@ keyword ::= {: RESULT = id; :} | KW_STORAGE:id {: RESULT = id; :} + | KW_STORAGES:id + {: RESULT = id; :} | KW_STREAM:id {: RESULT = id; :} | KW_STRUCT:id diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java index c8814a78a99266..df5c24a6540a1f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java @@ -20,6 +20,7 @@ import org.apache.doris.analysis.AddBackendClause; import org.apache.doris.analysis.AddFollowerClause; import org.apache.doris.analysis.AddObserverClause; +import org.apache.doris.analysis.AddRemoteStorageClause; import org.apache.doris.analysis.AlterClause; import org.apache.doris.analysis.AlterLoadErrorUrlClause; import org.apache.doris.analysis.CancelAlterSystemStmt; @@ -28,6 +29,7 @@ import org.apache.doris.analysis.DropBackendClause; 
import org.apache.doris.analysis.DropFollowerClause; import org.apache.doris.analysis.DropObserverClause; +import org.apache.doris.analysis.DropRemoteStorageClause; import org.apache.doris.analysis.ModifyBackendClause; import org.apache.doris.analysis.ModifyBrokerClause; import org.apache.doris.catalog.Catalog; @@ -173,6 +175,10 @@ public synchronized void process(List alterClauses, String clusterN Catalog.getCurrentCatalog().getLoadInstance().setLoadErrorHubInfo(clause.getProperties()); } else if (alterClause instanceof ModifyBackendClause) { Catalog.getCurrentSystemInfo().modifyBackends(((ModifyBackendClause) alterClause)); + } else if (alterClause instanceof AddRemoteStorageClause) { + Catalog.getCurrentCatalog().getRemoteStorageMgr().addRemoteStorage((AddRemoteStorageClause) alterClause); + } else if (alterClause instanceof DropRemoteStorageClause) { + Catalog.getCurrentCatalog().getRemoteStorageMgr().dropRemoteStorage((DropRemoteStorageClause) alterClause); } else { Preconditions.checkState(false, alterClause.getClass()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AddRemoteStorageClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AddRemoteStorageClause.java new file mode 100644 index 00000000000000..9d3ae0c8b07264 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AddRemoteStorageClause.java @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +import org.apache.doris.catalog.RemoteStorageProperty; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.UserException; +import org.apache.doris.common.util.PrintableMap; + +import com.google.common.base.Strings; +import com.google.common.collect.Maps; + +import java.util.Map; + +import static org.apache.doris.catalog.S3Property.S3_ACCESS_KEY; +import static org.apache.doris.catalog.S3Property.S3_CONNECTION_TIMEOUT_MS; +import static org.apache.doris.catalog.S3Property.S3_ENDPOINT; +import static org.apache.doris.catalog.S3Property.S3_MAX_CONNECTIONS; +import static org.apache.doris.catalog.S3Property.S3_REGION; +import static org.apache.doris.catalog.S3Property.S3_REQUEST_TIMEOUT_MS; +import static org.apache.doris.catalog.S3Property.S3_ROOT_PATH; +import static org.apache.doris.catalog.S3Property.S3_SECRET_KEY; + +/** + * Add remote storage clause + * Syntax: + * ALTER SYSTEM ADD REMOTE STORAGE `remote_storage_name` + * PROPERTIES + * ( + * "key" = "value", + * ... + * ) + */ +public class AddRemoteStorageClause extends RemoteStorageClause { + private static final String PROPERTY_MISSING_MSG = "Remote storage %s is null. 
" + + "Please add properties('%s'='xxx') when create remote storage."; + private static final String TYPE = "type"; + + private Map properties; + private RemoteStorageProperty.RemoteStorageType remoteStorageType; + + public AddRemoteStorageClause(String name, Map properties) { + super(name); + this.properties = properties; + } + + @Override + public Map getProperties() { + return properties; + } + + public RemoteStorageProperty.RemoteStorageType getRemoteStorageType() { + return remoteStorageType; + } + + @Override + public void analyze(Analyzer analyzer) throws UserException { + super.analyze(analyzer); + + // analyze properties + if (properties == null || properties.size() == 0) { + throw new AnalysisException("Please add remote storage properties first. " + + "You can find examples in `HELP ADD REMOTE STORAGE`."); + } + String storageType = properties.get(TYPE); + if (Strings.isNullOrEmpty(storageType)) { + throw new AnalysisException("Remote storage type is empty."); + } + if (storageType.equalsIgnoreCase("s3")) { + this.remoteStorageType = RemoteStorageProperty.RemoteStorageType.S3; + analyzeS3Properties(); + } else { + throw new AnalysisException("Not supported storage type: " + storageType); + } + } + + private void analyzeS3Properties() throws AnalysisException { + Map s3Properties = Maps.newHashMap(properties); + s3Properties.remove(TYPE); + + if (Strings.isNullOrEmpty(s3Properties.get(S3_ENDPOINT))) { + throw new AnalysisException(String.format(PROPERTY_MISSING_MSG, S3_ENDPOINT, S3_ENDPOINT)); + } + s3Properties.remove(S3_ENDPOINT); + + if (Strings.isNullOrEmpty(s3Properties.get(S3_REGION))) { + throw new AnalysisException(String.format(PROPERTY_MISSING_MSG, S3_REGION, S3_REGION)); + } + s3Properties.remove(S3_REGION); + + if (Strings.isNullOrEmpty(s3Properties.get(S3_ROOT_PATH))) { + throw new AnalysisException(String.format(PROPERTY_MISSING_MSG, S3_ROOT_PATH, S3_ROOT_PATH)); + } + s3Properties.remove(S3_ROOT_PATH); + + if (Strings.isNullOrEmpty(s3Properties.get(S3_ACCESS_KEY))) { + throw new AnalysisException(String.format(PROPERTY_MISSING_MSG, S3_ACCESS_KEY, S3_ACCESS_KEY)); + } + s3Properties.remove(S3_ACCESS_KEY); + + if (Strings.isNullOrEmpty(s3Properties.get(S3_SECRET_KEY))) { + throw new AnalysisException(String.format(PROPERTY_MISSING_MSG, S3_SECRET_KEY, S3_SECRET_KEY)); + } + s3Properties.remove(S3_SECRET_KEY); + s3Properties.remove(S3_MAX_CONNECTIONS); + s3Properties.remove(S3_REQUEST_TIMEOUT_MS); + s3Properties.remove(S3_CONNECTION_TIMEOUT_MS); + + if (!s3Properties.isEmpty()) { + throw new AnalysisException("Unknown s3 remote storage properties: " + s3Properties); + } + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("ADD REMOTE STORAGE ") + .append(getStorageName()); + if (properties != null && !properties.isEmpty()) { + sb.append("\nPROPERTIES ("); + sb.append(new PrintableMap(properties, " = ", true, true, true)); + sb.append(")"); + } + return sb.toString(); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java index cf16640270ac6d..e2160cd3ac3431 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java @@ -41,11 +41,20 @@ public AlterClause getAlterClause() { @Override public void analyze(Analyzer analyzer) throws UserException { - if 
(!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.OPERATOR)) { + // Alter remote storage clause does not need `NODE` privilege + if (!(alterClause instanceof RemoteStorageClause) && + (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.OPERATOR))) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "NODE"); } + // check privilege, need `ADMIN` to operate remote storage + if ((alterClause instanceof RemoteStorageClause) && + (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN))) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, + "ADMIN"); + } + Preconditions.checkState((alterClause instanceof AddBackendClause) || (alterClause instanceof DropBackendClause) || (alterClause instanceof DecommissionBackendClause) @@ -55,7 +64,9 @@ public void analyze(Analyzer analyzer) throws UserException { || (alterClause instanceof DropFollowerClause) || (alterClause instanceof ModifyBrokerClause) || (alterClause instanceof AlterLoadErrorUrlClause) - || (alterClause instanceof ModifyBackendClause)); + || (alterClause instanceof ModifyBackendClause) + || (alterClause instanceof AddRemoteStorageClause) + || (alterClause instanceof DropRemoteStorageClause)); alterClause.analyze(analyzer); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropRemoteStorageClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropRemoteStorageClause.java new file mode 100644 index 00000000000000..9f32fecc6a5dac --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropRemoteStorageClause.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +public class DropRemoteStorageClause extends RemoteStorageClause { + public DropRemoteStorageClause(String storageName) { + super(storageName); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("DROP REMOTE STORAGE ").append(getStorageName()); + return sb.toString(); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RemoteStorageClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RemoteStorageClause.java new file mode 100644 index 00000000000000..0333638fe2e204 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RemoteStorageClause.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +import org.apache.doris.alter.AlterOpType; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.UserException; + +import org.apache.commons.lang.NotImplementedException; + +import com.google.common.base.Strings; + +/** + * Remote storage clause, including add/drop + */ +public class RemoteStorageClause extends AlterClause { + // remote storage name + private String storageName; + + public String getStorageName() { + return storageName; + } + + public RemoteStorageClause(String storageName) { + super(AlterOpType.ALTER_OTHER); + this.storageName = storageName; + } + + @Override + public void analyze(Analyzer analyzer) throws UserException { + if (Strings.isNullOrEmpty(storageName)) { + throw new AnalysisException("Remote storage name cannot be empty."); + } + } + + @Override + public String toSql() { + throw new NotImplementedException(); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRemoteStoragesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRemoteStoragesStmt.java new file mode 100644 index 00000000000000..5dac90fcdb6ddf --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRemoteStoragesStmt.java @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.analysis; + +import org.apache.doris.catalog.Catalog; +import org.apache.doris.catalog.Column; +import org.apache.doris.catalog.RemoteStorageMgr; +import org.apache.doris.catalog.ScalarType; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.UserException; +import org.apache.doris.mysql.privilege.PrivPredicate; +import org.apache.doris.qe.ConnectContext; +import org.apache.doris.qe.ShowResultSetMetaData; + +public class ShowRemoteStoragesStmt extends ShowStmt { + + public ShowRemoteStoragesStmt() { + + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException, UserException { + // check privilege, need `ADMIN` to operate remote storage + if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, + "ADMIN"); + } + } + + @Override + public ShowResultSetMetaData getMetaData() { + ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); + for (String title : RemoteStorageMgr.REMOTE_STORAGE_PROC_NODE_TITLE_NAMES) { + builder.addColumn(new Column(title, ScalarType.createVarchar(30))); + } + return builder.build(); + } + + @Override + public RedirectStatus getRedirectStatus() { + if (ConnectContext.get().getSessionVariable().getForwardToMaster()) { + return RedirectStatus.FORWARD_NO_SYNC; + } else { + return RedirectStatus.NO_FORWARD; + } + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java index 57a066680fe0e3..8a799e05713703 100755 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java @@ -412,6 +412,7 @@ public class Catalog { private BrokerMgr brokerMgr; private ResourceMgr resourceMgr; + private RemoteStorageMgr remoteStorageMgr; private GlobalTransactionMgr globalTransactionMgr; @@ -576,6 +577,7 @@ private Catalog(boolean isCheckpointCatalog) { this.brokerMgr = new BrokerMgr(); this.resourceMgr = new ResourceMgr(); + this.remoteStorageMgr = new RemoteStorageMgr(); this.globalTransactionMgr = new GlobalTransactionMgr(this); @@ -665,6 +667,10 @@ public ResourceMgr getResourceMgr() { return resourceMgr; } + public RemoteStorageMgr getRemoteStorageMgr() { + return remoteStorageMgr; + } + public static GlobalTransactionMgr getCurrentGlobalTransactionMgr() { return getCurrentCatalog().globalTransactionMgr; } @@ -4980,7 +4986,7 @@ public HashMap getPartitionIdToStorageMediumMap() { if (dataProperty.getStorageMedium() == TStorageMedium.SSD && dataProperty.getCooldownTimeMs() < currentTimeMs) { // expire. change to HDD. 
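                // Note: the replacement DataProperty below pins S3 as the cold medium; the
                // two-argument constructor also resets remoteStorageName to its "" default.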
- partitionInfo.setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD)); + partitionInfo.setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD, TStorageMedium.S3)); storageMediumMap.put(partitionId, TStorageMedium.HDD); LOG.debug("partition[{}-{}-{}] storage medium changed from SSD to HDD", dbId, tableId, partitionId); @@ -6632,6 +6638,29 @@ public long loadBrokers(DataInputStream dis, long checksum) throws IOException, return checksum; } + public long saveRemoteStorage(CountingDataOutputStream dos, long checksum) throws IOException { + Map storageInfoMap = remoteStorageMgr.getStorageInfoMap(); + int size = storageInfoMap.size(); + checksum ^= size; + dos.writeInt(size); + + for (RemoteStorageMgr.RemoteStorageInfo info : storageInfoMap.values()) { + info.write(dos); + } + return checksum; + } + + public long loadRemoteStorage(DataInputStream dis, long checksum) throws IOException { + int count = dis.readInt(); + checksum ^= count; + for (long i = 0; i < count; ++i) { + RemoteStorageMgr.RemoteStorageInfo storageInfo = RemoteStorageMgr.RemoteStorageInfo.readIn(dis); + remoteStorageMgr.replayAddRemoteStorage(storageInfo); + } + LOG.info("finished replay RemoteStorageMgr from image."); + return checksum; + } + public void replayUpdateClusterAndBackends(BackendIdsUpdateInfo info) { for (long id : info.getBackendList()) { final Backend backend = systemInfo.getBackend(id); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java index 8d4c22c9e6cde7..53b1318abc18ac 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java @@ -18,6 +18,7 @@ package org.apache.doris.catalog; import org.apache.doris.common.Config; +import org.apache.doris.common.FeMetaVersion; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.common.util.TimeUtils; @@ -30,19 +31,24 @@ public class DataProperty implements Writable { public static final DataProperty DEFAULT_DATA_PROPERTY = new DataProperty( - "SSD".equalsIgnoreCase(Config.default_storage_medium) ? TStorageMedium.SSD : TStorageMedium.HDD); + "SSD".equalsIgnoreCase(Config.default_storage_medium) ? TStorageMedium.SSD : TStorageMedium.HDD, + "HDD".equalsIgnoreCase(Config.default_storage_medium) ? 
TStorageMedium.HDD : TStorageMedium.S3);
     public static final long MAX_COOLDOWN_TIME_MS = 253402271999000L; // 9999-12-31 23:59:59
 
     @SerializedName(value = "storageMedium")
     private TStorageMedium storageMedium;
     @SerializedName(value = "cooldownTimeMs")
     private long cooldownTimeMs;
+    @SerializedName(value = "remoteStorageName")
+    private String remoteStorageName;
+    @SerializedName(value = "storageColdMedium")
+    private TStorageMedium storageColdMedium;
 
     private DataProperty() {
         // for persist
     }
 
-    public DataProperty(TStorageMedium medium) {
+    public DataProperty(TStorageMedium medium, TStorageMedium coldMedium) {
         this.storageMedium = medium;
         if (medium == TStorageMedium.SSD) {
             long currentTimeMs = System.currentTimeMillis();
@@ -50,11 +56,15 @@ public DataProperty(TStorageMedium medium) {
         } else {
             this.cooldownTimeMs = MAX_COOLDOWN_TIME_MS;
         }
+        this.remoteStorageName = "";
+        this.storageColdMedium = coldMedium;
     }
 
-    public DataProperty(TStorageMedium medium, long cooldown) {
+    public DataProperty(TStorageMedium medium, long cooldown, String remoteStorageName, TStorageMedium coldMedium) {
         this.storageMedium = medium;
         this.cooldownTimeMs = cooldown;
+        this.remoteStorageName = remoteStorageName;
+        this.storageColdMedium = coldMedium;
     }
 
     public TStorageMedium getStorageMedium() {
@@ -65,6 +75,14 @@ public long getCooldownTimeMs() {
         return cooldownTimeMs;
     }
 
+    public String getRemoteStorageName() {
+        return remoteStorageName;
+    }
+
+    public TStorageMedium getStorageColdMedium() {
+        return storageColdMedium;
+    }
+
     public static DataProperty read(DataInput in) throws IOException {
         DataProperty dataProperty = new DataProperty();
         dataProperty.readFields(in);
@@ -75,11 +93,20 @@ public static DataProperty read(DataInput in) throws IOException {
     public void write(DataOutput out) throws IOException {
         Text.writeString(out, storageMedium.name());
         out.writeLong(cooldownTimeMs);
+        Text.writeString(out, remoteStorageName);
+        Text.writeString(out, storageColdMedium.name());
     }
 
     public void readFields(DataInput in) throws IOException {
         storageMedium = TStorageMedium.valueOf(Text.readString(in));
         cooldownTimeMs = in.readLong();
+        if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_108) {
+            remoteStorageName = Text.readString(in);
+            storageColdMedium = TStorageMedium.valueOf(Text.readString(in));
+        } else {
+            remoteStorageName = "";
+            storageColdMedium = TStorageMedium.S3;
+        }
     }
 
     @Override
@@ -95,14 +122,18 @@ public boolean equals(Object obj) {
 
         DataProperty other = (DataProperty) obj;
 
         return this.storageMedium == other.storageMedium
-                && this.cooldownTimeMs == other.cooldownTimeMs;
+                && this.cooldownTimeMs == other.cooldownTimeMs
+                && this.remoteStorageName.equals(other.remoteStorageName)
+                && this.storageColdMedium == other.storageColdMedium;
     }
 
     @Override
     public String toString() {
         StringBuilder sb = new StringBuilder();
         sb.append("Storage medium[").append(this.storageMedium).append("]. ");
-        sb.append("cool down[").append(TimeUtils.longToTimeString(cooldownTimeMs)).append("].");
+        sb.append("cool down[").append(TimeUtils.longToTimeString(cooldownTimeMs)).append("]. ");
+        sb.append("remote storage[").append(this.remoteStorageName).append("]. 
"); + sb.append("storage cold medium[").append(this.storageColdMedium).append("]."); return sb.toString(); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java index 13590e5f9cf4a8..228fdce3c7886c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java @@ -1257,7 +1257,7 @@ public OlapTable selectiveCopy(Collection reservedPartitions, IndexExtSt // set storage medium to HDD for backup job, because we want that the backuped table // can be able to restored to another Doris cluster without SSD disk. // But for other operation such as truncate table, keep the origin storage medium. - copied.getPartitionInfo().setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD)); + copied.getPartitionInfo().setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD, TStorageMedium.S3)); } for (MaterializedIndex idx : partition.getMaterializedIndices(extState)) { idx.setState(IndexState.NORMAL); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java index a912a35dea4bc8..acb174c0b7a91f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java @@ -303,7 +303,7 @@ public void write(DataOutput out) throws IOException { out.writeInt(idToDataProperty.size()); for (Map.Entry entry : idToDataProperty.entrySet()) { out.writeLong(entry.getKey()); - if (entry.getValue().equals(new DataProperty(TStorageMedium.HDD))) { + if (entry.getValue().equals(new DataProperty(TStorageMedium.HDD, TStorageMedium.S3))) { out.writeBoolean(true); } else { out.writeBoolean(false); @@ -323,7 +323,7 @@ public void readFields(DataInput in) throws IOException { long partitionId = in.readLong(); boolean isDefaultHddDataProperty = in.readBoolean(); if (isDefaultHddDataProperty) { - idToDataProperty.put(partitionId, new DataProperty(TStorageMedium.HDD)); + idToDataProperty.put(partitionId, new DataProperty(TStorageMedium.HDD, TStorageMedium.S3)); } else { idToDataProperty.put(partitionId, DataProperty.read(in)); } @@ -348,7 +348,7 @@ public String toString() { for (Map.Entry entry : idToDataProperty.entrySet()) { buff.append(entry.getKey()).append(" is HDD: "); - if (entry.getValue().equals(new DataProperty(TStorageMedium.HDD))) { + if (entry.getValue().equals(new DataProperty(TStorageMedium.HDD, TStorageMedium.S3))) { buff.append(true); } else { buff.append(false); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java new file mode 100644 index 00000000000000..69ba743675f0d5 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java @@ -0,0 +1,222 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.catalog; + +import org.apache.doris.analysis.AddRemoteStorageClause; +import org.apache.doris.analysis.DropRemoteStorageClause; +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.DdlException; +import org.apache.doris.common.io.Text; +import org.apache.doris.common.io.Writable; +import org.apache.doris.common.proc.BaseProcResult; +import org.apache.doris.common.proc.ProcNodeInterface; +import org.apache.doris.common.proc.ProcResult; +import org.apache.doris.common.util.PrintableMap; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.locks.ReentrantLock; + +public class RemoteStorageMgr { + public static final ImmutableList REMOTE_STORAGE_PROC_NODE_TITLE_NAMES = new ImmutableList.Builder() + .add("Name").add("Type").add("Properties") + .build(); + private final Map storageInfoMap = Maps.newHashMap(); + private final ReentrantLock lock = new ReentrantLock(); + private RemoteStorageProcNode procNode = null; + + public RemoteStorageMgr() { + + } + + public Map getStorageInfoMap() { + return storageInfoMap; + } + + public RemoteStorageProperty getRemoteStorageByName(String storageName) throws AnalysisException { + RemoteStorageInfo info = storageInfoMap.get(storageName); + if (info == null) { + throw new AnalysisException("Unknown remote storage name: " + storageName); + } + return info.getRemoteStorageProperty(); + } + + public void addRemoteStorage(AddRemoteStorageClause clause) throws DdlException { + lock.lock(); + try { + String storageName = clause.getStorageName(); + if (storageInfoMap.containsKey(storageName)) { + throw new DdlException("Remote storage[" + storageName + "] has already in remote storages."); + } + Map properties = clause.getProperties(); + RemoteStorageProperty.RemoteStorageType storageType = clause.getRemoteStorageType(); + RemoteStorageProperty storageProperty; + switch (storageType) { + case S3: + storageProperty = new S3Property(properties); + break; + default: + throw new DdlException("Unknown remote storage type: " + storageType.name()); + } + RemoteStorageInfo storageInfo = new RemoteStorageInfo(storageName, storageProperty); + Catalog.getCurrentCatalog().getEditLog().logAddRemoteStorage(storageInfo); + storageInfoMap.put(storageName, storageInfo); + } finally { + lock.unlock(); + } + } + + public void replayAddRemoteStorage(RemoteStorageInfo storageInfo) { + lock.lock(); + try { + String storageName = storageInfo.getRemoteStorageName(); + RemoteStorageInfo info = storageInfoMap.get(storageName); + if (info == null) { + info = storageInfo; + } + storageInfoMap.put(storageName, info); + } finally { + lock.unlock(); + } + } + + public void dropRemoteStorage(DropRemoteStorageClause clause) throws DdlException { + lock.lock(); + try { + String storageName = clause.getStorageName(); + RemoteStorageInfo storageInfo = storageInfoMap.get(storageName); + if 
(storageInfo == null) { + throw new DdlException("Unknown remote storage name: " + storageName); + } + Catalog.getCurrentCatalog().getEditLog().logDropRemoteStorage(storageInfo); + storageInfoMap.remove(storageName); + } finally { + lock.unlock(); + } + } + + public void replayDropRemoteStorage(RemoteStorageInfo info) { + lock.lock(); + try { + storageInfoMap.remove(info.getRemoteStorageName()); + } finally { + lock.unlock(); + } + } + + public List> getRemoteStoragesInfo() { + lock.lock(); + try { + if (procNode == null) { + procNode = new RemoteStorageProcNode(); + } + return procNode.fetchResult().getRows(); + } finally { + lock.unlock(); + } + } + + public RemoteStorageProcNode getProcNode() { + lock.lock(); + try { + if (procNode == null) { + procNode = new RemoteStorageProcNode(); + } + return procNode; + } finally { + lock.unlock(); + } + } + + public class RemoteStorageProcNode implements ProcNodeInterface { + + @Override + public ProcResult fetchResult() { + BaseProcResult result = new BaseProcResult(); + result.setNames(REMOTE_STORAGE_PROC_NODE_TITLE_NAMES); + + lock.lock(); + try { + for (Map.Entry entry : storageInfoMap.entrySet()) { + String storageName = entry.getKey(); + RemoteStorageProperty property = entry.getValue().getRemoteStorageProperty(); + String storageType = property.getStorageType().name(); + Map properties = property.getProperties(); + + List row = Lists.newArrayList(); + row.add(storageName); + row.add(storageType); + StringBuilder sb = new StringBuilder(); + sb.append(new PrintableMap(properties, " = ", true, true, true)); + row.add(sb.toString()); + result.addRow(row); + } + } finally { + lock.unlock(); + } + + return result; + } + } + + public static class RemoteStorageInfo implements Writable { + private String remoteStorageName; + private RemoteStorageProperty remoteStorageProperty; + + public RemoteStorageInfo() { + + } + + public RemoteStorageInfo(String remoteStorageName, RemoteStorageProperty storageProperty) { + this.remoteStorageName = remoteStorageName; + this.remoteStorageProperty = storageProperty; + } + + public String getRemoteStorageName() { + return remoteStorageName; + } + + public RemoteStorageProperty getRemoteStorageProperty() { + return remoteStorageProperty; + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, remoteStorageName); + RemoteStorageProperty.writeTo(remoteStorageProperty, out); + } + + public void readFields(DataInput in) throws IOException { + remoteStorageName = Text.readString(in); + remoteStorageProperty = RemoteStorageProperty.readIn(in); + } + + public static RemoteStorageInfo readIn(DataInput in) throws IOException { + RemoteStorageInfo info = new RemoteStorageInfo(); + info.readFields(in); + return info; + } + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageProperty.java new file mode 100644 index 00000000000000..bb70dbaf1073a4 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageProperty.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
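`addRemoteStorage` and `dropRemoteStorage` above follow the FE's edit-log discipline: validate under the lock, journal the operation, then mutate the in-memory map, while the `replay*` methods apply the mutation only. A standalone sketch of that discipline; `EditLog` here is a hypothetical stand-in for Doris's journal writer:

```java
import java.util.*;
import java.util.concurrent.locks.ReentrantLock;

// Sketch of the edit-log discipline RemoteStorageMgr follows. Replay (on
// followers and at startup) must be side-effect free: no re-validation that
// can throw, and definitely no re-logging.
public class LogThenApplySketch {
    interface EditLog { void logAdd(String name); }

    private final ReentrantLock lock = new ReentrantLock();
    private final Map<String, String> storages = new HashMap<>();
    private final EditLog editLog;

    LogThenApplySketch(EditLog editLog) { this.editLog = editLog; }

    void add(String name, String config) {
        lock.lock();
        try {
            if (storages.containsKey(name)) {
                throw new IllegalStateException("Remote storage [" + name + "] already exists.");
            }
            editLog.logAdd(name);        // journal first ...
            storages.put(name, config);  // ... then the in-memory state
        } finally {
            lock.unlock();
        }
    }

    void replayAdd(String name, String config) {
        lock.lock();
        try {
            storages.put(name, config);  // apply only
        } finally {
            lock.unlock();
        }
    }

    public static void main(String[] args) {
        LogThenApplySketch mgr = new LogThenApplySketch(name -> System.out.println("journaled ADD " + name));
        mgr.add("remote_s3", "endpoint=...");
    }
}
```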
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.catalog; + + +import org.apache.doris.common.io.Text; +import org.apache.doris.common.io.Writable; + +import org.apache.commons.lang.NotImplementedException; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Map; + +public class RemoteStorageProperty implements Writable { + + public enum RemoteStorageType { + S3 + } + + public RemoteStorageType getStorageType() { + return null; + } + + @Override + public void write(DataOutput out) throws IOException { + throw new IOException("Not implemented serializable."); + } + + public static void writeTo(RemoteStorageProperty property, DataOutput output) throws IOException { + Text.writeString(output, property.getStorageType().name()); + property.write(output); + } + + public static RemoteStorageProperty readIn(DataInput input) throws IOException { + String storageTypeName = Text.readString(input); + RemoteStorageType storageType = RemoteStorageType.valueOf(storageTypeName); + if (storageType == null) { + throw new IOException("Unknown remote storage type: " + storageTypeName); + } + switch (storageType) { + case S3: + return S3Property.read(input); + default: + throw new IOException("Unknown remote storage type: " + storageTypeName); + } + } + + public Map getProperties() { + throw new NotImplementedException(); + } +} \ No newline at end of file diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Property.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Property.java new file mode 100644 index 00000000000000..175e5ba9cf4253 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Property.java @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
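`RemoteStorageProperty.writeTo`/`readIn` above implement tag-dispatch serialization: an enum tag precedes the subclass payload, and the reader switches on the tag to pick the concrete deserializer. Note that `Enum.valueOf` never returns `null` (it throws `IllegalArgumentException` for unknown names), so the `== null` guard in `readIn` is effectively dead code; the pattern relies on the exception. A compilable sketch:

```java
import java.io.*;

// Sketch of the tag-dispatch pattern behind writeTo/readIn. Adding a storage
// type means adding an enum constant and a switch case; an unknown tag fails
// loudly at read time rather than corrupting the stream.
public abstract class TaggedWritableSketch {
    enum Type { S3 }

    abstract Type type();
    abstract void writeBody(DataOutput out) throws IOException;

    static void writeTo(TaggedWritableSketch p, DataOutput out) throws IOException {
        out.writeUTF(p.type().name());  // tag first
        p.writeBody(out);               // then the subclass payload
    }

    static TaggedWritableSketch readIn(DataInput in) throws IOException {
        Type type = Type.valueOf(in.readUTF()); // throws on unknown tags
        switch (type) {
            case S3:
                return S3Sketch.read(in);
            default:
                throw new IOException("Unknown remote storage type: " + type);
        }
    }

    static final class S3Sketch extends TaggedWritableSketch {
        final String endpoint;
        S3Sketch(String endpoint) { this.endpoint = endpoint; }
        Type type() { return Type.S3; }
        void writeBody(DataOutput out) throws IOException { out.writeUTF(endpoint); }
        static S3Sketch read(DataInput in) throws IOException { return new S3Sketch(in.readUTF()); }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        writeTo(new S3Sketch("s3.example.com"), new DataOutputStream(buf));
        TaggedWritableSketch p = readIn(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(((S3Sketch) p).endpoint); // s3.example.com
    }
}
```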
+ +package org.apache.doris.catalog; + +import org.apache.doris.common.io.Text; +import org.apache.doris.common.io.Writable; +import org.apache.doris.persist.gson.GsonUtils; + +import com.google.gson.annotations.SerializedName; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class S3Property extends RemoteStorageProperty implements Writable { + + public static final String S3_ENDPOINT = "s3_endpoint"; + public static final String S3_REGION = "s3_region"; + public static final String S3_ROOT_PATH = "s3_root_path"; + public static final String S3_ACCESS_KEY = "s3_access_key"; + public static final String S3_SECRET_KEY = "s3_secret_key"; + public static final String S3_MAX_CONNECTIONS = "s3_max_connections"; + public static final String S3_REQUEST_TIMEOUT_MS = "s3_request_timeout_ms"; + public static final String S3_CONNECTION_TIMEOUT_MS = "s3_connection_timeout_ms"; + public static final String DEFAULT_S3_MAX_CONNECTIONS = "50"; + public static final String DEFAULT_S3_REQUEST_TIMEOUT_MS = "3000"; + public static final String DEFAULT_S3_CONNECTION_TIMEOUT_MS = "1000"; + + @SerializedName(value = "endPoint") + private String endPoint; + @SerializedName(value = "region") + private String region; + @SerializedName(value = "rootPath") + private String rootPath; + @SerializedName(value = "accessKey") + private String accessKey; + @SerializedName(value = "secretKey") + private String secretKey; + @SerializedName(value = "maxConnections") + private long maxConnections; + @SerializedName(value = "requestTimeoutMs") + private long requestTimeoutMs; + @SerializedName(value = "connectionTimeoutMs") + private long connectionTimeoutMs; + + public S3Property(Map properties) { + this.endPoint = properties.get(S3_ENDPOINT); + this.region = properties.get(S3_REGION); + this.rootPath = properties.get(S3_ROOT_PATH); + this.accessKey = properties.get(S3_ACCESS_KEY); + this.secretKey = properties.get(S3_SECRET_KEY); + this.maxConnections = Long.parseLong(properties.getOrDefault(S3_MAX_CONNECTIONS, DEFAULT_S3_MAX_CONNECTIONS)); + this.requestTimeoutMs = Long.parseLong( + properties.getOrDefault(S3_REQUEST_TIMEOUT_MS, DEFAULT_S3_REQUEST_TIMEOUT_MS)); + this.connectionTimeoutMs = Long.parseLong( + properties.getOrDefault(S3_CONNECTION_TIMEOUT_MS, DEFAULT_S3_CONNECTION_TIMEOUT_MS)); + } + + @Override + public RemoteStorageType getStorageType() { + return RemoteStorageType.S3; + } + + @Override + public void write(DataOutput out) throws IOException { + String json = GsonUtils.GSON.toJson(this); + Text.writeString(out, json); + } + + public static S3Property read(DataInput in) throws IOException { + String json = Text.readString(in); + return GsonUtils.GSON.fromJson(json, S3Property.class); + } + + @Override + public Map getProperties() { + Map properties = new HashMap<>(); + properties.put(S3_ENDPOINT, endPoint); + properties.put(S3_REGION, region); + properties.put(S3_ROOT_PATH, rootPath); + properties.put(S3_ACCESS_KEY, accessKey); + properties.put(S3_SECRET_KEY, secretKey); + properties.put(S3_MAX_CONNECTIONS, String.valueOf(maxConnections)); + properties.put(S3_REQUEST_TIMEOUT_MS, String.valueOf(requestTimeoutMs)); + properties.put(S3_CONNECTION_TIMEOUT_MS, String.valueOf(connectionTimeoutMs)); + + return properties; + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/MetaReader.java b/fe/fe-core/src/main/java/org/apache/doris/common/MetaReader.java index be646f96ff6f81..834ef8235b6a1a 100644 --- 
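`S3Property` above persists itself as a single Gson JSON blob inside the binary image, which makes future field additions tolerant: missing keys simply deserialize to `null`/`0`. A sketch of that JSON-in-Writable approach; `writeUTF` stands in for Doris's `Text.writeString` and caps strings at 64 KB, which is fine for illustration. Note also that the constructor's `Long.parseLong` calls would surface a malformed `s3_max_connections` as an uncaught `NumberFormatException`, so callers presumably validate the properties first:

```java
import java.io.*;
import com.google.gson.Gson;

// Sketch of the JSON-in-Writable approach used by S3Property: serialize the
// whole object to one length-prefixed JSON string. Requires
// com.google.code.gson on the classpath.
public class JsonWritableSketch {
    private static final Gson GSON = new Gson();

    String endPoint;
    long maxConnections;

    void write(DataOutput out) throws IOException {
        out.writeUTF(GSON.toJson(this));   // one JSON blob, not field-by-field
    }

    static JsonWritableSketch read(DataInput in) throws IOException {
        return GSON.fromJson(in.readUTF(), JsonWritableSketch.class);
    }

    public static void main(String[] args) throws IOException {
        JsonWritableSketch p = new JsonWritableSketch();
        p.endPoint = "s3.example.com";
        p.maxConnections = 50;
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        p.write(new DataOutputStream(buf));
        JsonWritableSketch back = read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(back.endPoint); // s3.example.com
    }
}
```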
a/fe/fe-core/src/main/java/org/apache/doris/common/MetaReader.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/MetaReader.java @@ -103,6 +103,7 @@ public static void read(File imageFile, Catalog catalog) throws IOException, Ddl checksum = catalog.loadPlugins(dis, checksum); checksum = catalog.loadDeleteHandler(dis, checksum); checksum = catalog.loadSqlBlockRule(dis, checksum); + checksum = catalog.loadRemoteStorage(dis, checksum); } MetaFooter metaFooter = MetaFooter.read(imageFile); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/MetaWriter.java b/fe/fe-core/src/main/java/org/apache/doris/common/MetaWriter.java index 6b9d9aa79106ea..1848db1a54450f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/MetaWriter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/MetaWriter.java @@ -128,6 +128,7 @@ public static void write(File imageFile, Catalog catalog) throws IOException { checksum.setRef(writer.doWork("plugins", () -> catalog.savePlugins(dos, checksum.getRef()))); checksum.setRef(writer.doWork("deleteHandler", () -> catalog.saveDeleteHandler(dos, checksum.getRef()))); checksum.setRef(writer.doWork("sqlBlockRule", () -> catalog.saveSqlBlockRule(dos, checksum.getRef()))); + checksum.setRef(writer.doWork("remoteStorage", () -> catalog.saveRemoteStorage(dos, checksum.getRef()))); } MetaFooter.write(imageFile, metaIndices, checksum.getRef()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java index 661eaaddc7c10a..81bd61d4ee5181 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java @@ -66,7 +66,8 @@ public class PartitionsProcDir implements ProcDirInterface { .add("PartitionId").add("PartitionName") .add("VisibleVersion").add("VisibleVersionTime") .add("State").add("PartitionKey").add("Range").add("DistributionKey") - .add("Buckets").add("ReplicationNum").add("StorageMedium").add("CooldownTime") + .add("Buckets").add("ReplicationNum").add("StorageMedium") + .add("StorageColdMedium").add("RemoteStorage").add("CooldownTime") .add("LastConsistencyCheckTime").add("DataSize").add("IsInMemory").add("ReplicaAllocation") .build(); @@ -268,6 +269,8 @@ private List> getPartitionInfos() { DataProperty dataProperty = tblPartitionInfo.getDataProperty(partitionId); partitionInfo.add(dataProperty.getStorageMedium().name()); + partitionInfo.add(dataProperty.getStorageColdMedium().name()); + partitionInfo.add(dataProperty.getRemoteStorageName()); partitionInfo.add(TimeUtils.longToTimeString(dataProperty.getCooldownTimeMs())); partitionInfo.add(TimeUtils.longToTimeString(partition.getLastCheckTime())); diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcService.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcService.java index da966634daaefc..5bb1cbf69a3bf0 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcService.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcService.java @@ -58,6 +58,7 @@ private ProcService() { root.register("stream_loads", new StreamLoadProcNode()); root.register("colocation_group", new ColocationGroupProcDir()); root.register("bdbje", new BDBJEProcDir()); + root.register("remote_storage", Catalog.getCurrentCatalog().getRemoteStorageMgr().getProcNode()); } // 通过指定的路径获得对应的PROC Node diff --git 
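The `ProcService` hunk above mounts the manager's node at `/proc/remote_storage`, the same registry that backs `SHOW PROC`. A minimal sketch of that contract, with hypothetical stand-ins for `ProcNodeInterface` and the result type:

```java
import java.util.*;

// Minimal sketch of the proc-node contract: a node renders its state as a
// title row plus data rows, and a registry maps path segments to nodes.
public class ProcNodeSketch {
    interface ProcNode { List<List<String>> fetchResult(); }

    static final Map<String, ProcNode> ROOT = new HashMap<>();

    static void register(String name, ProcNode node) { ROOT.put(name, node); }

    public static void main(String[] args) {
        register("remote_storage", () -> Arrays.asList(
                Arrays.asList("Name", "Type", "Properties"),           // title row
                Arrays.asList("remote_s3", "S3", "\"s3_endpoint\" = \"...\"")));
        System.out.println(ROOT.get("remote_storage").fetchResult());
    }
}
```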
a/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java index 7e0d92067e0da9..f4268aadafcd2b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java @@ -23,6 +23,8 @@ import java.util.Map; import java.util.Set; +import static org.apache.doris.catalog.S3Property.S3_SECRET_KEY; + public class PrintableMap { private Map map; private String keyValueSeparator; @@ -37,6 +39,7 @@ public class PrintableMap { SENSITIVE_KEY.add("password"); SENSITIVE_KEY.add("kerberos_keytab_content"); SENSITIVE_KEY.add("bos_secret_accesskey"); + SENSITIVE_KEY.add(S3_SECRET_KEY); } public PrintableMap(Map map, String keyValueSeparator, diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java index ceac147bae87e0..576eed7787a594 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java @@ -19,11 +19,13 @@ import org.apache.doris.analysis.DataSortInfo; import org.apache.doris.analysis.DateLiteral; +import org.apache.doris.catalog.Catalog; import org.apache.doris.catalog.Column; import org.apache.doris.catalog.DataProperty; import org.apache.doris.catalog.KeysType; import org.apache.doris.catalog.Partition; import org.apache.doris.catalog.PrimitiveType; +import org.apache.doris.catalog.RemoteStorageProperty; import org.apache.doris.catalog.ReplicaAllocation; import org.apache.doris.catalog.ScalarType; import org.apache.doris.catalog.Type; @@ -56,6 +58,8 @@ public class PropertyAnalyzer { public static final String PROPERTIES_REPLICATION_ALLOCATION = "replication_allocation"; public static final String PROPERTIES_STORAGE_TYPE = "storage_type"; public static final String PROPERTIES_STORAGE_MEDIUM = "storage_medium"; + public static final String PROPERTIES_REMOTE_STORAGE = "remote_storage"; + public static final String PROPERTIES_STORAGE_COLD_MEDIUM = "storage_cold_medium"; public static final String PROPERTIES_STORAGE_COLDOWN_TIME = "storage_cooldown_time"; // for 1.x -> 2.x migration public static final String PROPERTIES_VERSION_INFO = "version_info"; @@ -110,10 +114,15 @@ public static DataProperty analyzeDataProperty(Map properties, D } TStorageMedium storageMedium = null; + TStorageMedium coldStorageMedium = TStorageMedium.S3; + String remoteStorageName = ""; + RemoteStorageProperty remotestorageProperty = null; long coolDownTimeStamp = DataProperty.MAX_COOLDOWN_TIME_MS; boolean hasMedium = false; boolean hasCooldown = false; + boolean hasColdMedium = false; + boolean hasRemoteStorage = false; for (Map.Entry entry : properties.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); @@ -130,21 +139,48 @@ public static DataProperty analyzeDataProperty(Map properties, D hasCooldown = true; DateLiteral dateLiteral = new DateLiteral(value, Type.DATETIME); coolDownTimeStamp = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); + } else if (!hasColdMedium && key.equalsIgnoreCase(PROPERTIES_STORAGE_COLD_MEDIUM)) { + hasColdMedium = true; + if (value.equalsIgnoreCase(TStorageMedium.S3.name())) { + coldStorageMedium = TStorageMedium.S3; + } else { + throw new AnalysisException("Invalid cold storage medium: " + value); + } + } else if (!hasRemoteStorage && 
key.equalsIgnoreCase(PROPERTIES_REMOTE_STORAGE)) { + hasRemoteStorage = true; + remoteStorageName = value; } } // end for properties - if (!hasCooldown && !hasMedium) { + if (!hasCooldown && !hasMedium && !hasRemoteStorage) { return oldDataProperty; } properties.remove(PROPERTIES_STORAGE_MEDIUM); properties.remove(PROPERTIES_STORAGE_COLDOWN_TIME); + properties.remove(PROPERTIES_STORAGE_COLD_MEDIUM); + properties.remove(PROPERTIES_REMOTE_STORAGE); + + if ((hasColdMedium && !hasRemoteStorage) || (!hasColdMedium && hasRemoteStorage)) { + throw new AnalysisException("Invalid data property, " + + "`storage_cold_medium` must be used with `remote_storage`."); + } + + if (hasColdMedium && hasRemoteStorage) { + remotestorageProperty = Catalog.getCurrentCatalog() + .getRemoteStorageMgr().getRemoteStorageByName(remoteStorageName); + + if (!coldStorageMedium.name().equalsIgnoreCase(remotestorageProperty.getStorageType().name())) { + throw new AnalysisException("Invalid data property, " + + "`storage_cold_medium` is inconsistent with `remote_storage`."); + } + } if (hasCooldown && !hasMedium) { throw new AnalysisException("Invalid data property. storage medium property is not found"); } - if (storageMedium == TStorageMedium.HDD && hasCooldown) { + if ((storageMedium == TStorageMedium.HDD && hasCooldown) && !(hasColdMedium && hasRemoteStorage)) { throw new AnalysisException("Can not assign cooldown timestamp to HDD storage medium"); } @@ -161,9 +197,9 @@ public static DataProperty analyzeDataProperty(Map properties, D } Preconditions.checkNotNull(storageMedium); - return new DataProperty(storageMedium, coolDownTimeStamp); + return new DataProperty(storageMedium, coolDownTimeStamp, remoteStorageName, coldStorageMedium); } - + public static short analyzeShortKeyColumnCount(Map properties) throws AnalysisException { short shortKeyColumnCount = (short) -1; if (properties != null && properties.containsKey(PROPERTIES_SHORT_KEY)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java index e35011ce76949d..a4087d4ce3bce1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java @@ -30,6 +30,7 @@ import org.apache.doris.catalog.EncryptKeySearchDesc; import org.apache.doris.catalog.Function; import org.apache.doris.catalog.FunctionSearchDesc; +import org.apache.doris.catalog.RemoteStorageMgr; import org.apache.doris.catalog.Resource; import org.apache.doris.cluster.BaseParam; import org.apache.doris.cluster.Cluster; @@ -402,6 +403,13 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } + case OperationType.OP_ADD_REMOTE_STORAGE: + case OperationType.OP_DROP_REMOTE_STORAGE: { + data = new RemoteStorageMgr.RemoteStorageInfo(); + ((RemoteStorageMgr.RemoteStorageInfo) data).readFields(in); + isRead = true; + break; + } case OperationType.OP_DROP_ALL_BROKER: { data = new Text(); ((Text) data).readFields(in); diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java index 574c77e156e58b..34182f7bc6128e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java @@ -32,6 +32,7 @@ import org.apache.doris.catalog.EncryptKeySearchDesc; import org.apache.doris.catalog.Function; import org.apache.doris.catalog.FunctionSearchDesc; 
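The `analyzeDataProperty` changes above enforce that `storage_cold_medium` and `remote_storage` appear together, and that the declared cold medium matches the type of the registered remote storage. A standalone sketch of that paired-property validation; `registeredTypes` stands in for the `RemoteStorageMgr` lookup:

```java
import java.util.*;

// Sketch of the paired-property validation in analyzeDataProperty: either
// both keys are present (and mutually consistent) or neither is.
public class PairedPropertySketch {
    static void validate(Map<String, String> props, Map<String, String> registeredTypes) {
        String coldMedium = props.get("storage_cold_medium");
        String remoteStorage = props.get("remote_storage");
        if ((coldMedium == null) != (remoteStorage == null)) {
            throw new IllegalArgumentException(
                    "`storage_cold_medium` must be used with `remote_storage`.");
        }
        if (coldMedium != null) {
            String registered = registeredTypes.get(remoteStorage);
            if (registered == null) {
                throw new IllegalArgumentException("Unknown remote storage: " + remoteStorage);
            }
            if (!coldMedium.equalsIgnoreCase(registered)) {
                throw new IllegalArgumentException(
                        "`storage_cold_medium` is inconsistent with `remote_storage`.");
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("storage_cold_medium", "S3");
        props.put("remote_storage", "remote_s3");
        Map<String, String> registered = new HashMap<>();
        registered.put("remote_s3", "S3");
        validate(props, registered); // passes
        System.out.println("ok");
    }
}
```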
+import org.apache.doris.catalog.RemoteStorageMgr; import org.apache.doris.catalog.Resource; import org.apache.doris.cluster.BaseParam; import org.apache.doris.cluster.Cluster; @@ -519,6 +520,18 @@ public static void loadJournal(Catalog catalog, JournalEntity journal) { catalog.getBrokerMgr().replayDropAllBroker(param); break; } + case OperationType.OP_ADD_REMOTE_STORAGE: { + final RemoteStorageMgr.RemoteStorageInfo param = + (RemoteStorageMgr.RemoteStorageInfo) journal.getData(); + catalog.getRemoteStorageMgr().replayAddRemoteStorage(param); + break; + } + case OperationType.OP_DROP_REMOTE_STORAGE: { + final RemoteStorageMgr.RemoteStorageInfo param = + (RemoteStorageMgr.RemoteStorageInfo) journal.getData(); + catalog.getRemoteStorageMgr().replayDropRemoteStorage(param); + break; + } case OperationType.OP_SET_LOAD_ERROR_HUB: { final LoadErrorHub.Param param = (LoadErrorHub.Param) journal.getData(); catalog.getLoadInstance().setLoadErrorHubInfo(param); @@ -1164,6 +1177,14 @@ public void logDropBroker(BrokerMgr.ModifyBrokerInfo info) { logEdit(OperationType.OP_DROP_BROKER, info); } + public void logAddRemoteStorage(RemoteStorageMgr.RemoteStorageInfo info) { + logEdit(OperationType.OP_ADD_REMOTE_STORAGE, info); + } + + public void logDropRemoteStorage(RemoteStorageMgr.RemoteStorageInfo info) { + logEdit(OperationType.OP_DROP_REMOTE_STORAGE, info); + } + public void logDropAllBroker(String brokerName) { logEdit(OperationType.OP_DROP_ALL_BROKER, new Text(brokerName)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java b/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java index bb5aaa971edadf..5b93f88f5744a9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java @@ -137,6 +137,8 @@ public class OperationType { public static final short OP_CREATE_REPOSITORY = 89; public static final short OP_DROP_REPOSITORY = 90; public static final short OP_MODIFY_BACKEND = 91; + public static final short OP_ADD_REMOTE_STORAGE = 92; + public static final short OP_DROP_REMOTE_STORAGE = 93; //colocate table public static final short OP_COLOCATE_ADD_TABLE = 94; diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java index f92d6b7cd5a3d5..cc51da3ec2c2de 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java @@ -60,6 +60,7 @@ import org.apache.doris.analysis.ShowProcStmt; import org.apache.doris.analysis.ShowProcesslistStmt; import org.apache.doris.analysis.ShowQueryProfileStmt; +import org.apache.doris.analysis.ShowRemoteStoragesStmt; import org.apache.doris.analysis.ShowRepositoriesStmt; import org.apache.doris.analysis.ShowResourcesStmt; import org.apache.doris.analysis.ShowRestoreStmt; @@ -277,6 +278,8 @@ public ShowResultSet execute() throws AnalysisException { handleShowMigrations(); } else if (stmt instanceof ShowBrokerStmt) { handleShowBroker(); + } else if (stmt instanceof ShowRemoteStoragesStmt) { + handleShowRemoteStorages(); } else if (stmt instanceof ShowResourcesStmt) { handleShowResources(); } else if (stmt instanceof ShowExportStmt) { @@ -1574,6 +1577,12 @@ private void handleShowBroker() { resultSet = new ShowResultSet(showStmt.getMetaData(), brokersInfo); } + private void handleShowRemoteStorages() { + ShowRemoteStoragesStmt showStmt = (ShowRemoteStoragesStmt) stmt; + List> 
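The `JournalEntity` and `EditLog` hunks wire the two new op codes into both stages of journal replay: one switch chooses how to deserialize the payload, the other how to apply it, and a new `OperationType` must be registered in both or replay breaks. A sketch of that two-stage dispatch; the op-code values mirror the constants added to `OperationType`, and `writeUTF` stands in for `RemoteStorageInfo`'s serialization:

```java
import java.io.*;
import java.util.*;

// Sketch of two-stage journal dispatch: readPayload mirrors
// JournalEntity.readFields, apply mirrors EditLog.loadJournal.
public class JournalDispatchSketch {
    static final short OP_ADD_REMOTE_STORAGE = 92;
    static final short OP_DROP_REMOTE_STORAGE = 93;

    static Object readPayload(short opCode, DataInput in) throws IOException {
        switch (opCode) {
            case OP_ADD_REMOTE_STORAGE:
            case OP_DROP_REMOTE_STORAGE:
                return in.readUTF(); // stand-in for RemoteStorageInfo.readFields
            default:
                throw new IOException("Unknown op code: " + opCode);
        }
    }

    static void apply(short opCode, Object payload, Map<String, String> state) {
        switch (opCode) {
            case OP_ADD_REMOTE_STORAGE:
                state.put((String) payload, "present");
                break;
            case OP_DROP_REMOTE_STORAGE:
                state.remove((String) payload);
                break;
            default:
                throw new IllegalStateException("Unknown op code: " + opCode);
        }
    }

    public static void main(String[] args) throws IOException {
        Map<String, String> state = new HashMap<>();
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        new DataOutputStream(buf).writeUTF("remote_s3");
        Object payload = readPayload(OP_ADD_REMOTE_STORAGE,
                new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        apply(OP_ADD_REMOTE_STORAGE, payload, state);
        System.out.println(state); // {remote_s3=present}
    }
}
```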
storageInfo = Catalog.getCurrentCatalog().getRemoteStorageMgr().getRemoteStoragesInfo(); + resultSet = new ShowResultSet(showStmt.getMetaData(), storageInfo); + } + // Handle show resources private void handleShowResources() { ShowResourcesStmt showStmt = (ShowResourcesStmt) stmt; diff --git a/fe/fe-core/src/main/jflex/sql_scanner.flex b/fe/fe-core/src/main/jflex/sql_scanner.flex index 7ab3c7ed0b6eb2..858b62b0e6965e 100644 --- a/fe/fe-core/src/main/jflex/sql_scanner.flex +++ b/fe/fe-core/src/main/jflex/sql_scanner.flex @@ -325,6 +325,7 @@ import org.apache.doris.qe.SqlModeHelper; keywordMap.put("refresh", new Integer(SqlParserSymbols.KW_REFRESH)); keywordMap.put("regexp", new Integer(SqlParserSymbols.KW_REGEXP)); keywordMap.put("release", new Integer(SqlParserSymbols.KW_RELEASE)); + keywordMap.put("remote", new Integer(SqlParserSymbols.KW_REMOTE)); keywordMap.put("rename", new Integer(SqlParserSymbols.KW_RENAME)); keywordMap.put("repair", new Integer(SqlParserSymbols.KW_REPAIR)); keywordMap.put("repeatable", new Integer(SqlParserSymbols.KW_REPEATABLE)); @@ -369,6 +370,7 @@ import org.apache.doris.qe.SqlModeHelper; keywordMap.put("stats", new Integer(SqlParserSymbols.KW_STATS)); keywordMap.put("stop", new Integer(SqlParserSymbols.KW_STOP)); keywordMap.put("storage", new Integer(SqlParserSymbols.KW_STORAGE)); + keywordMap.put("storages", new Integer(SqlParserSymbols.KW_STORAGES)); keywordMap.put("stream", new Integer(SqlParserSymbols.KW_STREAM)); keywordMap.put("string", new Integer(SqlParserSymbols.KW_STRING)); keywordMap.put("struct", new Integer(SqlParserSymbols.KW_STRUCT)); diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java index c055ba20ba0d98..1fe2a078dbee87 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java @@ -387,13 +387,13 @@ public void testBatchUpdatePartitionProperties() throws Exception { stmt = "alter table test.tbl4 modify partition (p2, p3, p4) set ('storage_medium' = 'HDD')"; DateLiteral dateLiteral = new DateLiteral("9999-12-31 00:00:00", Type.DATETIME); long coolDownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); - DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, coolDownTimeMs); + DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, coolDownTimeMs, "", TStorageMedium.S3); partitionList = Lists.newArrayList(p2, p3, p4); for (Partition partition : partitionList) { Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(partition.getId())); } alterTable(stmt, false); - DataProperty newDataProperty = new DataProperty(TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS); + DataProperty newDataProperty = new DataProperty(TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS, "", TStorageMedium.S3); for (Partition partition : partitionList) { Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(partition.getId())); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/RemoteStorageTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/RemoteStorageTest.java new file mode 100644 index 00000000000000..d794e3c4264f67 --- /dev/null +++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/RemoteStorageTest.java @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +import org.apache.doris.common.AnalysisException; + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Maps; + +import java.util.HashMap; +import java.util.Map; + +import static org.apache.doris.catalog.S3Property.S3_ACCESS_KEY; +import static org.apache.doris.catalog.S3Property.S3_CONNECTION_TIMEOUT_MS; +import static org.apache.doris.catalog.S3Property.S3_ENDPOINT; +import static org.apache.doris.catalog.S3Property.S3_MAX_CONNECTIONS; +import static org.apache.doris.catalog.S3Property.S3_REGION; +import static org.apache.doris.catalog.S3Property.S3_REQUEST_TIMEOUT_MS; +import static org.apache.doris.catalog.S3Property.S3_ROOT_PATH; +import static org.apache.doris.catalog.S3Property.S3_SECRET_KEY; + +public class RemoteStorageTest { + private static Analyzer analyzer; + + @BeforeClass + public static void setUp() throws Exception { + analyzer = AccessTestUtil.fetchAdminAnalyzer(true); + } + + public RemoteStorageClause createStmt(int type, String name, Map properties) { + RemoteStorageClause stmt = null; + switch (type) { + case 1: + stmt = new AddRemoteStorageClause(name, properties); + break; + case 2: + stmt = new DropRemoteStorageClause(name); + break; + default: + break; + } + return stmt; + } + + private Map initPropertyMap(String type) { + Map properties = new HashMap<>(); + properties.put("type", type); + properties.put(S3_ENDPOINT, "s3.amazonaws.com"); + properties.put(S3_REGION, "us"); + properties.put(S3_ROOT_PATH, "/doris"); + properties.put(S3_ACCESS_KEY, "xxxxxxx"); + properties.put(S3_SECRET_KEY, "yyyyyyyy"); + properties.put(S3_MAX_CONNECTIONS, "50"); + properties.put(S3_REQUEST_TIMEOUT_MS, "3000"); + properties.put(S3_CONNECTION_TIMEOUT_MS, "1000"); + return properties; + } + + @Test(expected = AnalysisException.class) + public void addRemoteStorageNullNameTest() throws Exception { + RemoteStorageClause stmt = createStmt(1, null, initPropertyMap("s3")); + stmt.analyze(analyzer); + } + + @Test(expected = AnalysisException.class) + public void addRemoteStorageEmptyNameTest() throws Exception { + RemoteStorageClause stmt = createStmt(1, "", initPropertyMap("s3")); + stmt.analyze(analyzer); + } + + @Test(expected = AnalysisException.class) + public void addRemoteStorageMissingPropertyTest() throws Exception { + Map properties = initPropertyMap("s3"); + properties.remove(S3_ENDPOINT); + RemoteStorageClause stmt = createStmt(1, "remote_s3", properties); + stmt.analyze(analyzer); + } + + @Test(expected = AnalysisException.class) + public void addRemoteStorageUnknownTypeTest() throws Exception { + RemoteStorageClause stmt = createStmt(1, "remote_s3", initPropertyMap("hdfs")); + stmt.analyze(analyzer); + } + + @Test(expected = AnalysisException.class) + public void 
addRemoteStorageEmptyTypeTest() throws Exception { + RemoteStorageClause stmt = createStmt(1, "remote_s3", initPropertyMap("hdfs")); + stmt.analyze(analyzer); + } + + @Test(expected = AnalysisException.class) + public void addRemoteStorageEmptyPropertiesTest() throws Exception { + RemoteStorageClause stmt = createStmt(1, "remote_s3", Maps.newHashMap()); + stmt.analyze(analyzer); + } + + @Test(expected = AnalysisException.class) + public void addRemoteStorageUnknownPropertyTest() throws Exception { + Map properties = initPropertyMap("s3"); + properties.put("s3_unknown", "xxx"); + RemoteStorageClause stmt = createStmt(1, "remote_s3", properties); + stmt.analyze(analyzer); + } + + @Test + public void addRemoteStorageNormalTest() throws Exception { + RemoteStorageClause stmt = createStmt(1, "remote_s3", initPropertyMap("s3")); + stmt.analyze(analyzer); + Assert.assertEquals("ADD REMOTE STORAGE remote_s3\n" + + "PROPERTIES (\"s3_secret_key\" = \"*XXX\",\n" + + "\"s3_region\" = \"us\",\n" + + "\"s3_access_key\" = \"xxxxxxx\",\n" + + "\"s3_max_connections\" = \"50\",\n" + + "\"s3_connection_timeout_ms\" = \"1000\",\n" + + "\"type\" = \"s3\",\n" + + "\"s3_root_path\" = \"/doris\",\n" + + "\"s3_endpoint\" = \"s3.amazonaws.com\",\n" + + "\"s3_request_timeout_ms\" = \"3000\")", stmt.toSql()); + } + + @Test(expected = AnalysisException.class) + public void dropRemoteStorageEmptyNameTest() throws Exception { + RemoteStorageClause stmt = createStmt(2, "", Maps.newHashMap()); + stmt.analyze(analyzer); + } + + @Test + public void dropRemoteStorageNormalTest() throws Exception { + RemoteStorageClause stmt = createStmt(2, "remote_s3", Maps.newHashMap()); + stmt.analyze(analyzer); + Assert.assertEquals("DROP REMOTE STORAGE remote_s3", stmt.toSql()); + } +} diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java b/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java index d4cf23d3f5dfe7..19de064df40910 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java @@ -241,7 +241,7 @@ public static Database mockDb() throws UserException { PartitionInfo partitionInfo = new SinglePartitionInfo(); partitionInfo.setReplicaAllocation(TEST_SINGLE_PARTITION_ID, new ReplicaAllocation((short) 3)); partitionInfo.setIsInMemory(TEST_SINGLE_PARTITION_ID, false); - DataProperty dataProperty = new DataProperty(TStorageMedium.HDD); + DataProperty dataProperty = new DataProperty(TStorageMedium.HDD, TStorageMedium.S3); partitionInfo.setDataProperty(TEST_SINGLE_PARTITION_ID, dataProperty); OlapTable olapTable = new OlapTable(TEST_TBL_ID, TEST_TBL_NAME, TEST_TBL_BASE_SCHEMA, KeysType.AGG_KEYS, partitionInfo, distributionInfo); @@ -312,8 +312,8 @@ public static Database mockDb() throws UserException { rangePartitionInfo.setReplicaAllocation(TEST_PARTITION1_ID, new ReplicaAllocation((short) 3)); rangePartitionInfo.setReplicaAllocation(TEST_PARTITION2_ID, new ReplicaAllocation((short) 3)); - DataProperty dataPropertyP1 = new DataProperty(TStorageMedium.HDD); - DataProperty dataPropertyP2 = new DataProperty(TStorageMedium.HDD); + DataProperty dataPropertyP1 = new DataProperty(TStorageMedium.HDD, TStorageMedium.S3); + DataProperty dataPropertyP2 = new DataProperty(TStorageMedium.HDD, TStorageMedium.S3); rangePartitionInfo.setDataProperty(TEST_PARTITION1_ID, dataPropertyP1); rangePartitionInfo.setDataProperty(TEST_PARTITION2_ID, dataPropertyP2); diff --git 
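One test-only observation: `addRemoteStorageEmptyTypeTest` above reuses the unknown-type fixture (`initPropertyMap("hdfs")`) and so duplicates `addRemoteStorageUnknownTypeTest` rather than exercising an empty `type`. A hedged sketch of what the empty-type case presumably intends, as a drop-in body for that test class, assuming `AddRemoteStorageClause.analyze` rejects a blank `type`:

```java
// Hypothetical replacement body for addRemoteStorageEmptyTypeTest, reusing the
// initPropertyMap/createStmt helpers defined in the test class above.
@Test(expected = AnalysisException.class)
public void addRemoteStorageEmptyTypeTest() throws Exception {
    Map<String, String> properties = initPropertyMap(""); // blank type instead of "hdfs"
    RemoteStorageClause stmt = createStmt(1, "remote_s3", properties);
    stmt.analyze(analyzer);
}
```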
a/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java index 7bb4eac6659d38..16ba75638dd5b4 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java @@ -30,13 +30,13 @@ public void tesCooldownTimeMs() throws Exception { DataProperty dataProperty = DataProperty.DEFAULT_DATA_PROPERTY; Assert.assertNotEquals(DataProperty.MAX_COOLDOWN_TIME_MS, dataProperty.getCooldownTimeMs()); - dataProperty = new DataProperty(TStorageMedium.SSD); + dataProperty = new DataProperty(TStorageMedium.SSD, TStorageMedium.S3); Assert.assertNotEquals(DataProperty.MAX_COOLDOWN_TIME_MS, dataProperty.getCooldownTimeMs()); - dataProperty = new DataProperty(TStorageMedium.SSD, System.currentTimeMillis() + 24 * 3600 * 1000L); + dataProperty = new DataProperty(TStorageMedium.SSD, System.currentTimeMillis() + 24 * 3600 * 1000L, "", TStorageMedium.S3); Assert.assertEquals(System.currentTimeMillis() + 24 * 3600 * 1000L, dataProperty.getCooldownTimeMs()); - dataProperty = new DataProperty(TStorageMedium.HDD); + dataProperty = new DataProperty(TStorageMedium.HDD, TStorageMedium.S3); Assert.assertEquals(DataProperty.MAX_COOLDOWN_TIME_MS, dataProperty.getCooldownTimeMs()); } } \ No newline at end of file diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java index e94fa8655df515..02061842449865 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java @@ -178,7 +178,7 @@ private void createPartitionsForTable(OlapTable olapTable, MaterializedIndex ind long id = 31 + idx; Partition partition = new Partition(id, "p" + idx, index, new HashDistributionInfo()); olapTable.addPartition(partition); - olapTable.getPartitionInfo().addPartition(id, new DataProperty(TStorageMedium.HDD), + olapTable.getPartitionInfo().addPartition(id, new DataProperty(TStorageMedium.HDD, TStorageMedium.S3), ReplicaAllocation.DEFAULT_ALLOCATION, false); }); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java index 913499a726664c..7ffa5b4f703013 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java @@ -147,7 +147,8 @@ public void testStorageMedium() throws AnalysisException { Map properties = Maps.newHashMap(); properties.put(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM, "SSD"); properties.put(PropertyAnalyzer.PROPERTIES_STORAGE_COLDOWN_TIME, tomorrowTimeStr); - DataProperty dataProperty = PropertyAnalyzer.analyzeDataProperty(properties, new DataProperty(TStorageMedium.SSD)); + DataProperty dataProperty = PropertyAnalyzer.analyzeDataProperty(properties, + new DataProperty(TStorageMedium.SSD, TStorageMedium.S3)); // avoid UT fail because time zone different DateLiteral dateLiteral = new DateLiteral(tomorrowTimeStr, Type.DATETIME); Assert.assertEquals(dateLiteral.unixTimestamp(TimeUtils.getTimeZone()), dataProperty.getCooldownTimeMs()); From 394fec5f6ce0ae99174416bcca8e1cfb59248768 Mon Sep 17 00:00:00 2001 From: qijianliang01 Date: Tue, 22 Mar 2022 14:55:03 +0800 Subject: [PATCH 2/6] add modify remote storage clause and fix reviews Change-Id: 
I7c1b0fcdcea3123413064232445a03fb76342fe6
--- .../Administration/ALTER SYSTEM.md | 10 +++
 .../Administration/SHOW REMOTE STORAGES.md | 22 ++++-
 .../Data Definition/CREATE TABLE.md | 5 +-
 .../Administration/ALTER SYSTEM.md | 10 +++
 .../Administration/SHOW REMOTE STORAGES.md | 22 ++++-
 .../Data Definition/CREATE TABLE.md | 4 +-
 fe/fe-core/src/main/cup/sql_parser.cup | 4 +
 .../org/apache/doris/alter/SystemHandler.java | 5 +-
 .../analysis/AddRemoteStorageClause.java | 2 +-
 .../doris/analysis/AlterSystemStmt.java | 3 +-
 .../analysis/DropRemoteStorageClause.java | 5 ++
 .../analysis/ModifyRemoteStorageClause.java | 80 +++++++++++++++++++
 .../org/apache/doris/catalog/Catalog.java | 2 +-
 .../apache/doris/catalog/DataProperty.java | 6 +-
 .../org/apache/doris/catalog/OlapTable.java | 2 +-
 .../apache/doris/catalog/PartitionInfo.java | 6 +-
 .../doris/catalog/RemoteStorageMgr.java | 60 ++++++++++++++
 .../doris/catalog/RemoteStorageProperty.java | 5 ++
 .../org/apache/doris/catalog/S3Property.java | 61 +++++++++++++-
 .../doris/common/util/PropertyAnalyzer.java | 9 ++-
 .../apache/doris/journal/JournalEntity.java | 15 ++--
 .../org/apache/doris/persist/EditLog.java | 10 +++
 .../apache/doris/persist/OperationType.java | 5 +-
 .../apache/doris/backup/CatalogMocker.java | 6 +-
 .../doris/catalog/DataPropertyTest.java | 4 +-
 .../org/apache/doris/clone/RebalanceTest.java | 2 +-
 .../doris/common/PropertyAnalyzerTest.java | 2 +-
 27 files changed, 325 insertions(+), 42 deletions(-)
 create mode 100644 fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyRemoteStorageClause.java

diff --git a/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md b/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md
index e6d2cc8c78ea57..7c4010cf405dfe 100644
--- a/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md
+++ b/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md
@@ -53,6 +53,8 @@ under the License.
 ALTER SYSTEM ADD REMOTE STORAGE storage_name PROPERTIES ("key" = "value"[, ...]);
 12) Drop remote storage
 ALTER SYSTEM DROP REMOTE STORAGE storage_name;
+ 13) Modify remote storage
+ ALTER SYSTEM MODIFY REMOTE STORAGE storage_name PROPERTIES ("key" = "value"[, ...]);
 Explain:
 1) Host can be hostname or IP address
@@ -96,6 +98,7 @@ under the License.
 s3_max_connections: the maximum number of s3 connections, the default is 50
 s3_request_timeout_ms: s3 request timeout, in milliseconds, the default is 3000
 s3_connection_timeout_ms: s3 connection timeout, in milliseconds, the default is 1000
+ 2) All parameters except `type` can be modified.
 ## example
@@ -165,5 +168,12 @@ under the License.
 13. Drop remote storage
 ALTER SYSTEM DROP REMOTE STORAGE remote_s3;
+ 14. Modify remote storage
+ ALTER SYSTEM MODIFY REMOTE STORAGE remote_s3 PROPERTIES
+ (
+ "s3_access_key" = "bbb",
+ "s3_secret_key" = "aaaa"
+ );
+
 ## keyword
 ALTER,SYSTEM,BACKEND,BROKER,FREE,REMOTE STORAGE
diff --git a/docs/en/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md b/docs/en/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md
index ae396b65735ba4..4796a242d1f3b3 100644
--- a/docs/en/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md
+++ b/docs/en/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md
@@ -37,6 +37,26 @@ under the License.
 2. Type: type of remote storage
 3. Properties: properties of remote storage
+## Example
+
+ View the remote storage information of the current cluster. 
+ + ``` + mysql> show remote storages; + +-----------+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Name | Type | Properties | + +-----------+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | remote_s3 | S3 | "s3_secret_key" = "*XXX", + "s3_region" = "bj", + "s3_access_key" = "bbb", + "s3_max_connections" = "50", + "s3_connection_timeout_ms" = "1000", + "s3_root_path" = "/path/to/root", + "s3_endpoint" = "bj", + "s3_request_timeout_ms" = "3000" | + +-----------+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + ``` + ## keyword - SHOW, REMOTE STORAGES + SHOW, REMOTE, REMOTE STORAGES diff --git a/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md b/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md index 849e25e7c761ab..6b25e3f251e3a1 100644 --- a/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md +++ b/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md @@ -296,7 +296,7 @@ Syntax: ``` PROPERTIES ( "storage_medium" = "[SSD|HDD]", - ["storage_cold_medium" = "[S3]"], + ["storage_cold_medium" = "[HDD|S3]"], ["remote_storage" = "xxx"], ["storage_cooldown_time" = "yyyy-MM-dd HH:mm:ss"], ["replication_num" = "3"], @@ -306,8 +306,7 @@ Syntax: storage_medium: SSD or HDD, The default initial storage media can be specified by `default_storage_medium= XXX` in the fe configuration file `fe.conf`, or, if not, by default, HDD. Note: when FE configuration 'enable_strict_storage_medium_check' is' True ', if the corresponding storage medium is not set in the cluster, the construction clause 'Failed to find enough host in all backends with storage medium is SSD|HDD'. - storage_cold_medium: Used to specify the cold data storage medium for this partition, currently only S3 is - supported. Default is S3. + storage_cold_medium: Used to specify the cold data storage medium for this partition, currently supports HDD and S3. Default is HDD. remote_storage: The remote storage name, which needs to be used in conjunction with the storage_cold_medium parameter. storage_cooldown_time: If storage_medium is SSD, data will be automatically moved to HDD when timeout. diff --git a/docs/zh-CN/sql-reference/sql-statements/Administration/ALTER SYSTEM.md b/docs/zh-CN/sql-reference/sql-statements/Administration/ALTER SYSTEM.md index bdb0f5a82d99c1..aa60d019b9d670 100644 --- a/docs/zh-CN/sql-reference/sql-statements/Administration/ALTER SYSTEM.md +++ b/docs/zh-CN/sql-reference/sql-statements/Administration/ALTER SYSTEM.md @@ -53,6 +53,8 @@ under the License. ALTER SYSTEM ADD REMOTE STORAGE storage_name PROPERTIES ("key" = "value"[, ...]); 12) 删除一个远端存储 ALTER SYSTEM DROP REMOTE STORAGE storage_name; + 13) 修改一个远端存储 + ALTER SYSTEM MODIFY REMOTE STORAGE storage_name PROPERTIES ("key" = "value"[, ...]); 说明: 1) host 可以是主机名或者ip地址 @@ -96,6 +98,7 @@ under the License. 
s3_max_connections:s3 最大连接数量,默认为 50 s3_request_timeout_ms:s3 请求超时时间,单位毫秒,默认为 3000 s3_connection_timeout_ms:s3 连接超时时间,单位毫秒,默认为 1000 + 2) 支持修改除 `type` 之外的参数信息。 ## example @@ -164,6 +167,13 @@ under the License. 13. 删除远端存储 ALTER SYSTEM DROP REMOTE STORAGE remote_s3; + + 14. 修改远端存储 + ALTER SYSTEM MODIFY REMOTE STORAGE remote_s3 PROPERTIES + ( + "s3_access_key" = "bbb", + "s3_secret_key" = "aaaa" + ); ## keyword ALTER,SYSTEM,BACKEND,BROKER,FREE,REMOTE STORAGE diff --git a/docs/zh-CN/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md b/docs/zh-CN/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md index 8db5f988a15ef4..7d53aa284a173b 100644 --- a/docs/zh-CN/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md +++ b/docs/zh-CN/sql-reference/sql-statements/Administration/SHOW REMOTE STORAGES.md @@ -36,8 +36,28 @@ under the License. 1. Name:远端存储的名字 2. Type:远端存储的类型 3. Properties:远端存储的参数 + +## Example + + 查看当前集群的远端存储信息 + + ``` + mysql> show remote storages; + +-----------+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Name | Type | Properties | + +-----------+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | remote_s3 | S3 | "s3_secret_key" = "*XXX", + "s3_region" = "bj", + "s3_access_key" = "bbb", + "s3_max_connections" = "50", + "s3_connection_timeout_ms" = "1000", + "s3_root_path" = "/path/to/root", + "s3_endpoint" = "bj", + "s3_request_timeout_ms" = "3000" | + +-----------+------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + ``` ## keyword - SHOW, REMOTE STORAGES + SHOW, REMOTE, REMOTE STORAGES diff --git a/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE TABLE.md b/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE TABLE.md index 2166a10d6ca33a..cbe8dab7222d4d 100644 --- a/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE TABLE.md +++ b/docs/zh-CN/sql-reference/sql-statements/Data Definition/CREATE TABLE.md @@ -307,7 +307,7 @@ under the License. ``` PROPERTIES ( "storage_medium" = "[SSD|HDD]", - ["storage_cold_medium" = "[S3]"], + ["storage_cold_medium" = "[HDD|S3]"], ["remote_storage" = "xxx"], ["storage_cooldown_time" = "yyyy-MM-dd HH:mm:ss"], ["replication_num" = "3"] @@ -317,7 +317,7 @@ under the License. storage_medium: 用于指定该分区的初始存储介质,可选择 SSD 或 HDD。默认初始存储介质可通过fe的配置文件 `fe.conf` 中指定 `default_storage_medium=xxx`,如果没有指定,则默认为 HDD。 注意:当FE配置项 `enable_strict_storage_medium_check` 为 `True` 时,若集群中没有设置对应的存储介质时,建表语句会报错 `Failed to find enough host in all backends with storage medium is SSD|HDD`. 
- storage_cold_medium: 用于指定该分区的冷数据存储介质,当前只支持 S3。默认为 S3。 + storage_cold_medium: 用于指定该分区的冷数据存储介质,当前支持 HDD、S3。默认为 HDD。 remote_storage: 远端存储名称,需要与 storage_cold_medium 参数搭配使用。 storage_cooldown_time: 当设置存储介质为 SSD 时,指定该分区在 SSD 上的存储到期时间。 默认存放 30 天。 diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index 93457d4dea54f4..6287601fc9b54f 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -1194,6 +1194,10 @@ alter_system_clause ::= {: RESULT = new DropRemoteStorageClause(remoteStorageName); :} + | KW_MODIFY KW_REMOTE KW_STORAGE ident_or_text:remoteStorageName opt_properties:properties + {: + RESULT = new ModifyRemoteStorageClause(remoteStorageName, properties); + :} ; // Sync Stmt diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java index df5c24a6540a1f..528d14b644604f 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java @@ -32,6 +32,7 @@ import org.apache.doris.analysis.DropRemoteStorageClause; import org.apache.doris.analysis.ModifyBackendClause; import org.apache.doris.analysis.ModifyBrokerClause; +import org.apache.doris.analysis.ModifyRemoteStorageClause; import org.apache.doris.catalog.Catalog; import org.apache.doris.catalog.Database; import org.apache.doris.catalog.OlapTable; @@ -179,7 +180,9 @@ public synchronized void process(List alterClauses, String clusterN Catalog.getCurrentCatalog().getRemoteStorageMgr().addRemoteStorage((AddRemoteStorageClause) alterClause); } else if (alterClause instanceof DropRemoteStorageClause) { Catalog.getCurrentCatalog().getRemoteStorageMgr().dropRemoteStorage((DropRemoteStorageClause) alterClause); - } else { + } else if (alterClause instanceof ModifyRemoteStorageClause) { + Catalog.getCurrentCatalog().getRemoteStorageMgr().modifyRemoteStorage((ModifyRemoteStorageClause) alterClause); + }else { Preconditions.checkState(false, alterClause.getClass()); } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AddRemoteStorageClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AddRemoteStorageClause.java index 9d3ae0c8b07264..63f08f5c55d149 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AddRemoteStorageClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AddRemoteStorageClause.java @@ -49,7 +49,7 @@ public class AddRemoteStorageClause extends RemoteStorageClause { private static final String PROPERTY_MISSING_MSG = "Remote storage %s is null. 
" + "Please add properties('%s'='xxx') when create remote storage."; - private static final String TYPE = "type"; + protected static final String TYPE = "type"; private Map properties; private RemoteStorageProperty.RemoteStorageType remoteStorageType; diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java index e2160cd3ac3431..e66f467633303e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSystemStmt.java @@ -66,7 +66,8 @@ public void analyze(Analyzer analyzer) throws UserException { || (alterClause instanceof AlterLoadErrorUrlClause) || (alterClause instanceof ModifyBackendClause) || (alterClause instanceof AddRemoteStorageClause) - || (alterClause instanceof DropRemoteStorageClause)); + || (alterClause instanceof DropRemoteStorageClause) + || (alterClause instanceof ModifyRemoteStorageClause)); alterClause.analyze(analyzer); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropRemoteStorageClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropRemoteStorageClause.java index 9f32fecc6a5dac..ff7ef37b95e460 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropRemoteStorageClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropRemoteStorageClause.java @@ -17,6 +17,11 @@ package org.apache.doris.analysis; +/** + * Drop remote storage by name + * Syntax: + * ALTER SYSTEM DROP REMOTE STORAGE `remote_storage_name` + */ public class DropRemoteStorageClause extends RemoteStorageClause { public DropRemoteStorageClause(String storageName) { super(storageName); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyRemoteStorageClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyRemoteStorageClause.java new file mode 100644 index 00000000000000..de3ea1591b3d02 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyRemoteStorageClause.java @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.analysis; + +import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.UserException; +import org.apache.doris.common.util.PrintableMap; + +import com.google.common.base.Strings; + +import java.util.Map; + +import static org.apache.doris.analysis.AddRemoteStorageClause.TYPE; + +/** + * Modify remote storage properties by name. + * Syntax: + * ALTER SYSTEM MODIFY REMOTE STORAGE `remote_storage_name` + * PROPERTIES + * ( + * "key" = "value", + * ... 
+ *     )
+ */
+public class ModifyRemoteStorageClause extends RemoteStorageClause {
+
+    private Map<String, String> properties;
+
+    public ModifyRemoteStorageClause(String storageName, Map<String, String> properties) {
+        super(storageName);
+        this.properties = properties;
+    }
+
+    @Override
+    public Map<String, String> getProperties() {
+        return properties;
+    }
+
+    @Override
+    public void analyze(Analyzer analyzer) throws UserException {
+        super.analyze(analyzer);
+
+        if (properties == null || properties.isEmpty()) {
+            throw new AnalysisException("Empty remote storage properties.");
+        }
+        // Modifying the remote storage type is not supported
+        String storageType = properties.get(TYPE);
+        if (!Strings.isNullOrEmpty(storageType)) {
+            throw new AnalysisException("Can not modify remote storage type.");
+        }
+    }
+
+    @Override
+    public String toSql() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("MODIFY REMOTE STORAGE ")
+                .append(getStorageName());
+        if (properties != null && !properties.isEmpty()) {
+            sb.append("\nPROPERTIES (");
+            sb.append(new PrintableMap<>(properties, " = ", true, true, true));
+            sb.append(")");
+        }
+        return sb.toString();
+    }
+}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
index 8a799e05713703..07b484f7df965f 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
@@ -4986,7 +4986,7 @@ public HashMap<Long, TStorageMedium> getPartitionIdToStorageMediumMap() {
                     if (dataProperty.getStorageMedium() == TStorageMedium.SSD
                             && dataProperty.getCooldownTimeMs() < currentTimeMs) {
                         // expire. change to HDD.
-                        partitionInfo.setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD, TStorageMedium.S3));
+                        partitionInfo.setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD));
                         storageMediumMap.put(partitionId, TStorageMedium.HDD);
                         LOG.debug("partition[{}-{}-{}] storage medium changed from SSD to HDD",
                                 dbId, tableId, partitionId);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
index 53b1318abc18ac..5225fade2866b9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
@@ -32,7 +32,7 @@ public class DataProperty implements Writable {
     public static final DataProperty DEFAULT_DATA_PROPERTY = new DataProperty(
             "SSD".equalsIgnoreCase(Config.default_storage_medium) ? TStorageMedium.SSD : TStorageMedium.HDD,
-            "HDD".equalsIgnoreCase(Config.default_storage_medium) ? TStorageMedium.HDD : TStorageMedium.S3);
+            "SSD".equalsIgnoreCase(Config.default_storage_medium) ?
TStorageMedium.SSD : TStorageMedium.HDD); public static final long MAX_COOLDOWN_TIME_MS = 253402271999000L; // 9999-12-31 23:59:59 @SerializedName(value = "storageMedium") @@ -105,7 +105,7 @@ public void readFields(DataInput in) throws IOException { storageColdMedium = TStorageMedium.valueOf(Text.readString(in)); } else { remoteStorageName = ""; - storageColdMedium = TStorageMedium.S3; + storageColdMedium = storageMedium; } } @@ -123,7 +123,7 @@ public boolean equals(Object obj) { return this.storageMedium == other.storageMedium && this.cooldownTimeMs == other.cooldownTimeMs - && this.remoteStorageName == other.remoteStorageName + && this.remoteStorageName.equals(other.remoteStorageName) && this.storageColdMedium == other.storageColdMedium; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java index 228fdce3c7886c..164e31cc808bcf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java @@ -1257,7 +1257,7 @@ public OlapTable selectiveCopy(Collection reservedPartitions, IndexExtSt // set storage medium to HDD for backup job, because we want that the backuped table // can be able to restored to another Doris cluster without SSD disk. // But for other operation such as truncate table, keep the origin storage medium. - copied.getPartitionInfo().setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD, TStorageMedium.S3)); + copied.getPartitionInfo().setDataProperty(partition.getId(), new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD)); } for (MaterializedIndex idx : partition.getMaterializedIndices(extState)) { idx.setState(IndexState.NORMAL); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java index acb174c0b7a91f..f6ac89fb193d21 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java @@ -303,7 +303,7 @@ public void write(DataOutput out) throws IOException { out.writeInt(idToDataProperty.size()); for (Map.Entry entry : idToDataProperty.entrySet()) { out.writeLong(entry.getKey()); - if (entry.getValue().equals(new DataProperty(TStorageMedium.HDD, TStorageMedium.S3))) { + if (entry.getValue().equals(new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD))) { out.writeBoolean(true); } else { out.writeBoolean(false); @@ -323,7 +323,7 @@ public void readFields(DataInput in) throws IOException { long partitionId = in.readLong(); boolean isDefaultHddDataProperty = in.readBoolean(); if (isDefaultHddDataProperty) { - idToDataProperty.put(partitionId, new DataProperty(TStorageMedium.HDD, TStorageMedium.S3)); + idToDataProperty.put(partitionId, new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD)); } else { idToDataProperty.put(partitionId, DataProperty.read(in)); } @@ -348,7 +348,7 @@ public String toString() { for (Map.Entry entry : idToDataProperty.entrySet()) { buff.append(entry.getKey()).append(" is HDD: "); - if (entry.getValue().equals(new DataProperty(TStorageMedium.HDD, TStorageMedium.S3))) { + if (entry.getValue().equals(new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD))) { buff.append(true); } else { buff.append(false); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java index 
69ba743675f0d5..5ca3ca7a4eefcd 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java
@@ -19,6 +19,7 @@
 import org.apache.doris.analysis.AddRemoteStorageClause;
 import org.apache.doris.analysis.DropRemoteStorageClause;
+import org.apache.doris.analysis.ModifyRemoteStorageClause;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.common.io.Text;
@@ -31,15 +32,21 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;

 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.locks.ReentrantLock;

 public class RemoteStorageMgr {
+    private static final Logger LOG = LogManager.getLogger(RemoteStorageMgr.class);
+
     public static final ImmutableList<String> REMOTE_STORAGE_PROC_NODE_TITLE_NAMES = new ImmutableList.Builder<String>()
             .add("Name").add("Type").add("Properties")
             .build();
@@ -110,6 +117,33 @@ public void dropRemoteStorage(DropRemoteStorageClause clause) throws DdlExceptio
             if (storageInfo == null) {
                 throw new DdlException("Unknown remote storage name: " + storageName);
             }
+
+            // Check for tables that still use this remote storage before dropping it
+            List<String> usedTables = new ArrayList<>();
+            List<Long> dbIds = Catalog.getCurrentCatalog().getDbIds();
+            for (Long dbId : dbIds) {
+                Optional<Database> database = Catalog.getCurrentCatalog().getDb(dbId);
+                database.ifPresent(db -> {
+                    List<Table> tables = db.getTablesOnIdOrder();
+                    for (Table table : tables) {
+                        if (table instanceof OlapTable) {
+                            PartitionInfo partitionInfo = ((OlapTable) table).getPartitionInfo();
+                            List<Long> partitionIds = ((OlapTable) table).getPartitionIds();
+                            for (Long partitionId : partitionIds) {
+                                DataProperty dataProperty = partitionInfo.getDataProperty(partitionId);
+                                if (storageName.equals(dataProperty.getRemoteStorageName())) {
+                                    usedTables.add(db.getFullName() + "." + table.getName());
+                                    break; // record each table once, no need to scan its remaining partitions
+                                }
+                            }
+                        }
+                    }
+                });
+            }
+            if (!usedTables.isEmpty()) {
+                LOG.warn("Can not drop remote storage, since it's used in tables {}", usedTables);
+                throw new DdlException("Can not drop remote storage, since it's used in tables " + usedTables);
+            }
+
             Catalog.getCurrentCatalog().getEditLog().logDropRemoteStorage(storageInfo);
             storageInfoMap.remove(storageName);
         } finally {
@@ -126,6 +160,32 @@ public void replayDropRemoteStorage(RemoteStorageInfo info) {
         }
     }

+    public void modifyRemoteStorage(ModifyRemoteStorageClause clause) throws DdlException {
+        lock.lock();
+        try {
+            String storageName = clause.getStorageName();
+            RemoteStorageInfo storageInfo = storageInfoMap.get(storageName);
+            if (storageInfo == null) {
+                throw new DdlException("Unknown remote storage name: " + storageName);
+            }
+            storageInfo.getRemoteStorageProperty().modifyRemoteStorage(clause.getProperties());
+            Catalog.getCurrentCatalog().getEditLog().logModifyRemoteStorage(storageInfo);
+            storageInfoMap.put(storageName, storageInfo);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    public void replayModifyRemoteStorage(RemoteStorageInfo storageInfo) {
+        lock.lock();
+        try {
+            String storageName = storageInfo.getRemoteStorageName();
+            storageInfoMap.put(storageName, storageInfo);
+        } finally {
+            lock.unlock();
+        }
+    }
+
     public List<List<String>> getRemoteStoragesInfo() {
         lock.lock();
         try {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageProperty.java
index bb70dbaf1073a4..de6e1b57a46823 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageProperty.java
@@ -18,6 +18,7 @@

 package org.apache.doris.catalog;

+import org.apache.doris.common.DdlException;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;

@@ -38,6 +39,10 @@ public RemoteStorageType getStorageType() {
         return null;
     }

+    public void modifyRemoteStorage(Map<String, String> properties) throws DdlException {
+        throw new NotImplementedException();
+    }
+
     @Override
     public void write(DataOutput out) throws IOException {
         throw new IOException("Not implemented serializable.");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Property.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Property.java
index 175e5ba9cf4253..f69d52f2112541 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Property.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/S3Property.java
@@ -17,10 +17,12 @@

 package org.apache.doris.catalog;

+import org.apache.doris.common.DdlException;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
 import org.apache.doris.persist.gson.GsonUtils;

+import com.google.common.base.Strings;
 import com.google.gson.annotations.SerializedName;

 import java.io.DataInput;
@@ -43,8 +45,8 @@ public class S3Property extends RemoteStorageProperty implements Writable {
     public static final String DEFAULT_S3_REQUEST_TIMEOUT_MS = "3000";
     public static final String DEFAULT_S3_CONNECTION_TIMEOUT_MS = "1000";

-    @SerializedName(value = "endPoint")
-    private String endPoint;
+    @SerializedName(value = "endpoint")
+    private String endpoint;
     @SerializedName(value = "region")
     private String region;
     @SerializedName(value = "rootPath")
@@ -61,7 +63,7 @@ public class S3Property extends RemoteStorageProperty implements Writable {
     private long connectionTimeoutMs;

     public S3Property(Map<String, String> properties) {
-        this.endPoint = properties.get(S3_ENDPOINT);
+        this.endpoint = properties.get(S3_ENDPOINT);
         this.region = properties.get(S3_REGION);
         this.rootPath = properties.get(S3_ROOT_PATH);
         this.accessKey = properties.get(S3_ACCESS_KEY);
@@ -78,6 +80,57 @@ public RemoteStorageType getStorageType() {
         return RemoteStorageType.S3;
     }

+    @Override
+    public void modifyRemoteStorage(Map<String, String> properties) throws DdlException {
+        // check properties
+        String endpoint = properties.get(S3_ENDPOINT);
+        properties.remove(S3_ENDPOINT);
+        String region = properties.get(S3_REGION);
+        properties.remove(S3_REGION);
+        String rootPath = properties.get(S3_ROOT_PATH);
+        properties.remove(S3_ROOT_PATH);
+        String accessKey = properties.get(S3_ACCESS_KEY);
+        properties.remove(S3_ACCESS_KEY);
+        String secretKey = properties.get(S3_SECRET_KEY);
+        properties.remove(S3_SECRET_KEY);
+        String maxConnections = properties.get(S3_MAX_CONNECTIONS);
+        properties.remove(S3_MAX_CONNECTIONS);
+        String requestTimeoutMs = properties.get(S3_REQUEST_TIMEOUT_MS);
+        properties.remove(S3_REQUEST_TIMEOUT_MS);
+        String connectionTimeoutMs = properties.get(S3_CONNECTION_TIMEOUT_MS);
+        properties.remove(S3_CONNECTION_TIMEOUT_MS);
+
+        if (!properties.isEmpty()) {
+            throw new DdlException("Unknown S3 remote storage properties: " + properties);
+        }
+
+        // modify properties
+        if (!Strings.isNullOrEmpty(endpoint)) {
+            this.endpoint = endpoint;
+        }
+        if (!Strings.isNullOrEmpty(region)) {
+            this.region = region;
+        }
+        if (!Strings.isNullOrEmpty(rootPath)) {
+            this.rootPath = rootPath;
+        }
+        if (!Strings.isNullOrEmpty(accessKey)) {
+            this.accessKey = accessKey;
+        }
+        if (!Strings.isNullOrEmpty(secretKey)) {
+            this.secretKey = secretKey;
+        }
+        if (!Strings.isNullOrEmpty(maxConnections)) {
+            this.maxConnections = Long.parseLong(maxConnections);
+        }
+        if (!Strings.isNullOrEmpty(requestTimeoutMs)) {
+            this.requestTimeoutMs = Long.parseLong(requestTimeoutMs);
+        }
+        if (!Strings.isNullOrEmpty(connectionTimeoutMs)) {
+            this.connectionTimeoutMs = Long.parseLong(connectionTimeoutMs);
+        }
+    }
+
     @Override
     public void write(DataOutput out) throws IOException {
         String json = GsonUtils.GSON.toJson(this);
@@ -92,7 +145,7 @@ public static S3Property read(DataInput in) throws IOException {

     @Override
     public Map<String, String> getProperties() {
         Map<String, String> properties = new HashMap<>();
-        properties.put(S3_ENDPOINT, endPoint);
+        properties.put(S3_ENDPOINT, endpoint);
         properties.put(S3_REGION, region);
         properties.put(S3_ROOT_PATH, rootPath);
         properties.put(S3_ACCESS_KEY, accessKey);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
index 576eed7787a594..c478dd7c7e1d50 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
@@ -152,7 +152,8 @@ public static DataProperty analyzeDataProperty(Map<String, String> properties, D
             }
         } // end for properties

-        if (!hasCooldown && !hasMedium && !hasRemoteStorage) {
+        // `remote_storage` may be empty when data only moves from SSD to HDD.
+ if (!hasCooldown && !hasMedium && !hasColdMedium) { return oldDataProperty; } @@ -161,9 +162,9 @@ public static DataProperty analyzeDataProperty(Map properties, D properties.remove(PROPERTIES_STORAGE_COLD_MEDIUM); properties.remove(PROPERTIES_REMOTE_STORAGE); - if ((hasColdMedium && !hasRemoteStorage) || (!hasColdMedium && hasRemoteStorage)) { + if (hasRemoteStorage && !hasColdMedium) { throw new AnalysisException("Invalid data property, " + - "`storage_cold_medium` must be used with `remote_storage`."); + "`remote_storage` must be used with `storage_cold_medium`."); } if (hasColdMedium && hasRemoteStorage) { @@ -181,7 +182,7 @@ public static DataProperty analyzeDataProperty(Map properties, D } if ((storageMedium == TStorageMedium.HDD && hasCooldown) && !(hasColdMedium && hasRemoteStorage)) { - throw new AnalysisException("Can not assign cooldown timestamp to HDD storage medium"); + throw new AnalysisException("Can not assign cooldown timestamp to HDD storage medium without remote storage"); } long currentTimeMs = System.currentTimeMillis(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java index a4087d4ce3bce1..32aea243bf25a1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java +++ b/fe/fe-core/src/main/java/org/apache/doris/journal/JournalEntity.java @@ -403,19 +403,20 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_ADD_REMOTE_STORAGE: - case OperationType.OP_DROP_REMOTE_STORAGE: { - data = new RemoteStorageMgr.RemoteStorageInfo(); - ((RemoteStorageMgr.RemoteStorageInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_DROP_ALL_BROKER: { data = new Text(); ((Text) data).readFields(in); isRead = true; break; } + case OperationType.OP_ADD_REMOTE_STORAGE: + case OperationType.OP_DROP_REMOTE_STORAGE: + case OperationType.OP_MODIFY_REMOTE_STORAGE: { + data = new RemoteStorageMgr.RemoteStorageInfo(); + ((RemoteStorageMgr.RemoteStorageInfo) data).readFields(in); + isRead = true; + break; + } case OperationType.OP_UPDATE_CLUSTER_AND_BACKENDS: { data = new BackendIdsUpdateInfo(); ((BackendIdsUpdateInfo) data).readFields(in); diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java index 34182f7bc6128e..b11e9645dff7f5 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/EditLog.java @@ -532,6 +532,12 @@ public static void loadJournal(Catalog catalog, JournalEntity journal) { catalog.getRemoteStorageMgr().replayDropRemoteStorage(param); break; } + case OperationType.OP_MODIFY_REMOTE_STORAGE: { + final RemoteStorageMgr.RemoteStorageInfo param = + (RemoteStorageMgr.RemoteStorageInfo) journal.getData(); + catalog.getRemoteStorageMgr().replayModifyRemoteStorage(param); + break; + } case OperationType.OP_SET_LOAD_ERROR_HUB: { final LoadErrorHub.Param param = (LoadErrorHub.Param) journal.getData(); catalog.getLoadInstance().setLoadErrorHubInfo(param); @@ -1185,6 +1191,10 @@ public void logDropRemoteStorage(RemoteStorageMgr.RemoteStorageInfo info) { logEdit(OperationType.OP_DROP_REMOTE_STORAGE, info); } + public void logModifyRemoteStorage(RemoteStorageMgr.RemoteStorageInfo info) { + logEdit(OperationType.OP_MODIFY_REMOTE_STORAGE, info); + } + public void logDropAllBroker(String brokerName) { 
logEdit(OperationType.OP_DROP_ALL_BROKER, new Text(brokerName)); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java b/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java index 5b93f88f5744a9..50fbf24ea1139b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java +++ b/fe/fe-core/src/main/java/org/apache/doris/persist/OperationType.java @@ -137,8 +137,9 @@ public class OperationType { public static final short OP_CREATE_REPOSITORY = 89; public static final short OP_DROP_REPOSITORY = 90; public static final short OP_MODIFY_BACKEND = 91; - public static final short OP_ADD_REMOTE_STORAGE = 92; - public static final short OP_DROP_REMOTE_STORAGE = 93; + public static final short OP_ADD_REMOTE_STORAGE = 10001; + public static final short OP_DROP_REMOTE_STORAGE = 10002; + public static final short OP_MODIFY_REMOTE_STORAGE = 10003; //colocate table public static final short OP_COLOCATE_ADD_TABLE = 94; diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java b/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java index 19de064df40910..98fea0496637a8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/CatalogMocker.java @@ -241,7 +241,7 @@ public static Database mockDb() throws UserException { PartitionInfo partitionInfo = new SinglePartitionInfo(); partitionInfo.setReplicaAllocation(TEST_SINGLE_PARTITION_ID, new ReplicaAllocation((short) 3)); partitionInfo.setIsInMemory(TEST_SINGLE_PARTITION_ID, false); - DataProperty dataProperty = new DataProperty(TStorageMedium.HDD, TStorageMedium.S3); + DataProperty dataProperty = new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD); partitionInfo.setDataProperty(TEST_SINGLE_PARTITION_ID, dataProperty); OlapTable olapTable = new OlapTable(TEST_TBL_ID, TEST_TBL_NAME, TEST_TBL_BASE_SCHEMA, KeysType.AGG_KEYS, partitionInfo, distributionInfo); @@ -312,8 +312,8 @@ public static Database mockDb() throws UserException { rangePartitionInfo.setReplicaAllocation(TEST_PARTITION1_ID, new ReplicaAllocation((short) 3)); rangePartitionInfo.setReplicaAllocation(TEST_PARTITION2_ID, new ReplicaAllocation((short) 3)); - DataProperty dataPropertyP1 = new DataProperty(TStorageMedium.HDD, TStorageMedium.S3); - DataProperty dataPropertyP2 = new DataProperty(TStorageMedium.HDD, TStorageMedium.S3); + DataProperty dataPropertyP1 = new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD); + DataProperty dataPropertyP2 = new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD); rangePartitionInfo.setDataProperty(TEST_PARTITION1_ID, dataPropertyP1); rangePartitionInfo.setDataProperty(TEST_PARTITION2_ID, dataPropertyP2); diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java index 16ba75638dd5b4..908b87779953ca 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java @@ -30,13 +30,13 @@ public void tesCooldownTimeMs() throws Exception { DataProperty dataProperty = DataProperty.DEFAULT_DATA_PROPERTY; Assert.assertNotEquals(DataProperty.MAX_COOLDOWN_TIME_MS, dataProperty.getCooldownTimeMs()); - dataProperty = new DataProperty(TStorageMedium.SSD, TStorageMedium.S3); + dataProperty = new DataProperty(TStorageMedium.SSD, TStorageMedium.SSD); 
Assert.assertNotEquals(DataProperty.MAX_COOLDOWN_TIME_MS, dataProperty.getCooldownTimeMs()); dataProperty = new DataProperty(TStorageMedium.SSD, System.currentTimeMillis() + 24 * 3600 * 1000L, "", TStorageMedium.S3); Assert.assertEquals(System.currentTimeMillis() + 24 * 3600 * 1000L, dataProperty.getCooldownTimeMs()); - dataProperty = new DataProperty(TStorageMedium.HDD, TStorageMedium.S3); + dataProperty = new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD); Assert.assertEquals(DataProperty.MAX_COOLDOWN_TIME_MS, dataProperty.getCooldownTimeMs()); } } \ No newline at end of file diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java index 02061842449865..e32eb4e0b55cc5 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/RebalanceTest.java @@ -178,7 +178,7 @@ private void createPartitionsForTable(OlapTable olapTable, MaterializedIndex ind long id = 31 + idx; Partition partition = new Partition(id, "p" + idx, index, new HashDistributionInfo()); olapTable.addPartition(partition); - olapTable.getPartitionInfo().addPartition(id, new DataProperty(TStorageMedium.HDD, TStorageMedium.S3), + olapTable.getPartitionInfo().addPartition(id, new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD), ReplicaAllocation.DEFAULT_ALLOCATION, false); }); } diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java index 7ffa5b4f703013..0be290f7a93ee7 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/PropertyAnalyzerTest.java @@ -148,7 +148,7 @@ public void testStorageMedium() throws AnalysisException { properties.put(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM, "SSD"); properties.put(PropertyAnalyzer.PROPERTIES_STORAGE_COLDOWN_TIME, tomorrowTimeStr); DataProperty dataProperty = PropertyAnalyzer.analyzeDataProperty(properties, - new DataProperty(TStorageMedium.SSD, TStorageMedium.S3)); + new DataProperty(TStorageMedium.SSD, TStorageMedium.SSD)); // avoid UT fail because time zone different DateLiteral dateLiteral = new DateLiteral(tomorrowTimeStr, Type.DATETIME); Assert.assertEquals(dateLiteral.unixTimestamp(TimeUtils.getTimeZone()), dataProperty.getCooldownTimeMs()); From 2a33c2263a4e979a45765952180fc48126901cae Mon Sep 17 00:00:00 2001 From: qijianliang01 Date: Mon, 28 Mar 2022 15:26:50 +0800 Subject: [PATCH 3/6] add alter tests, support alter single data property --- .../java/org/apache/doris/alter/Alter.java | 32 +++- .../doris/analysis/ModifyPartitionClause.java | 6 +- .../apache/doris/catalog/DataProperty.java | 2 +- .../doris/catalog/RemoteStorageMgr.java | 6 +- .../java/org/apache/doris/common/Config.java | 7 + .../doris/common/util/PropertyAnalyzer.java | 43 +++-- .../org/apache/doris/alter/AlterTest.java | 155 +++++++++++++++++- .../doris/catalog/DataPropertyTest.java | 2 +- 8 files changed, 219 insertions(+), 34 deletions(-) diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java index 2667a0c12ece9d..fd2440fcf73f94 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java +++ b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java @@ -24,6 +24,7 @@ import org.apache.doris.analysis.AlterViewStmt; import 
org.apache.doris.analysis.ColumnRenameClause; import org.apache.doris.analysis.CreateMaterializedViewStmt; +import org.apache.doris.analysis.DateLiteral; import org.apache.doris.analysis.DropMaterializedViewStmt; import org.apache.doris.analysis.DropPartitionClause; import org.apache.doris.analysis.ModifyColumnCommentClause; @@ -51,6 +52,7 @@ import org.apache.doris.catalog.ReplicaAllocation; import org.apache.doris.catalog.Table; import org.apache.doris.catalog.Table.TableType; +import org.apache.doris.catalog.Type; import org.apache.doris.catalog.View; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.DdlException; @@ -59,6 +61,7 @@ import org.apache.doris.common.util.DynamicPartitionUtil; import org.apache.doris.common.util.MetaLockUtils; import org.apache.doris.common.util.PropertyAnalyzer; +import org.apache.doris.common.util.TimeUtils; import org.apache.doris.persist.AlterViewInfo; import org.apache.doris.persist.BatchModifyPartitionsInfo; import org.apache.doris.persist.ModifyCommentOperationLog; @@ -79,9 +82,15 @@ import java.util.Arrays; import java.util.Comparator; +import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.apache.doris.common.util.PropertyAnalyzer.PROPERTIES_REMOTE_STORAGE; +import static org.apache.doris.common.util.PropertyAnalyzer.PROPERTIES_STORAGE_COLDOWN_TIME; +import static org.apache.doris.common.util.PropertyAnalyzer.PROPERTIES_STORAGE_COLD_MEDIUM; +import static org.apache.doris.common.util.PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM; + public class Alter { private static final Logger LOG = LogManager.getLogger(Alter.class); @@ -654,15 +663,13 @@ public void modifyPartitionsProperty(Database db, } // get value from properties here - // 1. data property - DataProperty newDataProperty = PropertyAnalyzer.analyzeDataProperty(properties, null); - // 2. replica allocation + // 1. replica allocation ReplicaAllocation replicaAlloc = PropertyAnalyzer.analyzeReplicaAllocation(properties, ""); Catalog.getCurrentSystemInfo().checkReplicaAllocation(db.getClusterName(), replicaAlloc); - // 3. in memory + // 2. in memory boolean newInMemory = PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_INMEMORY, false); - // 4. tablet type + // 3. tablet type TTabletType tTabletType = PropertyAnalyzer.analyzeTabletType(properties); @@ -670,6 +677,21 @@ public void modifyPartitionsProperty(Database db, PartitionInfo partitionInfo = olapTable.getPartitionInfo(); for (String partitionName : partitionNames) { Partition partition = olapTable.getPartition(partitionName); + // 4. data property + // 4.1 get old data property from partition + DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId()); + // 4.2 combine the old properties with new ones + Map newProperties = new HashMap<>(); + newProperties.put(PROPERTIES_STORAGE_MEDIUM, dataProperty.getStorageMedium().name()); + DateLiteral dateLiteral = new DateLiteral(dataProperty.getCooldownTimeMs(), + TimeUtils.getTimeZone(), Type.DATETIME); + newProperties.put(PROPERTIES_STORAGE_COLDOWN_TIME, dateLiteral.getStringValue()); + newProperties.put(PROPERTIES_STORAGE_COLD_MEDIUM, dataProperty.getStorageColdMedium().name()); + newProperties.put(PROPERTIES_REMOTE_STORAGE, dataProperty.getRemoteStorageName()); + newProperties.putAll(properties); + // 4.3 analyze new properties + DataProperty newDataProperty = PropertyAnalyzer.analyzeDataProperty(newProperties, null); + // 1. 
data property
             if (newDataProperty != null) {
                 partitionInfo.setDataProperty(partition.getId(), newDataProperty);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyPartitionClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyPartitionClause.java
index 504f197ab66351..4b434b82728514 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyPartitionClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyPartitionClause.java
@@ -98,11 +98,7 @@ public void analyze(Analyzer analyzer) throws AnalysisException {
     // 3. in_memory
     // 4. tablet type
     private void checkProperties(Map<String, String> properties) throws AnalysisException {
-        // 1. data property
-        DataProperty newDataProperty = null;
-        newDataProperty = PropertyAnalyzer.analyzeDataProperty(properties, DataProperty.DEFAULT_DATA_PROPERTY);
-        Preconditions.checkNotNull(newDataProperty);
-
+        // 1. no need to analyze the data property here; it is analyzed before modifying the meta
         // 2. replica allocation
         PropertyAnalyzer.analyzeReplicaAllocation(properties, "");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
index 5225fade2866b9..06e448e1b6794f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DataProperty.java
@@ -32,7 +32,7 @@ public class DataProperty implements Writable {
     public static final DataProperty DEFAULT_DATA_PROPERTY = new DataProperty(
             "SSD".equalsIgnoreCase(Config.default_storage_medium) ? TStorageMedium.SSD : TStorageMedium.HDD,
-            "SSD".equalsIgnoreCase(Config.default_storage_medium) ? TStorageMedium.SSD : TStorageMedium.HDD);
+            "S3".equalsIgnoreCase(Config.default_storage_cold_medium) ?
TStorageMedium.S3 : TStorageMedium.HDD);
     public static final long MAX_COOLDOWN_TIME_MS = 253402271999000L; // 9999-12-31 23:59:59

     @SerializedName(value = "storageMedium")
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java
index 5ca3ca7a4eefcd..d7b2841198fd12 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RemoteStorageMgr.java
@@ -73,7 +73,7 @@ public RemoteStorageProperty getRemoteStorageByName(String storageName) throws A
     public void addRemoteStorage(AddRemoteStorageClause clause) throws DdlException {
         lock.lock();
         try {
-            String storageName = clause.getStorageName();
+            String storageName = clause.getStorageName().trim();
             if (storageInfoMap.containsKey(storageName)) {
                 throw new DdlException("Remote storage[" + storageName + "] has already in remote storages.");
             }
@@ -112,7 +112,7 @@ public void replayAddRemoteStorage(RemoteStorageInfo storageInfo) {
     public void dropRemoteStorage(DropRemoteStorageClause clause) throws DdlException {
         lock.lock();
         try {
-            String storageName = clause.getStorageName();
+            String storageName = clause.getStorageName().trim();
             RemoteStorageInfo storageInfo = storageInfoMap.get(storageName);
             if (storageInfo == null) {
                 throw new DdlException("Unknown remote storage name: " + storageName);
@@ -163,7 +163,7 @@ public void replayDropRemoteStorage(RemoteStorageInfo info) {
     public void modifyRemoteStorage(ModifyRemoteStorageClause clause) throws DdlException {
         lock.lock();
         try {
-            String storageName = clause.getStorageName();
+            String storageName = clause.getStorageName().trim();
             RemoteStorageInfo storageInfo = storageInfoMap.get(storageName);
             if (storageInfo == null) {
                 throw new DdlException("Unknown remote storage name: " + storageName);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Config.java b/fe/fe-core/src/main/java/org/apache/doris/common/Config.java
index 15516769109b59..5d358457934c14 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/Config.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/Config.java
@@ -802,6 +802,13 @@ public class Config extends ConfigBase {
      * If not set, this specifies the default medium when created.
      */
     @ConfField public static String default_storage_medium = "HDD";
+
+    /**
+     * When creating a table (or partition), you can specify its cold storage medium (HDD or S3).
+     * If not set, this specifies the default cold medium used at creation.
+     */
+    @ConfField public static String default_storage_cold_medium = "HDD";
+
     /**
      * When create a table(or partition), you can specify its storage medium(HDD or SSD).
      * If set to SSD, this specifies the default duration that tablets will stay on SSD.
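The two FE config knobs above, `default_storage_medium` and `default_storage_cold_medium`, jointly determine a partition's initial `DataProperty`. A minimal standalone sketch of that resolution, assuming simplified stand-in types (the real logic is the `DEFAULT_DATA_PROPERTY` initializer in `org.apache.doris.catalog.DataProperty` shown earlier in this patch):

```java
// Sketch only: Medium stands in for TStorageMedium, and the two strings stand
// in for Config.default_storage_medium / Config.default_storage_cold_medium.
public class DefaultMediumSketch {
    enum Medium { HDD, SSD, S3 }

    static String defaultStorageMedium = "HDD";      // fe.conf: default_storage_medium
    static String defaultStorageColdMedium = "HDD";  // fe.conf: default_storage_cold_medium

    // Only "SSD" is recognized for the hot medium; anything else falls back to HDD.
    static Medium defaultHotMedium() {
        return "SSD".equalsIgnoreCase(defaultStorageMedium) ? Medium.SSD : Medium.HDD;
    }

    // Only "S3" is recognized for the cold medium; anything else falls back to HDD.
    static Medium defaultColdMedium() {
        return "S3".equalsIgnoreCase(defaultStorageColdMedium) ? Medium.S3 : Medium.HDD;
    }

    public static void main(String[] args) {
        // With both knobs left at their "HDD" defaults, nothing cools down to S3.
        System.out.println(defaultHotMedium() + " -> " + defaultColdMedium()); // HDD -> HDD
    }
}
```

Keeping both defaults at HDD means an upgraded cluster behaves exactly as before until an operator explicitly opts in to S3 cold storage.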
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java index c478dd7c7e1d50..cbd77ecde19c4b 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java @@ -109,12 +109,12 @@ public class PropertyAnalyzer { public static DataProperty analyzeDataProperty(Map properties, DataProperty oldDataProperty) throws AnalysisException { - if (properties == null) { + if (properties == null || properties.isEmpty()) { return oldDataProperty; } TStorageMedium storageMedium = null; - TStorageMedium coldStorageMedium = TStorageMedium.S3; + TStorageMedium coldStorageMedium = TStorageMedium.HDD; String remoteStorageName = ""; RemoteStorageProperty remotestorageProperty = null; long coolDownTimeStamp = DataProperty.MAX_COOLDOWN_TIME_MS; @@ -143,16 +143,22 @@ public static DataProperty analyzeDataProperty(Map properties, D hasColdMedium = true; if (value.equalsIgnoreCase(TStorageMedium.S3.name())) { coldStorageMedium = TStorageMedium.S3; + } else if (value.equalsIgnoreCase((TStorageMedium.HDD.name()))) { + coldStorageMedium = TStorageMedium.HDD; } else { throw new AnalysisException("Invalid cold storage medium: " + value); } } else if (!hasRemoteStorage && key.equalsIgnoreCase(PROPERTIES_REMOTE_STORAGE)) { - hasRemoteStorage = true; - remoteStorageName = value; + if (!Strings.isNullOrEmpty(value)) { + hasRemoteStorage = true; + remoteStorageName = value; + } } } // end for properties - // remote_storage_medium may be empty, when data moves SSD to HDD. + // Check properties + + // 1. remote_storage_medium may be empty, when data moves SSD to HDD. if (!hasCooldown && !hasMedium && !hasColdMedium) { return oldDataProperty; } @@ -162,11 +168,14 @@ public static DataProperty analyzeDataProperty(Map properties, D properties.remove(PROPERTIES_STORAGE_COLD_MEDIUM); properties.remove(PROPERTIES_REMOTE_STORAGE); + // 2. check remote_storage and storage_cold_medium + // 2.1 when using remote_storage, must have cold storage medium if (hasRemoteStorage && !hasColdMedium) { throw new AnalysisException("Invalid data property, " + "`remote_storage` must be used with `storage_cold_medium`."); } + // 2.2 check cold storage medium and remote storage type if (hasColdMedium && hasRemoteStorage) { remotestorageProperty = Catalog.getCurrentCatalog() .getRemoteStorageMgr().getRemoteStorageByName(remoteStorageName); @@ -177,24 +186,32 @@ public static DataProperty analyzeDataProperty(Map properties, D } } + // 3. Check cooldown_time + // 3.1 cooldown_time must use with storage medium if (hasCooldown && !hasMedium) { throw new AnalysisException("Invalid data property. 
storage medium property is not found"); } - if ((storageMedium == TStorageMedium.HDD && hasCooldown) && !(hasColdMedium && hasRemoteStorage)) { - throw new AnalysisException("Can not assign cooldown timestamp to HDD storage medium without remote storage"); - } - + // 3.2 cooldown time must be later than now + // Both HDD and SSD can have cooldown time long currentTimeMs = System.currentTimeMillis(); - if (storageMedium == TStorageMedium.SSD && hasCooldown) { + if (hasCooldown) { if (coolDownTimeStamp <= currentTimeMs) { - throw new AnalysisException("Cooldown time should later than now"); + throw new AnalysisException("Cooldown time should be later than now"); } } - if (storageMedium == TStorageMedium.SSD && !hasCooldown) { - // set default cooldown time + // 3.3 set default cooldown time + // 3.3.1 set default cooldown time to 30 days + // 1) SSD -> HDD, SSD -> remote_storage + // 2) HDD -> remote_storage + if ((storageMedium == TStorageMedium.SSD || + (storageMedium == TStorageMedium.HDD && coldStorageMedium == TStorageMedium.S3)) && + !hasCooldown) { coolDownTimeStamp = currentTimeMs + Config.storage_cooldown_second * 1000L; + } else if (storageMedium == TStorageMedium.HDD && coldStorageMedium == TStorageMedium.HDD) { + // 3.3.2 set default to MAX, ignore user's setting + coolDownTimeStamp = DataProperty.MAX_COOLDOWN_TIME_MS; } Preconditions.checkNotNull(storageMedium); diff --git a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java index 1fe2a078dbee87..cf6a869679781e 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/alter/AlterTest.java @@ -17,6 +17,7 @@ package org.apache.doris.alter; +import org.apache.doris.analysis.AlterSystemStmt; import org.apache.doris.analysis.AlterTableStmt; import org.apache.doris.analysis.CreateDbStmt; import org.apache.doris.analysis.CreateTableStmt; @@ -132,7 +133,7 @@ public static void beforeClass() throws Exception { " 'replication_num' = '1',\n" + " 'in_memory' = 'false',\n" + " 'storage_medium' = 'SSD',\n" + - " 'storage_cooldown_time' = '9999-12-31 00:00:00'\n" + + " 'storage_cooldown_time' = '2999-12-31 00:00:00'\n" + ");"); createTable("CREATE TABLE test.tbl5\n" + @@ -167,6 +168,59 @@ public static void beforeClass() throws Exception { "\"driver\" = \"Oracle Driver\",\n" + "\"odbc_type\" = \"oracle\"\n" + ");"); + + // remote storage + createRemoteStorage("alter system add remote storage \"remote_s3\"\n" + + "properties\n" + + "(\n" + + " \"type\" = \"s3\",\n" + + " \"s3_endpoint\" = \"bj\",\n" + + " \"s3_region\" = \"bj\",\n" + + " \"s3_root_path\" = \"/path/to/root\",\n" + + " \"s3_access_key\" = \"bbb\",\n" + + " \"s3_secret_key\" = \"aaaa\",\n" + + " \"s3_max_connections\" = \"50\",\n" + + " \"s3_request_timeout_ms\" = \"3000\",\n" + + " \"s3_connection_timeout_ms\" = \"1000\"\n" + + ");"); + + createRemoteStorage("alter system add remote storage \"remote_s3_1\"\n" + + "properties\n" + + "(\n" + + " \"type\" = \"s3\",\n" + + " \"s3_endpoint\" = \"bj\",\n" + + " \"s3_region\" = \"bj\",\n" + + " \"s3_root_path\" = \"/path/to/root\",\n" + + " \"s3_access_key\" = \"bbb\",\n" + + " \"s3_secret_key\" = \"aaaa\",\n" + + " \"s3_max_connections\" = \"50\",\n" + + " \"s3_request_timeout_ms\" = \"3000\",\n" + + " \"s3_connection_timeout_ms\" = \"1000\"\n" + + ");"); + + createTable("CREATE TABLE test.tbl_remote\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY 
RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01'),\n" + + " PARTITION p3 values less than('2020-04-01'),\n" + + " PARTITION p4 values less than('2020-05-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES" + + "(" + + " 'replication_num' = '1',\n" + + " 'in_memory' = 'false',\n" + + " 'storage_medium' = 'SSD',\n" + + " 'storage_cold_medium' = 'S3',\n" + + " 'remote_storage' = 'remote_s3',\n" + + " 'storage_cooldown_time' = '2122-04-01 20:24:00'" + + ");"); } @AfterClass @@ -180,6 +234,11 @@ private static void createTable(String sql) throws Exception { Catalog.getCurrentCatalog().createTable(createTableStmt); } + private static void createRemoteStorage(String sql) throws Exception { + AlterSystemStmt stmt = (AlterSystemStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); + Catalog.getCurrentCatalog().alterCluster(stmt); + } + private static void alterTable(String sql, boolean expectedException) throws Exception { try { AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext); @@ -384,20 +443,36 @@ public void testBatchUpdatePartitionProperties() throws Exception { Assert.assertEquals(false, tbl4.getPartitionInfo().getIsInMemory(p4.getId())); // batch update storage_medium and storage_cool_down properties - stmt = "alter table test.tbl4 modify partition (p2, p3, p4) set ('storage_medium' = 'HDD')"; - DateLiteral dateLiteral = new DateLiteral("9999-12-31 00:00:00", Type.DATETIME); + // alter storage_medium + stmt = "alter table test.tbl4 modify partition (p3, p4) set ('storage_medium' = 'HDD')"; + DateLiteral dateLiteral = new DateLiteral("2999-12-31 00:00:00", Type.DATETIME); long coolDownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); - DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, coolDownTimeMs, "", TStorageMedium.S3); - partitionList = Lists.newArrayList(p2, p3, p4); + DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, coolDownTimeMs, "", TStorageMedium.HDD); + partitionList = Lists.newArrayList(p3, p4); for (Partition partition : partitionList) { Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(partition.getId())); } alterTable(stmt, false); - DataProperty newDataProperty = new DataProperty(TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS, "", TStorageMedium.S3); + DataProperty newDataProperty = new DataProperty(TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS, "", TStorageMedium.HDD); for (Partition partition : partitionList) { Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(partition.getId())); } Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(p1.getId())); + Assert.assertEquals(oldDataProperty, tbl4.getPartitionInfo().getDataProperty(p2.getId())); + + // alter cooldown_time + stmt = "alter table test.tbl4 modify partition (p1, p2) set ('storage_cooldown_time' = '2100-12-31 00:00:00')"; + alterTable(stmt, false); + + dateLiteral = new DateLiteral("2100-12-31 00:00:00", Type.DATETIME); + coolDownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); + DataProperty newDataProperty1 = new DataProperty(TStorageMedium.SSD, coolDownTimeMs, "", TStorageMedium.HDD); + partitionList = Lists.newArrayList(p1, p2); + for (Partition partition : partitionList) { + Assert.assertEquals(newDataProperty1, tbl4.getPartitionInfo().getDataProperty(partition.getId())); + } + 
Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(p3.getId())); + Assert.assertEquals(newDataProperty, tbl4.getPartitionInfo().getDataProperty(p4.getId())); // batch update range partitions' properties with * stmt = "alter table test.tbl4 modify partition (*) set ('replication_num' = '1')"; @@ -408,6 +483,74 @@ public void testBatchUpdatePartitionProperties() throws Exception { } } + @Test + public void testAlterRemoteStorageTableDataProperties() throws Exception { + Database db = Catalog.getCurrentCatalog().getDbOrMetaException("default_cluster:test"); + OlapTable tbl_remote = (OlapTable) db.getTableOrMetaException("tbl_remote"); + Partition p1 = tbl_remote.getPartition("p1"); + Partition p2 = tbl_remote.getPartition("p2"); + Partition p3 = tbl_remote.getPartition("p3"); + Partition p4 = tbl_remote.getPartition("p4"); + + // alter storage_medium + String stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_medium' = 'HDD')"; + DateLiteral dateLiteral = new DateLiteral("2122-04-01 20:24:00", Type.DATETIME); + long coolDownTimeMs = dateLiteral.unixTimestamp(TimeUtils.getTimeZone()); + DataProperty oldDataProperty = new DataProperty(TStorageMedium.SSD, coolDownTimeMs, "remote_s3", TStorageMedium.S3); + List partitionList = Lists.newArrayList(p2, p3, p4); + for (Partition partition : partitionList) { + Assert.assertEquals(oldDataProperty, tbl_remote.getPartitionInfo().getDataProperty(partition.getId())); + } + alterTable(stmt, false); + DataProperty dataProperty1 = new DataProperty(TStorageMedium.HDD, coolDownTimeMs, "remote_s3", TStorageMedium.S3); + for (Partition partition : partitionList) { + Assert.assertEquals(dataProperty1, tbl_remote.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tbl_remote.getPartitionInfo().getDataProperty(p1.getId())); + + // alter cooldown_time + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_cooldown_time' = '2100-04-01 22:22:22')"; + alterTable(stmt, false); + DateLiteral newDateLiteral = new DateLiteral("2100-04-01 22:22:22", Type.DATETIME); + long newCoolDownTimeMs = newDateLiteral.unixTimestamp(TimeUtils.getTimeZone()); + DataProperty dataProperty2 = new DataProperty(TStorageMedium.HDD, newCoolDownTimeMs, "remote_s3", TStorageMedium.S3); + for (Partition partition : partitionList) { + Assert.assertEquals(dataProperty2, tbl_remote.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tbl_remote.getPartitionInfo().getDataProperty(p1.getId())); + + // alter remote_storage + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('remote_storage' = 'remote_s3_1')"; + alterTable(stmt, false); + DataProperty dataProperty3 = new DataProperty(TStorageMedium.HDD, newCoolDownTimeMs, "remote_s3_1", TStorageMedium.S3); + for (Partition partition : partitionList) { + Assert.assertEquals(dataProperty3, tbl_remote.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tbl_remote.getPartitionInfo().getDataProperty(p1.getId())); + + // alter storage_cold_medium + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_cold_medium' = 'HDD', 'remote_storage' = '')"; + alterTable(stmt, false); + DataProperty dataProperty4 = new DataProperty(TStorageMedium.HDD, DataProperty.MAX_COOLDOWN_TIME_MS, "", TStorageMedium.HDD); + for (Partition partition : partitionList) { + Assert.assertEquals(dataProperty4, 
tbl_remote.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tbl_remote.getPartitionInfo().getDataProperty(p1.getId())); + + // alter recover to old state + stmt = "alter table test.tbl_remote modify partition (p2, p3, p4) set ('storage_cold_medium' = 'S3', " + + "'remote_storage' = 'remote_s3', " + + "'storage_medium' = 'SSD', " + + "'storage_cooldown_time' = '2122-04-01 20:24:00'" + + ")"; + alterTable(stmt, false); + for (Partition partition : partitionList) { + Assert.assertEquals(oldDataProperty, tbl_remote.getPartitionInfo().getDataProperty(partition.getId())); + } + Assert.assertEquals(oldDataProperty, tbl_remote.getPartitionInfo().getDataProperty(p1.getId())); + + } + @Test public void testDynamicPartitionDropAndAdd() throws Exception { // test day range diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java index 908b87779953ca..bbf5246129eb1a 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/DataPropertyTest.java @@ -33,7 +33,7 @@ public void tesCooldownTimeMs() throws Exception { dataProperty = new DataProperty(TStorageMedium.SSD, TStorageMedium.SSD); Assert.assertNotEquals(DataProperty.MAX_COOLDOWN_TIME_MS, dataProperty.getCooldownTimeMs()); - dataProperty = new DataProperty(TStorageMedium.SSD, System.currentTimeMillis() + 24 * 3600 * 1000L, "", TStorageMedium.S3); + dataProperty = new DataProperty(TStorageMedium.SSD, System.currentTimeMillis() + 24 * 3600 * 1000L, "", TStorageMedium.HDD); Assert.assertEquals(System.currentTimeMillis() + 24 * 3600 * 1000L, dataProperty.getCooldownTimeMs()); dataProperty = new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD); From 743e6fa183b97a7558207f9a34681e48debefce7 Mon Sep 17 00:00:00 2001 From: qijianliang01 Date: Mon, 28 Mar 2022 17:02:58 +0800 Subject: [PATCH 4/6] fix imports and optimize if conditions Change-Id: Id6fd861166eb7c2431fe9abb4be8cea74137ffb6 --- .../org/apache/doris/analysis/ModifyPartitionClause.java | 2 -- .../org/apache/doris/common/util/PropertyAnalyzer.java | 8 ++++---- .../java/org/apache/doris/clone/DiskRebalanceTest.java | 5 +---- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyPartitionClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyPartitionClause.java index 4b434b82728514..4229d2682c8dc9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyPartitionClause.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyPartitionClause.java @@ -18,13 +18,11 @@ package org.apache.doris.analysis; import org.apache.doris.alter.AlterOpType; -import org.apache.doris.catalog.DataProperty; import org.apache.doris.common.AnalysisException; import org.apache.doris.common.util.PrintableMap; import org.apache.doris.common.util.PropertyAnalyzer; import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.Lists; import com.google.common.collect.Maps; diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java index cbd77ecde19c4b..2f4b94c8ce1e37 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java +++ 
b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java @@ -202,14 +202,14 @@ public static DataProperty analyzeDataProperty(Map properties, D } // 3.3 set default cooldown time + // storage medium: [HDD|SSD] + // cold storage medium: [HDD|S3] // 3.3.1 set default cooldown time to 30 days // 1) SSD -> HDD, SSD -> remote_storage // 2) HDD -> remote_storage - if ((storageMedium == TStorageMedium.SSD || - (storageMedium == TStorageMedium.HDD && coldStorageMedium == TStorageMedium.S3)) && - !hasCooldown) { + if (storageMedium != coldStorageMedium && !hasCooldown) { coolDownTimeStamp = currentTimeMs + Config.storage_cooldown_second * 1000L; - } else if (storageMedium == TStorageMedium.HDD && coldStorageMedium == TStorageMedium.HDD) { + } else if (storageMedium == coldStorageMedium) { // 3.3.2 set default to MAX, ignore user's setting coolDownTimeStamp = DataProperty.MAX_COOLDOWN_TIME_MS; } diff --git a/fe/fe-core/src/test/java/org/apache/doris/clone/DiskRebalanceTest.java b/fe/fe-core/src/test/java/org/apache/doris/clone/DiskRebalanceTest.java index 1d7cb00a3987a7..e42781f2b8d0a8 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/clone/DiskRebalanceTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/clone/DiskRebalanceTest.java @@ -37,7 +37,6 @@ import org.apache.doris.resource.Tag; import org.apache.doris.system.Backend; import org.apache.doris.system.SystemInfoService; -import org.apache.doris.task.AgentBatchTask; import org.apache.doris.task.AgentTask; import org.apache.doris.task.StorageMediaMigrationTask; import org.apache.doris.thrift.TStorageMedium; @@ -59,7 +58,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -//import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -67,7 +65,6 @@ import mockit.Delegate; import mockit.Expectations; import mockit.Mocked; -//import static com.google.common.collect.MoreCollectors.onlyElement; public class DiskRebalanceTest { private static final Logger LOG = LogManager.getLogger(DiskRebalanceTest.class); @@ -148,7 +145,7 @@ private void createPartitionsForTable(OlapTable olapTable, MaterializedIndex ind long id = 31 + idx; Partition partition = new Partition(id, "p" + idx, index, new HashDistributionInfo()); olapTable.addPartition(partition); - olapTable.getPartitionInfo().addPartition(id, new DataProperty(TStorageMedium.HDD), + olapTable.getPartitionInfo().addPartition(id, new DataProperty(TStorageMedium.HDD, TStorageMedium.HDD), ReplicaAllocation.DEFAULT_ALLOCATION, false); }); } From 2d5f2dd1a06ca7fc5d3bdb416f2b3f076fb79d44 Mon Sep 17 00:00:00 2001 From: qijianliang01 Date: Mon, 28 Mar 2022 18:06:14 +0800 Subject: [PATCH 5/6] optimize if conditions Change-Id: I2d0599aa7c0e369cce965e00dace3d52908a8a7b --- .../doris/common/util/PropertyAnalyzer.java | 73 +++++++++++-------- 1 file changed, 42 insertions(+), 31 deletions(-) diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java index 2f4b94c8ce1e37..5415aae0b63d41 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java @@ -169,49 +169,60 @@ public static DataProperty analyzeDataProperty(Map properties, D properties.remove(PROPERTIES_REMOTE_STORAGE); // 2. 
From 2d5f2dd1a06ca7fc5d3bdb416f2b3f076fb79d44 Mon Sep 17 00:00:00 2001
From: qijianliang01
Date: Mon, 28 Mar 2022 18:06:14 +0800
Subject: [PATCH 5/6] optimize if conditions

Change-Id: I2d0599aa7c0e369cce965e00dace3d52908a8a7b
---
 .../doris/common/util/PropertyAnalyzer.java | 73 +++++++++++--------
 1 file changed, 42 insertions(+), 31 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
index 2f4b94c8ce1e37..5415aae0b63d41 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
@@ -169,49 +169,60 @@ public static DataProperty analyzeDataProperty(Map<String, String> properties, D
         properties.remove(PROPERTIES_REMOTE_STORAGE);
 
         // 2. check remote_storage and storage_cold_medium
-        // 2.1 when using remote_storage, must have cold storage medium
-        if (hasRemoteStorage && !hasColdMedium) {
-            throw new AnalysisException("Invalid data property, " +
-                    "`remote_storage` must be used with `storage_cold_medium`.");
-        }
-
-        // 2.2 check cold storage medium and remote storage type
-        if (hasColdMedium && hasRemoteStorage) {
-            remotestorageProperty = Catalog.getCurrentCatalog()
-                    .getRemoteStorageMgr().getRemoteStorageByName(remoteStorageName);
-
-            if (!coldStorageMedium.name().equalsIgnoreCase(remotestorageProperty.getStorageType().name())) {
+        if (hasRemoteStorage) {
+            // 2.1 when using remote_storage, must have cold storage medium
+            if (!hasColdMedium) {
                 throw new AnalysisException("Invalid data property, " +
-                        "`storage_cold_medium` is inconsistent with `remote_storage`.");
-            }
-        }
-
-        // 3. Check cooldown_time
-        // 3.1 cooldown_time must use with storage medium
-        if (hasCooldown && !hasMedium) {
-            throw new AnalysisException("Invalid data property. storage medium property is not found");
-        }
+                        "`remote_storage` must be used with `storage_cold_medium`.");
+            } else {
+                // 2.2 check cold storage medium and remote storage type
+                remotestorageProperty = Catalog.getCurrentCatalog()
+                        .getRemoteStorageMgr().getRemoteStorageByName(remoteStorageName);
 
-        // 3.2 cooldown time must be later than now
-        // Both HDD and SSD can have cooldown time
-        long currentTimeMs = System.currentTimeMillis();
-        if (hasCooldown) {
-            if (coolDownTimeStamp <= currentTimeMs) {
-                throw new AnalysisException("Cooldown time should be later than now");
+                if (!coldStorageMedium.name().equalsIgnoreCase(remotestorageProperty.getStorageType().name())) {
+                    throw new AnalysisException("Invalid data property, " +
+                            "`storage_cold_medium` is inconsistent with `remote_storage`.");
+                }
             }
         }
 
-        // 3.3 set default cooldown time
+        // 3. check cooldown time
+
+        // storage medium: [HDD|SSD]
         // cold storage medium: [HDD|S3]
-        // 3.3.1 set default cooldown time to 30 days
+        // Effective data cool down flow:
+        // 1) SSD -> HDD
+        // 2) SSD -> S3
+        // 3) HDD -> S3
+        boolean effectiveDataCoolDownFlow = storageMedium == TStorageMedium.SSD && coldStorageMedium == TStorageMedium.HDD ||
+                storageMedium == TStorageMedium.SSD && coldStorageMedium == TStorageMedium.S3 ||
+                storageMedium == TStorageMedium.HDD && coldStorageMedium == TStorageMedium.S3;
+
+        long currentTimeMs = System.currentTimeMillis();
+        // 3.1 set default cooldown time
+        // 3.1.1 set default cooldown time to 30 days
         // 1) SSD -> HDD, SSD -> remote_storage
         // 2) HDD -> remote_storage
-        if (storageMedium != coldStorageMedium && !hasCooldown) {
+        if (!hasCooldown && effectiveDataCoolDownFlow) {
             coolDownTimeStamp = currentTimeMs + Config.storage_cooldown_second * 1000L;
         } else if (storageMedium == coldStorageMedium) {
-            // 3.3.2 set default to MAX, ignore user's setting
+            // 3.1.2 set default to MAX, ignore user's setting
             coolDownTimeStamp = DataProperty.MAX_COOLDOWN_TIME_MS;
+            hasCooldown = false;
+        }
+
+        if (hasCooldown) {
+            // 3.2 cooldown time must be later than now
+            // Both HDD and SSD can have cooldown time
+            if (coolDownTimeStamp <= currentTimeMs) {
+                throw new AnalysisException("Cooldown time should be later than now");
+            }
+
+            // 3.3 check data cool down flow
+            if (!effectiveDataCoolDownFlow) {
+                throw new AnalysisException("Can not move data from storage_medium[" + storageMedium + "] to " +
+                        "storage_cold_medium[" + coldStorageMedium + "]");
+            }
         }
 
         Preconditions.checkNotNull(storageMedium);
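Patch 5/6 above replaces the scattered medium comparisons with a single `effectiveDataCoolDownFlow` flag that whitelists the three legal cooldown paths: SSD -> HDD, SSD -> S3, and HDD -> S3. The same rule can be read as a small lookup table, as in this sketch; the names are illustrative only, and the real code keeps the chained boolean expression.

```java
import java.util.Map;
import java.util.Set;

// Table-driven restatement of the "effective cool down flow" rule from patch 5/6.
public class CoolDownFlow {
    enum Medium { HDD, SSD, S3 }

    // The three flows the patch accepts: SSD -> HDD, SSD -> S3, HDD -> S3.
    private static final Map<Medium, Set<Medium>> ALLOWED_FLOWS = Map.of(
            Medium.SSD, Set.of(Medium.HDD, Medium.S3),
            Medium.HDD, Set.of(Medium.S3));

    static boolean isEffectiveFlow(Medium storageMedium, Medium coldStorageMedium) {
        return ALLOWED_FLOWS.getOrDefault(storageMedium, Set.of()).contains(coldStorageMedium);
    }

    public static void main(String[] args) {
        System.out.println(isEffectiveFlow(Medium.SSD, Medium.S3));  // true
        System.out.println(isEffectiveFlow(Medium.HDD, Medium.S3));  // true
        System.out.println(isEffectiveFlow(Medium.S3, Medium.HDD));  // false
    }
}
```

The table form also makes the asymmetry explicit: S3 never appears as a source, so data never migrates back from remote storage to a local medium.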
From a14ca7f14b14d67b2136c8c57fb9c885d8acf58e Mon Sep 17 00:00:00 2001
From: qijianliang01
Date: Mon, 28 Mar 2022 18:17:06 +0800
Subject: [PATCH 6/6] optimize if conditions

Change-Id: I7a71111a5e4fa9c324c6ede1a16cff766bebd446
---
 .../org/apache/doris/common/util/PropertyAnalyzer.java | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
index 5415aae0b63d41..1a461226eb31de 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
@@ -212,13 +212,18 @@ public static DataProperty analyzeDataProperty(Map<String, String> properties, D
         }
 
         if (hasCooldown) {
-            // 3.2 cooldown time must be later than now
+            // 3.2 check cooldown with storage medium
+            if (!hasMedium) {
+                throw new AnalysisException("Invalid data property, " +
+                        "`cooldown_time` must be used with `storage_medium`.");
+            }
+            // 3.3 cooldown time must be later than now
             // Both HDD and SSD can have cooldown time
             if (coolDownTimeStamp <= currentTimeMs) {
                 throw new AnalysisException("Cooldown time should be later than now");
             }
 
-            // 3.3 check data cool down flow
+            // 3.4 check data cool down flow
             if (!effectiveDataCoolDownFlow) {
                 throw new AnalysisException("Can not move data from storage_medium[" + storageMedium + "] to " +
                         "storage_cold_medium[" + coldStorageMedium + "]");
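Taken together, patches 4/6 through 6/6 leave `analyzeDataProperty` with a clear validation order for an explicit cooldown: the property must come with `storage_medium`, must lie in the future, and must describe a legal cooldown flow; otherwise a default of now plus 30 days is applied (or MAX, meaning never cool down, when the two mediums are equal). The following is a condensed, standalone sketch of that control flow under assumed names (`Medium`, `analyze`, and `IllegalArgumentException` standing in for Doris's `AnalysisException`); it is an illustration of the resulting logic, not the actual Doris method.

```java
// Condensed sketch of the cooldown validation order after patch 6/6.
public class CooldownValidationSketch {
    enum Medium { HDD, SSD, S3 }

    static final long DEFAULT_COOLDOWN_MS = 30L * 24 * 3600 * 1000; // 30 days
    static final long MAX_COOLDOWN_MS = Long.MAX_VALUE;             // never cool down

    static long analyze(Medium storage, Medium cold, Long userCooldownMs, boolean hasMedium) {
        // The three legal flows: SSD -> HDD, SSD -> S3, HDD -> S3.
        boolean effectiveFlow = (storage == Medium.SSD && cold == Medium.HDD)
                || (storage == Medium.SSD && cold == Medium.S3)
                || (storage == Medium.HDD && cold == Medium.S3);
        long now = System.currentTimeMillis();
        boolean hasCooldown = userCooldownMs != null;
        long cooldown = hasCooldown ? userCooldownMs : 0L;
        if (!hasCooldown && effectiveFlow) {
            cooldown = now + DEFAULT_COOLDOWN_MS;   // default: 30 days from now
        } else if (storage == cold) {
            cooldown = MAX_COOLDOWN_MS;             // pin to MAX, ignore user's setting
            hasCooldown = false;
        }
        if (hasCooldown) {
            if (!hasMedium) {                       // patch 6/6: cooldown requires storage_medium
                throw new IllegalArgumentException("`cooldown_time` must be used with `storage_medium`.");
            }
            if (cooldown <= now) {
                throw new IllegalArgumentException("Cooldown time should be later than now");
            }
            if (!effectiveFlow) {
                throw new IllegalArgumentException("Can not move data from " + storage + " to " + cold);
            }
        }
        return cooldown;
    }

    public static void main(String[] args) {
        // SSD -> S3 with no explicit cooldown: defaults to now + 30 days.
        System.out.println(analyze(Medium.SSD, Medium.S3, null, true));
        // HDD -> HDD: cooldown pinned to MAX, user's value ignored.
        System.out.println(analyze(Medium.HDD, Medium.HDD, 123L, true));
    }
}
```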