From 30921042c36be44e72e43cf6427cfcbbe3f9650b Mon Sep 17 00:00:00 2001 From: yangzhg Date: Tue, 13 Oct 2020 13:07:16 +0800 Subject: [PATCH] fix docs typo --- .../alter-table/alter-table-bitmap-index.md | 8 ++++---- .../alter-table/alter-table-schema-change.md | 12 ++++++------ docs/en/administrator-guide/backup-restore.md | 4 ++-- docs/en/administrator-guide/colocation-join.md | 6 +++--- .../en/administrator-guide/config/be_config.md | 4 ++-- .../en/administrator-guide/config/fe_config.md | 2 +- .../administrator-guide/dynamic-partition.md | 10 +++++----- docs/en/administrator-guide/export_manual.md | 6 +++--- .../http-actions/fe-get-log-file.md | 2 +- .../http-actions/fe/connection-action.md | 2 +- .../load-data/broker-load-manual.md | 4 ++-- .../load-data/delete-manual.md | 4 ++-- .../load-data/insert-into-manual.md | 2 +- .../operation/monitor-metrics/be-metrics.md | 2 +- .../operation/monitor-metrics/fe-metrics.md | 2 +- docs/en/community/committer-guide.md | 2 +- docs/en/community/how-to-contribute.md | 2 +- docs/en/developer-guide/fe-eclipse-dev.md | 4 ++-- docs/en/developer-guide/format-code.md | 2 +- docs/en/extending-doris/doris-on-es.md | 2 +- docs/en/getting-started/advance-usage.md | 2 +- docs/en/getting-started/basic-usage.md | 8 ++++---- docs/en/getting-started/best-practice.md | 4 ++-- docs/en/getting-started/data-model-rollup.md | 18 +++++++++--------- docs/en/getting-started/hit-the-rollup.md | 2 +- docs/en/installing/compilation.md | 2 +- docs/en/installing/install-deploy.md | 4 ++-- docs/en/internal/doris_storage_optimization.md | 2 +- docs/en/internal/grouping_sets_design.md | 6 +++--- .../date-time-functions/date_format.md | 2 +- .../sql-functions/string-functions/instr.md | 2 +- .../Account Management/CREATE USER.md | 4 ++-- .../sql-statements/Account Management/GRANT.md | 2 +- .../Administration/ADMIN SET CONFIG.md | 2 +- .../Administration/ALTER CLUSTER.md | 6 +----- .../Administration/ALTER SYSTEM.md | 2 +- .../Administration/CREATE 
CLUSTER.md | 3 +-- .../Administration/CREATE FILE.md | 2 +- .../sql-statements/Data Definition/BACKUP.md | 2 +- .../Data Definition/CREATE REPOSITORY.md | 4 ++-- .../Data Definition/CREATE TABLE LIKE.md | 2 +- .../Data Definition/CREATE TABLE.md | 6 +++--- .../Data Definition/Colocate Join.md | 2 +- .../Data Definition/DROP MATERIALIZED VIEW.md | 2 +- .../Data Manipulation/CANCEL LABEL.md | 2 +- .../sql-statements/Data Manipulation/insert.md | 6 +++--- .../sql-statements/Data Types/DATE.md | 2 +- .../sql-statements/Data Types/DATETIME.md | 2 +- 48 files changed, 90 insertions(+), 95 deletions(-) diff --git a/docs/en/administrator-guide/alter-table/alter-table-bitmap-index.md b/docs/en/administrator-guide/alter-table/alter-table-bitmap-index.md index c12c82318a113e..73c8a4c39343cb 100644 --- a/docs/en/administrator-guide/alter-table/alter-table-bitmap-index.md +++ b/docs/en/administrator-guide/alter-table/alter-table-bitmap-index.md @@ -32,7 +32,7 @@ This document focuses on how to create an index job, as well as some considerati * bitmap index:a fast data structure that speeds up queries ## Basic Principles -Creating and droping index is essentially a schema change job. For details, please refer to +Creating and dropping index is essentially a schema change job. 
For details, please refer to [Schema Change](alter-table-schema-change.html)。 ## Syntax @@ -53,12 +53,12 @@ create/drop index syntax Please refer to [DROP INDEX](../../sql-reference/sql-statements/Data%20Definition/DROP%20INDEX.html) or [ALTER TABLE](../../sql-reference/sql-statements/Data%20Definition/ALTER%20TABLE.html) ## Create Job -Please refer to [Scheam Change](alter-table-schema-change.html) +Please refer to [Schema Change](alter-table-schema-change.html) ## View Job -Please refer to [Scheam Change](alter-table-schema-change.html) +Please refer to [Schema Change](alter-table-schema-change.html) ## Cancel Job -Please refer to [Scheam Change](alter-table-schema-change.html) +Please refer to [Schema Change](alter-table-schema-change.html) ## Notice * Currently only index of bitmap type is supported. diff --git a/docs/en/administrator-guide/alter-table/alter-table-schema-change.md b/docs/en/administrator-guide/alter-table/alter-table-schema-change.md index 9424e0ea5076b8..bd2a83479091d0 100644 --- a/docs/en/administrator-guide/alter-table/alter-table-schema-change.md +++ b/docs/en/administrator-guide/alter-table/alter-table-schema-change.md @@ -1,6 +1,6 @@ --- { - "title": "Scheam Change", + "title": "Schema Change", "language": "en" } --- @@ -24,9 +24,9 @@ specific language governing permissions and limitations under the License. --> -# Scheam Change +# Schema Change -Users can modify the schema of existing tables through the Scheam Change operation. Doris currently supports the following modifications: +Users can modify the schema of existing tables through the Schema Change operation. 
Doris currently supports the following modifications: * Add and delete columns * Modify column type @@ -34,7 +34,7 @@ Users can modify the schema of existing tables through the Scheam Change operati * Add and modify Bloom Filter * Add and delete bitmap index -This document mainly describes how to create a Scheam Change job, as well as some considerations and frequently asked questions about Scheam Change. +This document mainly describes how to create a Schema Change job, as well as some considerations and frequently asked questions about Schema Change. ## Glossary * Base Table:When each table is created, it corresponds to a base table. The base table stores the complete data of this table. Rollups are usually created based on the data in the base table (and can also be created from other rollups). @@ -68,9 +68,9 @@ The basic process of executing a Schema Change is to generate a copy of the inde Before starting the conversion of historical data, Doris will obtain a latest transaction ID. And wait for all import transactions before this Transaction ID to complete. This Transaction ID becomes a watershed. This means that Doris guarantees that all import tasks after the watershed will generate data for both the original Index and the new Index. In this way, when the historical data conversion is completed, the data in the new Index can be guaranteed to be complete. ## Create Job -The specific syntax for creating a Scheam Change can be found in the description of the Scheam Change section in the help `HELP ALTER TABLE`. +The specific syntax for creating a Schema Change can be found in the description of the Schema Change section in the help `HELP ALTER TABLE`. -The creation of Scheam Change is an asynchronous process. After the job is submitted successfully, the user needs to view the job progress through the `SHOW ALTER TABLE COLUMN` command. +The creation of Schema Change is an asynchronous process. 
After the job is submitted successfully, the user needs to view the job progress through the `SHOW ALTER TABLE COLUMN` command. ## View Job `SHOW ALTER TABLE COLUMN` You can view the Schema Change jobs that are currently executing or completed. When multiple indexes are involved in a Schema Change job, the command displays multiple lines, each corresponding to an index. For example: diff --git a/docs/en/administrator-guide/backup-restore.md b/docs/en/administrator-guide/backup-restore.md index 833505074b520f..c19da829b414fa 100644 --- a/docs/en/administrator-guide/backup-restore.md +++ b/docs/en/administrator-guide/backup-restore.md @@ -121,7 +121,7 @@ The commands related to the backup recovery function are as follows. The followi * Snapshot Finished Time: Snapshot completion time. * Upload Finished Time: Snapshot upload completion time. * FinishedTime: The completion time of this assignment. - * Unfinished Tasks: In the `SNAPSHOTTING', `UPLOADING'and other stages, there will be multiple sub-tasks at the same time, the current stage shown here, the task ID of the unfinished sub-tasks. + * Unfinished Tasks: In the `SNAPSHOTTING`, `UPLOADING` and other stages, there will be multiple sub-tasks at the same time, the current stage shown here, the task ID of the unfinished sub-tasks. * TaskErrMsg: If there is a sub-task execution error, the error message corresponding to the sub-task will be displayed here. * Status: It is used to record some status information that may appear during the whole operation. * Timeout: The timeout time of a job in seconds. @@ -139,7 +139,7 @@ The commands related to the backup recovery function are as follows. The followi * Database: The database corresponding to backup. * Details: Shows the complete data directory structure of the backup. -5. RESTOR +5. RESTORE Perform a recovery operation. 
diff --git a/docs/en/administrator-guide/colocation-join.md b/docs/en/administrator-guide/colocation-join.md index ea2e5118c3b8cd..debf74b6fd143d 100644 --- a/docs/en/administrator-guide/colocation-join.md +++ b/docs/en/administrator-guide/colocation-join.md @@ -57,7 +57,7 @@ In order for a table to have the same data distribution, the table in the same C Tables in the same CG do not require consistency in the number, scope, and type of partition columns. -After fixing the number of bucket columns and buckets, the tables in the same CG will have the same Buckets Sequnce. The number of replicas determines the number of replicas of Tablets in each bucket, which BE they are stored on. Suppose that Buckets Sequnce is `[0, 1, 2, 3, 4, 5, 6, 7] `, and that BE nodes have `[A, B, C, D] `4. A possible distribution of data is as follows: +After fixing the number of bucket columns and buckets, the tables in the same CG will have the same Buckets Sequence. The number of replicas determines the number of replicas of Tablets in each bucket, which BE they are stored on. Suppose that Buckets Sequence is `[0, 1, 2, 3, 4, 5, 6, 7] `, and that BE nodes have `[A, B, C, D] `4. A possible distribution of data is as follows: ``` +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ @@ -141,7 +141,7 @@ SHOW PROC '/colocation_group/10005.10008'; * BucketIndex: Subscript to the bucket sequence. * Backend Ids: A list of BE node IDs where data fragments are located in buckets. -> The above commands require AMDIN privileges. Normal user view is not supported at this time. +> The above commands require ADMIN privileges. Normal user view is not supported at this time. ### Modify Colocate Group @@ -172,7 +172,7 @@ Copies can only be stored on specified BE nodes. So when a BE is unavailable (do ### Duplicate Equilibrium -Doris will try to distribute the fragments of the Collocation table evenly across all BE nodes. 
For the replica balancing of common tables, the granularity is single replica, that is to say, it is enough to find BE nodes with lower load for each replica alone. The equilibrium of the Colocation table is at the Bucket level, where all replicas within a Bucket migrate together. We adopt a simple equalization algorithm, which distributes Buckets Sequnce evenly on all BEs, regardless of the actual size of the replicas, but only according to the number of replicas. Specific algorithms can be referred to the code annotations in `ColocateTableBalancer.java`. +Doris will try to distribute the fragments of the Collocation table evenly across all BE nodes. For the replica balancing of common tables, the granularity is single replica, that is to say, it is enough to find BE nodes with lower load for each replica alone. The equilibrium of the Colocation table is at the Bucket level, where all replicas within a Bucket migrate together. We adopt a simple equalization algorithm, which distributes Buckets Sequence evenly on all BEs, regardless of the actual size of the replicas, but only according to the number of replicas. Specific algorithms can be referred to the code annotations in `ColocateTableBalancer.java`. > Note 1: Current Colocation replica balancing and repair algorithms may not work well for heterogeneous deployed Oris clusters. The so-called heterogeneous deployment, that is, the BE node's disk capacity, number, disk type (SSD and HDD) is inconsistent. In the case of heterogeneous deployment, small BE nodes and large BE nodes may store the same number of replicas. > diff --git a/docs/en/administrator-guide/config/be_config.md b/docs/en/administrator-guide/config/be_config.md index a2c2af6f658bb6..d164d39182a60d 100644 --- a/docs/en/administrator-guide/config/be_config.md +++ b/docs/en/administrator-guide/config/be_config.md @@ -199,7 +199,7 @@ Similar to `base_compaction_trace_threshold`. 
* Description: Configure the merge policy of the cumulative compaction stage. Currently, two merge policy have been implemented, num_based and size_based. * Default value: size_based -In detail, ordinary is the initial version of the cumulative compaction merge policy. After a cumulative compaction, the base compaction process is directly performed. The size_based policy is an optimized version of the ordinary strategy. Versions are merged only when the disk volume of the rowset is of the same order of magnitude. After the compaction, the output rowset which satifies the conditions is promoted to the base compaction stage. In the case of a large number of small batch imports: reduce the write magnification of base compact, trade-off between read magnification and space magnification, and reducing file version data. +In detail, ordinary is the initial version of the cumulative compaction merge policy. After a cumulative compaction, the base compaction process is directly performed. The size_based policy is an optimized version of the ordinary strategy. Versions are merged only when the disk volume of the rowset is of the same order of magnitude. After the compaction, the output rowset which satisfies the conditions is promoted to the base compaction stage. In the case of a large number of small batch imports: reduce the write magnification of base compact, trade-off between read magnification and space magnification, and reducing file version data. ### `cumulative_size_based_promotion_size_mbytes` @@ -337,7 +337,7 @@ The default value is `false`. * Default: false The merged expired rowset version path will be deleted after half an hour. In abnormal situations, deleting these versions will result in the problem that the consistent path of the query cannot be constructed. When the configuration is false, the program check is strict and the program will directly report an error and exit. -When configured as true, the program will run normally and ignore this error. 
In general, ignoring this error will not affect the query, only when the merged version is dispathed by fe, -230 error will appear. +When configured as true, the program will run normally and ignore this error. In general, ignoring this error will not affect the query, only when the merged version is dispatched by fe, -230 error will appear. ### inc_rowset_expired_sec diff --git a/docs/en/administrator-guide/config/fe_config.md b/docs/en/administrator-guide/config/fe_config.md index 8b18254ddfdc77..b37d16d376be28 100644 --- a/docs/en/administrator-guide/config/fe_config.md +++ b/docs/en/administrator-guide/config/fe_config.md @@ -63,7 +63,7 @@ There are two ways to configure FE configuration items:      2. Dynamic configuration - After the FE starts, you can set the configuration items dynamically through the following commands. This command requires administrator priviledge. + After the FE starts, you can set the configuration items dynamically through the following commands. This command requires administrator privilege. `ADMIN SET FRONTEND CONFIG (" fe_config_name "=" fe_config_value ");` diff --git a/docs/en/administrator-guide/dynamic-partition.md b/docs/en/administrator-guide/dynamic-partition.md index e5df88495c1fb8..610d366704cdc9 100644 --- a/docs/en/administrator-guide/dynamic-partition.md +++ b/docs/en/administrator-guide/dynamic-partition.md @@ -26,7 +26,7 @@ under the License. # Dynamic Partition -Dynamic partition is a new feature introduced in Doris verion 0.12. It's designed to manage partition's Time-to-Life (TTL), reducing the burden on users. +Dynamic partition is a new feature introduced in Doris version 0.12. It's designed to manage partition's Time-to-Life (TTL), reducing the burden on users. At present, the functions of dynamically adding partitions and dynamically deleting partitions are realized. @@ -302,11 +302,11 @@ mysql> SHOW DYNAMIC PARTITION TABLES; Whether to enable Doris's dynamic partition feature. 
The default value is false, which is off. This parameter only affects the partitioning operation of dynamic partition tables, not normal tables. You can modify the parameters in `fe.conf` and restart FE to take effect. You can also execute the following commands at runtime to take effect: - MySQL protocal: + MySQL protocol: `ADMIN SET FRONTEND CONFIG ("dynamic_partition_enable" = "true")` - HTTP protocal: + HTTP protocol: `curl --location-trusted -u username:password -XGET http://fe_host:fe_http_port/api/_set_config?dynamic_partition_enable=true` @@ -316,11 +316,11 @@ mysql> SHOW DYNAMIC PARTITION TABLES; The execution frequency of dynamic partition threads defaults to 3600 (1 hour), that is, scheduling is performed every 1 hour. You can modify the parameters in `fe.conf` and restart FE to take effect. You can also modify the following commands at runtime: - MySQL protocal: + MySQL protocol: `ADMIN SET FRONTEND CONFIG ("dynamic_partition_check_interval_seconds" = "7200")` - HTTP protocal: + HTTP protocol: `curl --location-trusted -u username:password -XGET http://fe_host:fe_http_port/api/_set_config?dynamic_partition_check_interval_seconds=432000` diff --git a/docs/en/administrator-guide/export_manual.md b/docs/en/administrator-guide/export_manual.md index b647e5aa5f8d78..20a1985878de6a 100644 --- a/docs/en/administrator-guide/export_manual.md +++ b/docs/en/administrator-guide/export_manual.md @@ -73,7 +73,7 @@ The overall mode of dispatch is as follows: 1. The user submits an Export job to FE. 2. FE's Export scheduler performs an Export job in two stages: 1. PENDING: FE generates Export Pending Task, sends snapshot command to BE, and takes a snapshot of all Tablets involved. And generate multiple query plans. - 2. EXPORTING: FE generates Export ExporingTask and starts executing the query plan. + 2. EXPORTING: FE generates Export ExportingTask and starts executing the query plan. 
### query plan splitting @@ -122,7 +122,7 @@ WITH BROKER "hdfs" * `timeout`: homework timeout. Default 2 hours. Unit seconds. * `tablet_num_per_task`: The maximum number of fragments allocated per query plan. The default is 5. -After submitting a job, the job status can be imported by querying the `SHOW EXPORT'command. The results are as follows: +After submitting a job, the job status can be imported by querying the `SHOW EXPORT` command. The results are as follows: ``` JobId: 14008 @@ -141,7 +141,7 @@ FinishTime: 2019-06-25 17:08:34 * JobId: The unique ID of the job * State: Job status: * PENDING: Jobs to be Scheduled - * EXPORING: Data Export + * EXPORTING: Data Export * FINISHED: Operation Successful * CANCELLED: Job Failure * Progress: Work progress. The schedule is based on the query plan. Assuming a total of 10 query plans have been completed, the progress will be 30%. diff --git a/docs/en/administrator-guide/http-actions/fe-get-log-file.md b/docs/en/administrator-guide/http-actions/fe-get-log-file.md index 254e2ec548f89f..5a7595ad5845ad 100644 --- a/docs/en/administrator-guide/http-actions/fe-get-log-file.md +++ b/docs/en/administrator-guide/http-actions/fe-get-log-file.md @@ -71,4 +71,4 @@ To get FE log via HTTP ## Notification -Need ADMIN priviledge. +Need ADMIN privilege. diff --git a/docs/en/administrator-guide/http-actions/fe/connection-action.md b/docs/en/administrator-guide/http-actions/fe/connection-action.md index 539c5846cb678a..d7ee29e3948800 100644 --- a/docs/en/administrator-guide/http-actions/fe/connection-action.md +++ b/docs/en/administrator-guide/http-actions/fe/connection-action.md @@ -24,7 +24,7 @@ specific language governing permissions and limitations under the License. 
--> -# Conection Action +# Connection Action ## Request diff --git a/docs/en/administrator-guide/load-data/broker-load-manual.md b/docs/en/administrator-guide/load-data/broker-load-manual.md index 75ad21a503a2c4..0068d93070f0cf 100644 --- a/docs/en/administrator-guide/load-data/broker-load-manual.md +++ b/docs/en/administrator-guide/load-data/broker-load-manual.md @@ -164,7 +164,7 @@ The following is a detailed explanation of some parameters of the data descripti + negative - ```data_desc``` can also set up data fetching and anti-importing. This function is mainly used when aggregated columns in data tables are of SUM type. If you want to revoke a batch of imported data. The `negative'parameter can be used as a batch of data. Doris automatically retrieves this batch of data on aggregated columns to eliminate the same batch of data. + ```data_desc``` can also set up data fetching and anti-importing. This function is mainly used when aggregated columns in data tables are of SUM type. If you want to revoke a batch of imported data. The `negative` parameter can be used as a batch of data. Doris automatically retrieves this batch of data on aggregated columns to eliminate the same batch of data. + partition @@ -377,7 +377,7 @@ The following configurations belong to the Broker load system-level configuratio + min\_bytes\_per\_broker\_scanner/max\_bytes\_per\_broker\_scanner/max\_broker\_concurrency - The first two configurations limit the minimum and maximum amount of data processed by a single BE. The third configuration limits the maximum number of concurrent imports for a job. The minimum amount of data processed, the maximum number of concurrencies, the size of source files and the number of BEs in the current cluster **together determine the concurrency of this import**. + The first two configurations limit the minimum and maximum amount of data processed by a single BE. The third configuration limits the maximum number of concurrent imports for a job. 
The minimum amount of data processed, the maximum number of concurrency, the size of source files and the number of BEs in the current cluster **together determine the concurrency of this import**. ``` The number of concurrent imports = Math. min (source file size / minimum throughput, maximum concurrency, current number of BE nodes) diff --git a/docs/en/administrator-guide/load-data/delete-manual.md b/docs/en/administrator-guide/load-data/delete-manual.md index 692ca03d8e21cd..6ab2f67f146780 100644 --- a/docs/en/administrator-guide/load-data/delete-manual.md +++ b/docs/en/administrator-guide/load-data/delete-manual.md @@ -59,14 +59,14 @@ The following describes the parameters used in the delete statement: * WHERE - The conditiona of the delete statement. All delete statements must specify a where condition. + The condition of the delete statement. All delete statements must specify a where condition. Explanation: 1. The type of `OP` in the WHERE condition can only include `=, >, <, >=, <=, !=, in, not in`. 2. The column in the WHERE condition can only be the `key` column. 3. Cannot delete when the `key` column does not exist in any rollup table. -4. Each condition in WHERE condition can only be realated by `and`. If you want `or`, you are suggested to write these conditions into two delete statements. +4. Each condition in WHERE condition can only be connected by `and`. If you want `or`, you are suggested to write these conditions into two delete statements. 5. If the specified table is a range partitioned table, `PARTITION` must be specified unless the table is a single partition table,. 6. Unlike the insert into command, delete statement cannot specify `label` manually. 
You can view the concept of `label` in [Insert Into] (./insert-into-manual.md) diff --git a/docs/en/administrator-guide/load-data/insert-into-manual.md b/docs/en/administrator-guide/load-data/insert-into-manual.md index 5795575acee4b8..d581f1ecb7934a 100644 --- a/docs/en/administrator-guide/load-data/insert-into-manual.md +++ b/docs/en/administrator-guide/load-data/insert-into-manual.md @@ -221,7 +221,7 @@ Insert Into itself is a SQL command, and the return result is divided into the f ## Best Practices ### Application scenarios -1. Users want to import only a few false data to verify the functionality of Doris system. The grammar of INSERT INTO VALUS is suitable at this time. +1. Users want to import only a few false data to verify the functionality of Doris system. The grammar of INSERT INTO VALUES is suitable at this time. 2. Users want to convert the data already in the Doris table into ETL and import it into a new Doris table, which is suitable for using INSERT INTO SELECT grammar. 3. Users can create an external table, such as MySQL external table mapping a table in MySQL system. Or create Broker external tables to map data files on HDFS. Then the data from the external table is imported into the Doris table for storage through the INSERT INTO SELECT grammar. diff --git a/docs/en/administrator-guide/operation/monitor-metrics/be-metrics.md b/docs/en/administrator-guide/operation/monitor-metrics/be-metrics.md index 4fd45d4e40098c..d630d11065b989 100644 --- a/docs/en/administrator-guide/operation/monitor-metrics/be-metrics.md +++ b/docs/en/administrator-guide/operation/monitor-metrics/be-metrics.md @@ -62,7 +62,7 @@ Usually used to troubleshoot network problems. ### `doris_be_snmp{name="tcp_in_segs"}` -Value of the `Tcp: InSegs` field in `/proc/net/snmp`. Represents the number of receivied TCP packets. +Value of the `Tcp: InSegs` field in `/proc/net/snmp`. Represents the number of received TCP packets. 
Use `(NEW_tcp_in_errs - OLD_tcp_in_errs) / (NEW_tcp_in_segs - OLD_tcp_in_segs)` can calculate the error rate of received TCP packets. diff --git a/docs/en/administrator-guide/operation/monitor-metrics/fe-metrics.md b/docs/en/administrator-guide/operation/monitor-metrics/fe-metrics.md index 26ba0ac06128d8..7a2afae78e4796 100644 --- a/docs/en/administrator-guide/operation/monitor-metrics/fe-metrics.md +++ b/docs/en/administrator-guide/operation/monitor-metrics/fe-metrics.md @@ -62,7 +62,7 @@ Usually used to troubleshoot network problems. ### `doris_fe_snmp{name="tcp_in_segs"}` -Value of the `Tcp: InSegs` field in `/proc/net/snmp`. Represents the number of receivied TCP packets. +Value of the `Tcp: InSegs` field in `/proc/net/snmp`. Represents the number of received TCP packets. Use `(NEW_tcp_in_errs - OLD_tcp_in_errs) / (NEW_tcp_in_segs - OLD_tcp_in_segs)` can calculate the error rate of received TCP packets. diff --git a/docs/en/community/committer-guide.md b/docs/en/community/committer-guide.md index 62fbc05224aa3a..e86cbb20e6c341 100644 --- a/docs/en/community/committer-guide.md +++ b/docs/en/community/committer-guide.md @@ -76,7 +76,7 @@ and you will be able to manage issues and pull request directly through our Gith 5. Once a reviewer has commented on a PR, they need to keep following up on subsequent changes to that PR. -6. A PR must get at least a +1 appove from committer who is not the author. +6. A PR must get at least a +1 approval from a committer who is not the author. 7. After the first +1 to the PR, wait at least one working day before merging. The main purpose is to wait for the rest of the community to come to review. diff --git a/docs/en/community/how-to-contribute.md b/docs/en/community/how-to-contribute.md index aa2f5fa2b80142..3f2840327a2919 100644 --- a/docs/en/community/how-to-contribute.md +++ b/docs/en/community/how-to-contribute.md @@ -30,7 +30,7 @@ Thank you very much for your interest in the Doris project. 
We welcome your sugg Your suggestions, comments and comments on Doris can be made directly through GitHub's [Issues] (https://github.com/apache/incubator-doris/issues/new/selection). -There are many ways to participate in and contribute to Doris projects: code implementation, test writing, process tool improvement, document improvement, and so on. Any contribution will be welcomed and you will be added to the list of contributors. Further, with sufficient contributions, you will have the opportunity to become a Commiter of Aapche with Apache mailbox and be included in the list of [Apache Commiters] (http://people.apache.org/committer-index.html). +There are many ways to participate in and contribute to Doris projects: code implementation, test writing, process tool improvement, document improvement, and so on. Any contribution will be welcomed and you will be added to the list of contributors. Further, with sufficient contributions, you will have the opportunity to become a Committer of Apache with Apache mailbox and be included in the list of [Apache Committers] (http://people.apache.org/committer-index.html). Any questions, you can contact us to get timely answers, including Wechat, Gitter (GitHub instant messaging tool), e-mail and so on. diff --git a/docs/en/developer-guide/fe-eclipse-dev.md b/docs/en/developer-guide/fe-eclipse-dev.md index 00b8df151c32e7..03aa416f4529e9 100644 --- a/docs/en/developer-guide/fe-eclipse-dev.md +++ b/docs/en/developer-guide/fe-eclipse-dev.md @@ -102,7 +102,7 @@ Then just run `Run/Debug`. ## Run FE -You can directly start an FE process in Ecplise to facilitate debugging the code. +You can directly start an FE process in Eclipse to facilitate debugging the code. 1. Create a runtime directory @@ -116,7 +116,7 @@ You can directly start an FE process in Ecplise to facilitate debugging the code Create the configuration file `fe.conf` in the `conf/` directory created in the first step. 
You can directly copy `conf/fe.conf` in the source directory and make simple changes. -3. Find the `src/main/java/org/apache/doris/PaloFe.java` file in Ecplise, right-click and select `Run As -> Run Configurations...`. Add the following environment variables to the `Environment` tab: +3. Find the `src/main/java/org/apache/doris/PaloFe.java` file in Eclipse, right-click and select `Run As -> Run Configurations...`. Add the following environment variables to the `Environment` tab: * `DORIS_HOME: /path/to/doris/fe/run/` * `PID_DIR: /path/to/doris/fe/run/` diff --git a/docs/en/developer-guide/format-code.md b/docs/en/developer-guide/format-code.md index 5d96814b82759b..a473f6a69b1d0a 100644 --- a/docs/en/developer-guide/format-code.md +++ b/docs/en/developer-guide/format-code.md @@ -66,7 +66,7 @@ the version is lower than clang-format-9.0. `-i`input file -Note: filter out the files which should not be formatted, when batch clang-formating files. +Note: filter out the files which should not be formatted, when batch clang-formatting files. 
A example of how to filter \*.h/\*.cpp and exclude some dirs: diff --git a/docs/en/extending-doris/doris-on-es.md b/docs/en/extending-doris/doris-on-es.md index 0a65943414fd13..104a8329b43f9f 100644 --- a/docs/en/extending-doris/doris-on-es.md +++ b/docs/en/extending-doris/doris-on-es.md @@ -136,7 +136,7 @@ Parameter | Description **password** | password for the user * For clusters before 7.x, please pay attention to choosing the correct type when building the table -* The authentication method only supports Http Bastic authentication, need to ensure that this user has access to: /\_cluster/state/, \_nodes/http and other paths and index read permissions;The cluster has not turned on security authentication, and the user name and password do not need to be set +* The authentication method only supports Http Basic authentication, need to ensure that this user has access to: /\_cluster/state/, \_nodes/http and other paths and index read permissions;The cluster has not turned on security authentication, and the user name and password do not need to be set * The column names in the Doris table need to exactly match the field names in the ES, and the field types should be as consistent as possible * **ENGINE** must be: **Elasticsearch** diff --git a/docs/en/getting-started/advance-usage.md b/docs/en/getting-started/advance-usage.md index fdf2983dc05c77..7012fb4ce425ee 100644 --- a/docs/en/getting-started/advance-usage.md +++ b/docs/en/getting-started/advance-usage.md @@ -114,7 +114,7 @@ After successful submission, you can view the progress of the job by following c When the job state is FINISHED, the job is completed. -When Rollup is established, you can use `DESC table1 ALL'to view the Rollup information of the table. +When Rollup is established, you can use `DESC table1 ALL` to view the Rollup information of the table. 
``` mysql> desc table1 all; diff --git a/docs/en/getting-started/basic-usage.md b/docs/en/getting-started/basic-usage.md index 8853a4662890df..41cffa111020be 100644 --- a/docs/en/getting-started/basic-usage.md +++ b/docs/en/getting-started/basic-usage.md @@ -72,9 +72,9 @@ Initially, a database can be created through root or admin users: `CREATE DATABASE example_db;` -> All commands can use'HELP command;'to see detailed grammar help. For example: `HELP CREATE DATABASE;'` +> All commands can use `HELP` command to see detailed grammar help. For example: `HELP CREATE DATABASE;` -> If you don't know the full name of the command, you can use "help command a field" for fuzzy query. If you type'HELP CREATE', you can match commands like `CREATE DATABASE', `CREATE TABLE', `CREATE USER', etc. +> If you don't know the full name of the command, you can use "help command a field" for fuzzy query. If you type `HELP CREATE`, you can match commands like `CREATE DATABASE`, `CREATE TABLE`, `CREATE USER`, etc. After the database is created, you can view the database information through `SHOW DATABASES'. @@ -99,7 +99,7 @@ After the example_db is created, the read and write permissions of example_db ca ### 2.3 Formulation -Create a table using the `CREATE TABLE'command. More detailed parameters can be seen: +Create a table using the `CREATE TABLE` command. More detailed parameters can be seen: `HELP CREATE TABLE;` @@ -315,7 +315,7 @@ Broker imports are asynchronous commands. Successful execution of the above comm `SHOW LOAD WHERE LABLE = "table1_20170708";` -In the return result, FINISHED in the `State'field indicates that the import was successful. +In the return result, FINISHED in the `State` field indicates that the import was successful.
For more instructions on `SHOW LOAD`, see` HELP SHOW LOAD; ` diff --git a/docs/en/getting-started/best-practice.md b/docs/en/getting-started/best-practice.md index 253695a038d212..b29ca833001b3c 100644 --- a/docs/en/getting-started/best-practice.md +++ b/docs/en/getting-started/best-practice.md @@ -53,7 +53,7 @@ DISTRIBUTED BY HASH(siteid) BUCKETS 10; 1.1.2. KEY UNIQUE -When UNIQUE KEY is the same, the new record covers the old record. At present, UNIQUE KEY implements the same RPLACE aggregation method as GGREGATE KEY, and they are essentially the same. Suitable for analytical business with updated requirements. +When UNIQUE KEY is the same, the new record covers the old record. At present, UNIQUE KEY implements the same REPLACE aggregation method as AGGREGATE KEY, and they are essentially the same. Suitable for analytical business with updated requirements. ``` CREATE TABLE sales_order @@ -141,7 +141,7 @@ For the `site_visit'table: site -u visit (siteid, city, username, pv) ``` -Siteid may lead to a low degree of data aggregation. If business parties often base their PV needs on city statistics, they can build a city-only, PV-based ollup: +Siteid may lead to a low degree of data aggregation. If business parties often base their PV needs on city statistics, they can build a city-only, PV-based rollup: ``` ALTER TABLE site_visit ADD ROLLUP rollup_city(city, pv); diff --git a/docs/en/getting-started/data-model-rollup.md b/docs/en/getting-started/data-model-rollup.md index 2bdb60698dc8f1..0fbe689db9be9a 100644 --- a/docs/en/getting-started/data-model-rollup.md +++ b/docs/en/getting-started/data-model-rollup.md @@ -86,7 +86,7 @@ AGGREGATE KEY(`user_id`, `date`, `timestamp`, `city`, `age`, `sex`) As you can see, this is a typical fact table of user information and access behavior. In general star model, user information and access behavior are stored in dimension table and fact table respectively. 
Here, in order to explain Doris's data model more conveniently, we store the two parts of information in a single table. -The columns in the table are divided into Key (dimension column) and Value (indicator column) according to whether `AggregationType`is set or not. No `AggregationType`, such as `user_id`, `date`, `age`, etc., is set as **Key**, while AggregationType'is set as **Value**. +The columns in the table are divided into Key (dimension column) and Value (indicator column) according to whether `AggregationType`is set or not. No `AggregationType`, such as `user_id`, `date`, `age`, etc., is set as **Key**, while Aggregation Type is set as **Value**. When we import data, the same rows and aggregates into one row for the Key column, while the Value column aggregates according to the set `AggregationType`. `AggregationType`currently has the following four ways of aggregation: @@ -162,7 +162,7 @@ Following example 1, we modify the table structure as follows: | max dwell time | INT | MAX | Maximum user residence time| | min dwell time | INT | MIN | User minimum residence time| -That is to say, a column of `timestamp'has been added to record the data filling time accurate to seconds. +That is to say, a column of `timestamp` has been added to record the data filling time accurate to seconds. The imported data are as follows: @@ -188,7 +188,7 @@ Then when this batch of data is imported into Doris correctly, the final storage | 10004 | 2017-10-01 | 2017-10-01 12:12:48 | Shenzhen | 35 | 0 | 2017-10-01 10:00:15 | 100 | 3 | 3| | 10004 | 2017-10-03 | 2017-10-03 12:38:20 | Shenzhen | 35 | 0 | 2017-10-03 10:20:22 | 11 | 6 | 6| -We can see that the stored data, just like the imported data, does not aggregate at all. This is because, in this batch of data, because the `timestamp'column is added, the Keys of all rows are **not exactly the same**. 
That is, as long as the keys of each row are not identical in the imported data, Doris can save the complete detailed data even in the aggregation model. +We can see that the stored data, just like the imported data, does not aggregate at all. This is because, in this batch of data, because the `timestamp` column is added, the Keys of all rows are **not exactly the same**. That is, as long as the keys of each row are not identical in the imported data, Doris can save the complete detailed data even in the aggregation model. ### Example 3: Importing data and aggregating existing data @@ -222,7 +222,7 @@ Then when this batch of data is imported into Doris correctly, the final storage | 10004 | 2017-10-03 | Shenzhen | 35 | 0 | 2017-10-03 11:22:00 | 55 | 19 | 6| | 10005 | 2017-10-03 | Changsha | 29 | 1 | 2017-10-03 18:11:02 | 3 | 1 | 1| -As you can see, the existing data and the newly imported data of user 10004 have been aggregated. At the same time, 10005 new users'data were added. +As you can see, the existing data and the newly imported data of user 10004 have been aggregated. At the same time, 10005 new user's data were added. Data aggregation occurs in Doris in the following three stages: @@ -434,7 +434,7 @@ When we do the following queries: Doris automatically hits the ROLLUP table. -#### OLLUP in Duplicate Model +#### ROLLUP in Duplicate Model Because the Duplicate model has no aggregate semantics. So the ROLLLUP in this model has lost the meaning of "scroll up". It's just to adjust the column order to hit the prefix index. In the next section, we will introduce prefix index in detail, and how to use ROLLUP to change prefix index in order to achieve better query efficiency. @@ -513,15 +513,15 @@ The ROLLUP table is preferred because the prefix index of ROLLUP matches better. 
### Some Explanations of ROLLUP -* The fundamental role of ROLLUP is to improve the query efficiency of some queries (whether by aggregating to reduce the amount of data or by modifying column order to match prefix indexes). Therefore, the meaning of ROLLUP has gone beyond the scope of "roll-up". That's why we named it Materized Index in the source code. +* The fundamental role of ROLLUP is to improve the query efficiency of some queries (whether by aggregating to reduce the amount of data or by modifying column order to match prefix indexes). Therefore, the meaning of ROLLUP has gone beyond the scope of "roll-up". That's why we named it Materialized Index in the source code. * ROLLUP is attached to the Base table and can be seen as an auxiliary data structure of the Base table. Users can create or delete ROLLUP based on the Base table, but cannot explicitly specify a query for a ROLLUP in the query. Whether ROLLUP is hit or not is entirely determined by the Doris system. -* ROLLUP data is stored in separate physical storage. Therefore, the more OLLUP you create, the more disk space you occupy. It also has an impact on the speed of import (the ETL phase of import automatically generates all ROLLUP data), but it does not reduce query efficiency (only better). +* ROLLUP data is stored in separate physical storage. Therefore, the more ROLLUP you create, the more disk space you occupy. It also has an impact on the speed of import (the ETL phase of import automatically generates all ROLLUP data), but it does not reduce query efficiency (only better). * Data updates for ROLLUP are fully synchronized with Base representations. Users need not care about this problem. * Columns in ROLLUP are aggregated in exactly the same way as Base tables. There is no need to specify or modify ROLLUP when creating it. 
* A necessary (inadequate) condition for a query to hit ROLLUP is that all columns ** (including the query condition columns in select list and where) involved in the query exist in the column of the ROLLUP. Otherwise, the query can only hit the Base table. * Certain types of queries (such as count (*)) cannot hit ROLLUP under any conditions. See the next section **Limitations of the aggregation model**. * The query execution plan can be obtained by `EXPLAIN your_sql;` command, and in the execution plan, whether ROLLUP has been hit or not can be checked. -Base tables and all created ROLLUPs can be displayed by `DESC tbl_name ALL;` statement. +Base tables and all created ROLLUPs can be displayed by `DESC tbl_name ALL;` statement. In this document, you can see [Query how to hit Rollup] (hit-the-rollup) @@ -622,7 +622,7 @@ Therefore, when there are frequent count (*) queries in the business, we recomme Add a count column and import the data with the column value **equal to 1**. The result of `select count (*) from table;`is equivalent to `select sum (count) from table;` The query efficiency of the latter is much higher than that of the former. However, this method also has limitations, that is, users need to guarantee that they will not import rows with the same AGGREGATE KEY column repeatedly. Otherwise, `select sum (count) from table;`can only express the number of rows originally imported, not the semantics of `select count (*) from table;` -Another way is to **change the aggregation type of the count'column above to REPLACE, and still weigh 1**. Then`select sum (count) from table;` and `select count (*) from table;` the results will be consistent. +Another way is to **change the aggregation type of the count column above to REPLACE, and still weigh 1**. Then `select sum (count) from table;` and `select count (*) from table;` the results will be consistent.
And in this way, there is no restriction on importing duplicate rows. ### Duplicate Model diff --git a/docs/en/getting-started/hit-the-rollup.md b/docs/en/getting-started/hit-the-rollup.md index f8a0fa9dc39e30..7a1e224b09720d 100644 --- a/docs/en/getting-started/hit-the-rollup.md +++ b/docs/en/getting-started/hit-the-rollup.md @@ -279,7 +279,7 @@ See the following queries: `SELECT SUM(k11) FROM test_rollup WHERE k1 = 10 AND k2 > 200 AND k3 in (1,2,3);` -Firstly, it judges whether the query can hit the aggregated Rolup table. After checking the graph above, it is possible. Then the condition contains three conditions: k1, K2 and k3. The first three columns of test_rollup, rollup1 and rollup2 contain all the three conditions. So the prefix index length is the same. Then, it is obvious that the aggregation degree of rollup2 is the highest when comparing the number of rows. Row 2 is selected because of the minimum number of rows. +Firstly, it judges whether the query can hit the aggregated Rollup table. After checking the graph above, it is possible. Then the condition contains three conditions: k1, K2 and k3. The first three columns of test_rollup, rollup1 and rollup2 contain all the three conditions. So the prefix index length is the same. Then, it is obvious that the aggregation degree of rollup2 is the highest when comparing the number of rows. Row 2 is selected because of the minimum number of rows. ``` | 0:OlapScanNode | diff --git a/docs/en/installing/compilation.md b/docs/en/installing/compilation.md index 0d4d2c7256d456..94bd5f99de2560 100644 --- a/docs/en/installing/compilation.md +++ b/docs/en/installing/compilation.md @@ -84,7 +84,7 @@ Note: For different versions of Oris, you need to download the corresponding mir ### Self-compiling Development Environment Mirror -You can also create a Doris development environment mirror yourself, referring specifically to the `docker/README.md'file. 
+You can also create a Doris development environment mirror yourself, referring specifically to the `docker/README.md` file. ## Direct Compilation (CentOS/Ubuntu) diff --git a/docs/en/installing/install-deploy.md b/docs/en/installing/install-deploy.md index bac5ab4f330e5a..17d2ea0b585678 100644 --- a/docs/en/installing/install-deploy.md +++ b/docs/en/installing/install-deploy.md @@ -211,7 +211,7 @@ Broker is deployed as a plug-in, independent of Doris. If you need to import dat `ALTER SYSTEM ADD BROKER broker_name "host1:port1","host2:port2",...;` - The host is Broker's node ip; the port is brokeripcport in the Broker configuration file. + The host is Broker's node ip; the port is broker port in the Broker configuration file. * View Broker status @@ -362,7 +362,7 @@ Broker is a stateless process that can be started or stopped at will. Of course, After the BE process starts, if there is data before, there may be several minutes of data index loading time. - If BE is started for the first time or the BE has not joined any cluster, the BE log will periodically scroll the words `waiting to receive first heartbeat from frontend`. BE has not received Master's address through FE's heartbeat and is waiting passively. This error log will disappear after ADD BACKEND in FE sends the heartbeat. If the word `````master client', get client from cache failed. host:, port: 0, code: 7````` master client'appears again after receiving heartbeat, it indicates that FE has successfully connected BE, but BE cannot actively connect FE. It may be necessary to check the connectivity of rpc_port from BE to FE. + If BE is started for the first time or the BE has not joined any cluster, the BE log will periodically scroll the words `waiting to receive first heartbeat from frontend`. BE has not received Master's address through FE's heartbeat and is waiting passively. This error log will disappear after ADD BACKEND in FE sends the heartbeat. 
If the word `````master client', get client from cache failed. host:, port: 0, code: 7````` master client appears again after receiving heartbeat, it indicates that FE has successfully connected BE, but BE cannot actively connect FE. It may be necessary to check the connectivity of rpc_port from BE to FE. If BE has been added to the cluster, the heartbeat log from FE should be scrolled every five seconds: ```get heartbeat, host:xx. xx.xx.xx, port:9020, cluster id:xxxxxxx```, indicating that the heartbeat is normal. diff --git a/docs/en/internal/doris_storage_optimization.md b/docs/en/internal/doris_storage_optimization.md index 6ceccadde8a577..1dbf69766d5be9 100644 --- a/docs/en/internal/doris_storage_optimization.md +++ b/docs/en/internal/doris_storage_optimization.md @@ -38,7 +38,7 @@ Documents include: - Index Region: Doris stores the index data of each column in Index Region, where the data is loaded according to column granularity, so the data information of the following column is stored separately. - Footer - FileFooterPB: Metadata Information for Definition Files - - Chesum of 4 bytes of footer Pb content + - Checksum of 4 bytes of footer Pb content - Four bytes FileFooterPB message length for reading FileFooterPB - The 8 byte MAGIC CODE is stored in the last bit to facilitate the identification of file types in different scenarios. diff --git a/docs/en/internal/grouping_sets_design.md b/docs/en/internal/grouping_sets_design.md index e680362a2e7324..bbbe74b0b45191 100644 --- a/docs/en/internal/grouping_sets_design.md +++ b/docs/en/internal/grouping_sets_design.md @@ -260,7 +260,7 @@ Presto supports composition, but not nesting. ## 2. Object -Support `GROUPING SETS`, `ROLLUP` and `CUBE ` syntax,impliments 1.1, 1.2, 1.3 1.4, 1.5, not support the combination +Support `GROUPING SETS`, `ROLLUP` and `CUBE ` syntax,implements 1.1, 1.2, 1.3 1.4, 1.5, not support the combination and nesting of GROUPING SETS at current version. 
### 2.1 GROUPING SETS Syntax @@ -473,10 +473,10 @@ select NULL, NULL, sum(k3) from t; #### 3.3.1 Tasks -1. Add GroupByClause, repalce groupingExprs. +1. Add GroupByClause, replace groupingExprs. 2. Add Grouping Sets, Cube and RollUp syntax. 3. Add GroupByClause in SelectStmt. -4. Add GroupingFunctionCallExpr, impliments grouping grouping_id function call +4. Add GroupingFunctionCallExpr, implements grouping grouping_id function call 5. Add VirtualSlot, generate the map of virtual slots and real slots 6. add virtual column GROUPING_ID and other virtual columns generated by grouping and grouping_id, insert into groupingExprs, 7. Add a PlanNode, name as RepeatNode. For GroupingSets aggregation insert RepeatNode to the plan. diff --git a/docs/en/sql-reference/sql-functions/date-time-functions/date_format.md b/docs/en/sql-reference/sql-functions/date-time-functions/date_format.md index f0c0c85e9ba008..fcab2558f7f478 100644 --- a/docs/en/sql-reference/sql-functions/date-time-functions/date_format.md +++ b/docs/en/sql-reference/sql-functions/date-time-functions/date_format.md @@ -28,7 +28,7 @@ under the License. ## Description ### Syntax -'WARCHAR DATE'U FORMAT (DATETIME DATE, WARCHAR Format)' +'VARCHAR DATE_FORMAT(DATETIME date, VARCHAR format)' Convert the date type to a bit string according to the format type. diff --git a/docs/en/sql-reference/sql-functions/string-functions/instr.md b/docs/en/sql-reference/sql-functions/string-functions/instr.md index 901877aaab3990..4cad9cd9260b68 100644 --- a/docs/en/sql-reference/sql-functions/string-functions/instr.md +++ b/docs/en/sql-reference/sql-functions/string-functions/instr.md @@ -28,7 +28,7 @@ under the License. ## Description ### Syntax -'INT INSR (WARCHAR STR, WARCHAR substrate)' +'INT INSTR(VARCHAR str, VARCHAR substr)' Returns the location where substr first appeared in str (counting from 1). If substr does not appear in str, return 0.
diff --git a/docs/en/sql-reference/sql-statements/Account Management/CREATE USER.md b/docs/en/sql-reference/sql-statements/Account Management/CREATE USER.md index 1cc0e90309f138..79c72de8667277 100644 --- a/docs/en/sql-reference/sql-statements/Account Management/CREATE USER.md +++ b/docs/en/sql-reference/sql-statements/Account Management/CREATE USER.md @@ -58,11 +58,11 @@ Later encrypted content can be obtained through PASSWORD (), for example: SELECT PASSWORD('123456'); -4. Create a user who is allowed to log in from the'192.168'subnet and specify its role as example_role +4. Create a user who is allowed to log in from the `192.168` subnet and specify its role as example_role CREATE USER 'jack'@'192.168.%' DEFAULT ROLE 'example_role'; -5. Create a user who is allowed to log in from the domain name'example_domain'. +5. Create a user who is allowed to log in from the domain name 'example_domain'. CREATE USER 'jack'@['example_domain'] IDENTIFIED BY '12345'; diff --git a/docs/en/sql-reference/sql-statements/Account Management/GRANT.md b/docs/en/sql-reference/sql-statements/Account Management/GRANT.md index 8c2ae659d2de82..397aa304e6dc74 100644 --- a/docs/en/sql-reference/sql-statements/Account Management/GRANT.md +++ b/docs/en/sql-reference/sql-statements/Account Management/GRANT.md @@ -36,7 +36,7 @@ GRANT privilege_list ON db_name[.tbl_name] TO user_identity [ROLE role_name] Privilege_list is a list of permissions that need to be granted, separated by commas. Currently Doris supports the following permissions: -NODE_PRIV: Operational privileges of cluster nodes, including operation of nodes'up and down lines. Only root users have this privilege and can not be given to other users. +NODE_PRIV: Operational privileges of cluster nodes, including operation of nodes' up and down lines. Only root users have this privilege and can not be given to other users. ADMIN_PRIV: All rights except NODE_PRIV. GRANT_PRIV: Permission to operate permissions. 
Including the creation and deletion of users, roles, authorization and revocation, password settings and so on. SELECT_PRIV: Read permissions for specified libraries or tables diff --git a/docs/en/sql-reference/sql-statements/Administration/ADMIN SET CONFIG.md b/docs/en/sql-reference/sql-statements/Administration/ADMIN SET CONFIG.md index 02f19b9fba87a3..ad87db27399081 100644 --- a/docs/en/sql-reference/sql-statements/Administration/ADMIN SET CONFIG.md +++ b/docs/en/sql-reference/sql-statements/Administration/ADMIN SET CONFIG.md @@ -28,7 +28,7 @@ under the License. ## Description This statement is used to set the configuration items for the cluster (currently only the configuration items for setting FE are supported). -Settable configuration items can be viewed through AMDIN SHOW FRONTEND CONFIG; commands. +Settable configuration items can be viewed through `ADMIN SHOW FRONTEND CONFIG;` commands. Grammar: diff --git a/docs/en/sql-reference/sql-statements/Administration/ALTER CLUSTER.md b/docs/en/sql-reference/sql-statements/Administration/ALTER CLUSTER.md index 618f033b686529..12643abd7d29d8 100644 --- a/docs/en/sql-reference/sql-statements/Administration/ALTER CLUSTER.md +++ b/docs/en/sql-reference/sql-statements/Administration/ALTER CLUSTER.md @@ -33,11 +33,7 @@ grammar ALTER CLUSTER cluster_name PROPERTIES ("key"="value", ...); -1. Scaling, scaling (according to the number of be existing in the cluster, large is scaling, small is scaling), scaling for synchronous operation, scaling for asynchronous operation, through the state of backend can be known whether the scaling is completed. - -Proerties ("Instrume = Unum"= "3") - -Instancefn Microsoft Yahei +1. Scaling, scaling (according to the number of be existing in the cluster, large is scaling, small is scaling), scaling for synchronous operation, scaling for asynchronous operation, through the state of backend can be known whether the scaling is completed. 
## example diff --git a/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md b/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md index 79844775754c40..ffe4fb3bd9d612 100644 --- a/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md +++ b/docs/en/sql-reference/sql-statements/Administration/ALTER SYSTEM.md @@ -106,7 +106,7 @@ ALTER SYSTEM SET LOAD ERRORS HUB PROPERTIES ("type"= "broker", "Name" = BOS, "path" = "bos://backup-cmy/logs", -"bosu endpoint" ="http://gz.bcebos.com", +"bos_endpoint" ="http://gz.bcebos.com", "bos_accesskey" = "069fc278xxxxxx24ddb522", "bos_secret_accesskey"="700adb0c6xxxxxx74d59eaa980a" ); diff --git a/docs/en/sql-reference/sql-statements/Administration/CREATE CLUSTER.md b/docs/en/sql-reference/sql-statements/Administration/CREATE CLUSTER.md index b0c0cfa24704ba..440939dce07c9e 100644 --- a/docs/en/sql-reference/sql-statements/Administration/CREATE CLUSTER.md +++ b/docs/en/sql-reference/sql-statements/Administration/CREATE CLUSTER.md @@ -43,9 +43,8 @@ Specify attributes of logical clusters PROPERTIES ("instance_num" = "3") -Instancefn Microsoft Yahei -2. Identify by'password'each logical cluster contains a superuser whose password must be specified when creating a logical cluster +2. Identify by 'password' each logical cluster contains a superuser whose password must be specified when creating a logical cluster ## example diff --git a/docs/en/sql-reference/sql-statements/Administration/CREATE FILE.md b/docs/en/sql-reference/sql-statements/Administration/CREATE FILE.md index b428b01e5d42ba..a7f13f30b90fc9 100644 --- a/docs/en/sql-reference/sql-statements/Administration/CREATE FILE.md +++ b/docs/en/sql-reference/sql-statements/Administration/CREATE FILE.md @@ -30,7 +30,7 @@ under the License. This statement is used to create and upload a file to the Doris cluster. 
This function is usually used to manage files that need to be used in some other commands, such as certificates, public key, private key, etc. -This command can be executed by users with amdin privileges only. +This command can be executed by users with admin privileges only. A file belongs to a database. This file can be used by users who have access to database. The size of a single file is limited to 1MB. diff --git a/docs/en/sql-reference/sql-statements/Data Definition/BACKUP.md b/docs/en/sql-reference/sql-statements/Data Definition/BACKUP.md index c588e2cb4963d9..2e8f20bb8a0747 100644 --- a/docs/en/sql-reference/sql-statements/Data Definition/BACKUP.md +++ b/docs/en/sql-reference/sql-statements/Data Definition/BACKUP.md @@ -31,7 +31,7 @@ Grammar: BACKUP SNAPSHOT [db_name].{snapshot_name} TO `repository_name` ON ( -"`Table `U name'[Distriction (`P1',...)], +`Table_name` [partition (`P1',...)], ... ) PROPERTIES ("key"="value", ...); diff --git a/docs/en/sql-reference/sql-statements/Data Definition/CREATE REPOSITORY.md b/docs/en/sql-reference/sql-statements/Data Definition/CREATE REPOSITORY.md index b27b50cf0dd7f7..95017f7544aafd 100644 --- a/docs/en/sql-reference/sql-statements/Data Definition/CREATE REPOSITORY.md +++ b/docs/en/sql-reference/sql-statements/Data Definition/CREATE REPOSITORY.md @@ -45,7 +45,7 @@ WITH BROKER `bos_broker` ON LOCATION "bos://palo_backup" PROPERTIES ( -"bosu endpoint" ="http://gz.bcebos.com", +"bos_endpoint" ="http://gz.bcebos.com", "bos_accesskey" = "069fc2786e664e63a5f111111114ddbs22", "bos_secret_accesskey"="70999999999999de274d59eaa980a" ); @@ -56,7 +56,7 @@ WITH BROKER `bos_broker` ON LOCATION "bos://palo_backup" PROPERTIES ( -"bosu endpoint" ="http://gz.bcebos.com", +"bos_endpoint" ="http://gz.bcebos.com", "bos_accesskey" = "069fc2786e664e63a5f111111114ddbs22", "bos_secret_accesskey"="70999999999999de274d59eaa980a" ); diff --git a/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE LIKE.md 
b/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE LIKE.md index 53239fd90088e5..e602eb1b63dd0e 100644 --- a/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE LIKE.md +++ b/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE LIKE.md @@ -36,7 +36,7 @@ Syntax: ``` Explain: - 1. The replicated table structures include Column Defination, Partitions, Table Properties, and so on + 1. The replicated table structures include Column Definition, Partitions, Table Properties, and so on 2. The SELECT privilege is required on the original table. 3. Support to copy external table such as MySQL. diff --git a/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md b/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md index f8a3188328391b..72130407c72746 100644 --- a/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md +++ b/docs/en/sql-reference/sql-statements/Data Definition/CREATE TABLE.md @@ -160,7 +160,7 @@ Syntax: Syntax: key_type(k1[,k2 ...]) Explain: - Data is orderd by specified key columns. And has different behaviors for different key desc. + Data is ordered by specified key columns. And has different behaviors for different key desc. AGGREGATE KEY: value columns will be aggregated is key columns are same. UNIQUE KEY: @@ -189,7 +189,7 @@ Syntax: 1) Partition name only support [A-z0-9_] 2) Partition key column's type should be: TINYINT, SMALLINT, INT, BIGINT, LARGEINT, DATE, DATETIME - 3) The range is [closed, open). And the lower bound of first partition is MIN VALUE of specifed column type. + 3) The range is [closed, open). And the lower bound of first partition is MIN VALUE of specified column type. 4) NULL values should be save in partition which includes MIN VALUE. 5) Support multi partition columns, the the default partition value is MIN VALUE. 2)Fixed Range @@ -304,7 +304,7 @@ Syntax: PROPERTIES ("storage_type"="column"); ``` -2.
Create an olap table, distributed by hash, with aggregation type. Also set storage mediumand cooldown time. +2. Create an olap table, distributed by hash, with aggregation type. Also set storage medium and cooldown time. ``` CREATE TABLE example_db.table_hash diff --git a/docs/en/sql-reference/sql-statements/Data Definition/Colocate Join.md b/docs/en/sql-reference/sql-statements/Data Definition/Colocate Join.md index f2027187554287..11730b55fae01e 100644 --- a/docs/en/sql-reference/sql-statements/Data Definition/Colocate Join.md +++ b/docs/en/sql-reference/sql-statements/Data Definition/Colocate Join.md @@ -61,7 +61,7 @@ PROPERTIES ( 1. Colcoate Table must be an OLAP-type table 2. The BUCKET number of tables with the same colocate_with attribute must be the same 3. The number of copies of tables with the same colocate_with attribute must be the same -4. Data types of DISTRIBUTTED Columns for tables with the same colocate_with attribute must be the same +4. Data types of DISTRIBUTED Columns for tables with the same colocate_with attribute must be the same 3 Colocate Join's applicable scenario: diff --git a/docs/en/sql-reference/sql-statements/Data Definition/DROP MATERIALIZED VIEW.md b/docs/en/sql-reference/sql-statements/Data Definition/DROP MATERIALIZED VIEW.md index bc90ec20db3743..cb1f3a8603a0c5 100644 --- a/docs/en/sql-reference/sql-statements/Data Definition/DROP MATERIALIZED VIEW.md +++ b/docs/en/sql-reference/sql-statements/Data Definition/DROP MATERIALIZED VIEW.md @@ -107,4 +107,4 @@ Query OK, 0 rows affected (0.00 sec) If it exists, it will be deleted; If it does not exist, no error will be reported. 
## keyword - DROP, MATERILIAZED, VIEW + DROP, MATERIALIZED, VIEW diff --git a/docs/en/sql-reference/sql-statements/Data Manipulation/CANCEL LABEL.md b/docs/en/sql-reference/sql-statements/Data Manipulation/CANCEL LABEL.md index 5b7ff63b6a788b..990f3b4331f3c0 100644 --- a/docs/en/sql-reference/sql-statements/Data Manipulation/CANCEL LABEL.md +++ b/docs/en/sql-reference/sql-statements/Data Manipulation/CANCEL LABEL.md @@ -50,4 +50,4 @@ ERRORS curl -u root -XPOST http://host:port/api/testDb/testLabel/_cancel ## keyword -Cancel, Rabel +Cancel, Label diff --git a/docs/en/sql-reference/sql-statements/Data Manipulation/insert.md b/docs/en/sql-reference/sql-statements/Data Manipulation/insert.md index 5397ce8ad13af1..2f4d06768d018d 100644 --- a/docs/en/sql-reference/sql-statements/Data Manipulation/insert.md +++ b/docs/en/sql-reference/sql-statements/Data Manipulation/insert.md @@ -58,13 +58,13 @@ INSERT INTO table_name ### Note -When the `INSERT'statement is currently executed, the default behavior for data that does not conform to the target table is filtering, such as string length. However, for business scenarios where data is not filtered, the session variable `enable_insert_strict'can be set to `true' to ensure that `INSERT'will not be successfully executed when data is filtered out. +When the `INSERT` statement is currently executed, the default behavior for data that does not conform to the target table is filtering, such as string length. However, for business scenarios where data is not filtered, the session variable `enable_insert_strict` can be set to `true` to ensure that `INSERT` will not be successfully executed when data is filtered out. ## example ` The test `table contains two columns `c1', `c2'. -1. Import a row of data into the `test'table +1. Import a row of data into the `test` table ``` INSERT INTO test VALUES (1, 2); @@ -76,7 +76,7 @@ INSERT INTO test (c1) VALUES (1); The first and second sentences have the same effect.
When the target column is not specified, the column order in the table is used as the default target column. The third and fourth statements express the same meaning, using the default value of `c2'column to complete data import. -2. Import multiline data into the `test'table at one time +2. Import multiline data into the `test` table at one time ``` INSERT INTO test VALUES (1, 2), (3, 2 + 2) diff --git a/docs/en/sql-reference/sql-statements/Data Types/DATE.md b/docs/en/sql-reference/sql-statements/Data Types/DATE.md index 41f359910bf8dc..05610fb8205f9d 100644 --- a/docs/en/sql-reference/sql-statements/Data Types/DATE.md +++ b/docs/en/sql-reference/sql-statements/Data Types/DATE.md @@ -31,7 +31,7 @@ Syntax: Date Convert input type to DATE type date -Date type, the current range of values is ['0000-01-01','9999-12-31'], and the default print form is'YYYYY-MM-DD'. +Date type, the current range of values is ['0000-01-01','9999-12-31'], and the default print form is 'YYYYY-MM-DD'. ## example mysql> SELECT DATE('2003-12-31 01:02:03'); diff --git a/docs/en/sql-reference/sql-statements/Data Types/DATETIME.md b/docs/en/sql-reference/sql-statements/Data Types/DATETIME.md index c04bd064298ef6..152f1da5a5720c 100644 --- a/docs/en/sql-reference/sql-statements/Data Types/DATETIME.md +++ b/docs/en/sql-reference/sql-statements/Data Types/DATETIME.md @@ -28,7 +28,7 @@ under the License. ## Description DATETIME Date and time type, value range is ['0000-01-01 00:00:00','9999-12-31 23:59:59']. -The form of printing is'YYYY-MM-DD HH:MM:SS' +The form of printing is 'YYYY-MM-DD HH:MM:SS' ## keyword DATETIME