From d38fdce673fd7c4c5b4d197f7189f108a0e710ef Mon Sep 17 00:00:00 2001
From: Jonathan Wei
Date: Fri, 3 Jan 2020 11:33:19 -0600
Subject: [PATCH] De-incubation cleanup in code, docs, packaging (#9108)

* De-incubation cleanup in code, docs, packaging

* remove unused docs script
---
 .github/pull_request_template.md | 8 +-
 CONTRIBUTING.md | 18 +-
 DISCLAIMER | 10 --
 LICENSE | 4 +-
 NOTICE | 2 +-
 README.template | 13 +-
 .../LazyFileSessionCredentialsProvider.java | 4 +-
 codestyle/spotbugs-exclude.xml | 2 +-
 .../druid/common/config/NullHandling.java | 2 +-
 .../input/impl/NewSpatialDimensionSchema.java | 2 +-
 .../druid/java/util/common/FileUtils.java | 2 +-
 .../guava/ParallelMergeCombiningSequence.java | 4 +-
 .../java/util/common/lifecycle/Lifecycle.java | 2 +-
 .../util/common/parsers/DelimitedParser.java | 2 +-
 .../emitter/core/ParametrizedUriEmitter.java | 2 +-
 .../apache/druid/timeline/Overshadowable.java | 2 +-
 .../timeline/VersionedIntervalTimeline.java | 2 +-
 .../druid/common/utils/StringUtilsTest.java | 2 +-
 .../java/util/common/GranularityTest.java | 2 +-
 .../VersionedIntervalTimelineTest.java | 2 +-
 dev/code-review/concurrency.md | 2 +-
 dev/committer-instructions.md | 54 +++----
 distribution/asf-release-process-guide.md | 74 ++++-----
 distribution/bin/find-missing-backports.py | 2 +-
 .../bin/get-milestone-contributors.py | 2 +-
 distribution/bin/get-milestone-prs.py | 2 +-
 distribution/bin/tag-missing-milestones.py | 5 +-
 distribution/docker/README.md | 4 +-
 distribution/docker/docker-compose.yml | 12 +-
 distribution/src/assembly/assembly.xml | 1 -
 docs/_bin/deploy-docs.sh | 142 ------------------
 docs/configuration/index.md | 4 +-
 docs/configuration/logging.md | 2 +-
 docs/dependencies/deep-storage.md | 2 +-
 docs/dependencies/metadata-storage.md | 2 +-
 docs/dependencies/zookeeper.md | 2 +-
 docs/design/auth.md | 2 +-
 docs/design/broker.md | 2 +-
 docs/design/coordinator.md | 2 +-
 docs/design/extensions-contrib/dropwizard.md | 2 +-
 docs/design/historical.md | 2 +-
 docs/design/index.md | 2 +-
 docs/design/indexer.md | 6 +-
 docs/design/indexing-service.md | 2 +-
 docs/design/middlemanager.md | 2 +-
 docs/design/overlord.md | 2 +-
 docs/design/peons.md | 2 +-
 docs/design/router.md | 4 +-
 docs/design/segments.md | 2 +-
 docs/development/build.md | 6 +-
 .../ambari-metrics-emitter.md | 2 +-
 docs/development/extensions-contrib/azure.md | 2 +-
 .../extensions-contrib/cassandra.md | 2 +-
 .../extensions-contrib/cloudfiles.md | 2 +-
 .../extensions-contrib/distinctcount.md | 2 +-
 .../extensions-contrib/graphite.md | 2 +-
 docs/development/extensions-contrib/influx.md | 2 +-
 .../extensions-contrib/influxdb-emitter.md | 2 +-
 .../extensions-contrib/kafka-emitter.md | 2 +-
 .../extensions-contrib/materialized-view.md | 2 +-
 .../momentsketch-quantiles.md | 2 +-
 .../moving-average-query.md | 2 +-
 .../extensions-contrib/opentsdb-emitter.md | 2 +-
 .../extensions-contrib/redis-cache.md | 2 +-
 .../extensions-contrib/sqlserver.md | 2 +-
 docs/development/extensions-contrib/statsd.md | 2 +-
 .../tdigestsketch-quantiles.md | 2 +-
 docs/development/extensions-contrib/thrift.md | 2 +-
 .../extensions-contrib/time-min-max.md | 2 +-
 .../extensions-core/approximate-histograms.md | 2 +-
 docs/development/extensions-core/avro.md | 2 +-
 .../extensions-core/bloom-filter.md | 2 +-
 .../extensions-core/datasketches-extension.md | 2 +-
 .../extensions-core/datasketches-hll.md | 2 +-
 .../extensions-core/datasketches-quantiles.md | 2 +-
 .../extensions-core/datasketches-theta.md | 2 +-
 .../extensions-core/datasketches-tuple.md | 2 +-
 .../extensions-core/druid-basic-security.md | 2 +-
 .../extensions-core/druid-kerberos.md | 2 +-
 .../extensions-core/druid-lookups.md | 2 +-
 docs/development/extensions-core/examples.md | 2 +-
 docs/development/extensions-core/google.md | 2 +-
 docs/development/extensions-core/hdfs.md | 2 +-
 .../kafka-extraction-namespace.md | 2 +-
 .../extensions-core/kafka-ingestion.md | 4 +-
 .../extensions-core/kinesis-ingestion.md | 8 +-
 .../extensions-core/lookups-cached-global.md | 2 +-
 docs/development/extensions-core/mysql.md | 2 +-
 docs/development/extensions-core/orc.md | 2 +-
 docs/development/extensions-core/parquet.md | 2 +-
 .../development/extensions-core/postgresql.md | 2 +-
 docs/development/extensions-core/protobuf.md | 2 +-
 docs/development/extensions-core/s3.md | 2 +-
 .../simple-client-sslcontext.md | 2 +-
 docs/development/extensions-core/stats.md | 2 +-
 .../development/extensions-core/test-stats.md | 2 +-
 docs/development/extensions.md | 2 +-
 docs/development/geo.md | 2 +-
 docs/development/javascript.md | 2 +-
 docs/development/modules.md | 2 +-
 docs/development/versioning.md | 2 +-
 docs/ingestion/data-formats.md | 2 +-
 docs/ingestion/data-management.md | 8 +-
 docs/ingestion/hadoop.md | 4 +-
 docs/ingestion/native-batch.md | 2 +-
 docs/ingestion/standalone-realtime.md | 2 +-
 docs/operations/basic-cluster-tuning.md | 2 +-
 docs/operations/dump-segment.md | 2 +-
 docs/operations/high-availability.md | 2 +-
 docs/operations/http-compression.md | 2 +-
 docs/operations/insert-segment-to-db.md | 2 +-
 docs/operations/kubernetes.md | 4 +-
 docs/operations/other-hadoop.md | 2 +-
 docs/operations/password-provider.md | 2 +-
 docs/operations/pull-deps.md | 2 +-
 docs/operations/reset-cluster.md | 2 +-
 docs/operations/rolling-updates.md | 2 +-
 docs/operations/rule-configuration.md | 2 +-
 docs/operations/segment-optimization.md | 2 +-
 docs/operations/tls-support.md | 4 +-
 docs/querying/aggregations.md | 2 +-
 docs/querying/caching.md | 2 +-
 docs/querying/datasource.md | 2 +-
 docs/querying/datasourcemetadataquery.md | 2 +-
 docs/querying/dimensionspecs.md | 2 +-
 docs/querying/filters.md | 2 +-
 docs/querying/granularities.md | 2 +-
 docs/querying/groupbyquery.md | 2 +-
 docs/querying/having.md | 2 +-
 docs/querying/hll-old.md | 2 +-
 docs/querying/joins.md | 2 +-
 docs/querying/lookups.md | 2 +-
 docs/querying/multi-value-dimensions.md | 2 +-
 docs/querying/multitenancy.md | 2 +-
 docs/querying/post-aggregations.md | 2 +-
 docs/querying/query-context.md | 2 +-
 docs/querying/querying.md | 2 +-
 docs/querying/scan-query.md | 2 +-
 docs/querying/searchquery.md | 2 +-
 docs/querying/segmentmetadataquery.md | 2 +-
 docs/querying/select-query.md | 2 +-
 docs/querying/sql.md | 4 +-
 docs/querying/timeboundaryquery.md | 2 +-
 docs/querying/timeseriesquery.md | 2 +-
 docs/querying/topnmetricspec.md | 2 +-
 docs/querying/topnquery.md | 2 +-
 docs/querying/virtual-columns.md | 2 +-
 docs/tutorials/cluster.md | 4 +-
 docs/tutorials/index.md | 2 +-
 docs/tutorials/tutorial-batch-hadoop.md | 2 +-
 docs/tutorials/tutorial-batch.md | 2 +-
 docs/tutorials/tutorial-compaction.md | 2 +-
 docs/tutorials/tutorial-delete-data.md | 2 +-
 docs/tutorials/tutorial-ingestion-spec.md | 2 +-
 docs/tutorials/tutorial-kafka.md | 2 +-
 docs/tutorials/tutorial-query.md | 2 +-
 docs/tutorials/tutorial-retention.md | 2 +-
 docs/tutorials/tutorial-rollup.md | 2 +-
 docs/tutorials/tutorial-transform-spec.md | 2 +-
 docs/tutorials/tutorial-update-data.md | 2 +-
 .../DistinctCountAggregatorFactory.java | 2 +-
 .../moving-average-query/README.md | 2 +-
 .../hll/HllSketchBuildBufferAggregator.java | 2 +-
 .../hll/HllSketchMergeBufferAggregator.java | 2 +-
 .../ArrayOfDoublesSketchBuildAggregator.java | 4 +-
 ...yOfDoublesSketchBuildBufferAggregator.java | 4 +-
 .../ArrayOfDoublesSketchMergeAggregator.java | 4 +-
 ...yOfDoublesSketchMergeBufferAggregator.java | 4 +-
 .../authentication/BasicHTTPEscalator.java | 2 +-
 .../security/kerberos/KerberosEscalator.java | 2 +-
 .../storage/hdfs/HdfsDataSegmentPusher.java | 2 +-
 .../storage/hdfs/HdfsStorageDruidModule.java | 2 +-
 .../org/apache/hadoop/fs/HadoopFsWrapper.java | 2 +-
 .../indexing/kafka/KafkaIndexTaskTest.java | 2 +-
 .../data/input/orc/OrcExtensionsModule.java | 2 +-
 .../parquet/ParquetExtensionsModule.java | 2 +-
 .../avro/DruidParquetAvroReadSupport.java | 2 +-
 .../druid/indexer/HadoopTuningConfig.java | 2 +-
 .../indexer/BatchDeltaIngestionTest.java | 2 +-
 .../druid/indexing/common/Counters.java | 4 +-
 .../common/actions/TaskAuditLogConfig.java | 2 +-
 .../druid/indexing/common/task/MoveTask.java | 2 +-
 .../task/batch/parallel/SubTaskSpec.java | 2 +-
 .../indexing/overlord/ForkingTaskRunner.java | 2 +-
 .../indexing/overlord/RemoteTaskRunner.java | 2 +-
 .../druid/indexing/overlord/TaskQueue.java | 2 +-
 .../overlord/hrtr/HttpRemoteTaskRunner.java | 2 +-
 .../SeekableStreamIndexTask.java | 2 +-
 .../supervisor/SeekableStreamSupervisor.java | 2 +-
 ...kRunnerRunPendingTasksConcurrencyTest.java | 2 +-
 .../overlord/http/OverlordResourceTest.java | 8 +-
 .../tests/indexer/AbstractIndexerTest.java | 2 +-
 licenses.yaml | 2 +-
 licenses/APACHE2 | 4 +-
 pom.xml | 16 +-
 processing/pom.xml | 2 +-
 .../query/IntervalChunkingQueryRunner.java | 2 +-
 .../IntervalChunkingQueryRunnerDecorator.java | 2 +-
 .../apache/druid/query/QueryToolChest.java | 2 +-
 .../FilteredAggregatorFactory.java | 2 +-
 .../JavaScriptAggregatorFactory.java | 4 +-
 .../post/JavaScriptPostAggregator.java | 4 +-
 .../extraction/JavaScriptExtractionFn.java | 4 +-
 .../query/filter/JavaScriptDimFilter.java | 4 +-
 .../druid/query/groupby/GroupByQuery.java | 2 +-
 .../AbstractBufferHashGrouper.java | 2 +-
 .../epinephelinae/BufferHashGrouper.java | 2 +-
 .../LimitedBufferHashGrouper.java | 2 +-
 .../groupby/strategy/GroupByStrategyV2.java | 2 +-
 .../SpecializationService.java | 2 +-
 .../search/ConciseBitmapDecisionHelper.java | 2 +-
 .../search/RoaringBitmapDecisionHelper.java | 2 +-
 .../query/search/SearchQueryMetrics.java | 2 +-
 .../apache/druid/segment/BitmapOffset.java | 2 +-
 .../druid/segment/DimensionSelector.java | 6 +-
 .../apache/druid/segment/FilteredOffset.java | 2 +-
 .../column/StringDictionaryEncodedColumn.java | 2 +-
 .../segment/incremental/IncrementalIndex.java | 2 +-
 .../IncrementalIndexStorageAdapter.java | 2 +-
 ...nalizingFieldAccessPostAggregatorTest.java | 2 +-
 .../query/groupby/GroupByQueryRunnerTest.java | 4 +-
 .../epinephelinae/BufferHashGrouperTest.java | 2 +-
 .../metadata/SegmentMetadataQueryTest.java | 2 +-
 .../TimeseriesQueryQueryToolChestTest.java | 2 +-
 .../druid/query/topn/TopNQueryRunnerTest.java | 4 +-
 .../data/GenericIndexedWriterTest.java | 2 +-
 .../IncrementalIndexStorageAdapterTest.java | 2 +-
 publications/demo/druid_demo.tex | 2 +-
 publications/whitepaper/druid.tex | 2 +-
 .../druid/client/CachingClusteredClient.java | 2 +-
 .../druid/client/DataSourcesSnapshot.java | 2 +-
 .../client/ImmutableDruidDataSource.java | 8 +-
 .../druid/discovery/DiscoveryDruidNode.java | 2 +-
 .../org/apache/druid/discovery/NodeRole.java | 2 +-
 .../IndexerSQLMetadataStorageCoordinator.java | 4 +-
 .../metadata/SQLMetadataRuleManager.java | 2 +-
.../metadata/SQLMetadataSegmentManager.java | 2 +- ...dRobinStorageLocationSelectorStrategy.java | 2 +- .../StorageLocationSelectorStrategy.java | 4 +- .../EventReceiverFirehoseFactory.java | 6 +- .../server/ClientQuerySegmentWalker.java | 2 +- .../apache/druid/server/QueryResource.java | 2 +- .../druid/server/coordination/ServerType.java | 4 +- .../CachingCostBalancerStrategyFactory.java | 2 +- .../coordinator/CoordinatorDynamicConfig.java | 10 +- .../coordinator/CostBalancerStrategy.java | 2 +- .../server/coordinator/DruidCoordinator.java | 2 +- .../coordinator/cost/SegmentsCostCache.java | 2 +- .../druid/server/coordinator/rules/Rule.java | 2 +- .../emitter/HttpEmitterSSLClientConfig.java | 2 +- .../druid/server/http/MetadataResource.java | 2 +- .../jetty/JettyServerModule.java | 2 +- .../druid/server/metrics/MonitorsConfig.java | 2 +- .../server/security/AuthenticationResult.java | 2 +- .../druid/curator/CuratorModuleTest.java | 2 +- .../apache/druid/curator/CuratorTestBase.java | 2 +- .../org/apache/druid/cli/CreateTables.java | 2 +- .../org/apache/druid/cli/ExportMetadata.java | 2 +- .../org/apache/druid/cli/ResetCluster.java | 2 +- .../apache/druid/cli/ValidateSegments.java | 2 +- .../cli/validate/DruidJsonValidator.java | 2 +- .../druid/sql/avatica/DruidStatement.java | 4 +- .../druid/sql/calcite/CalciteQueryTest.java | 12 +- web-console/src/utils/ingestion-spec.tsx | 2 +- .../views/load-data-view/load-data-view.tsx | 2 +- website/core/Footer.js | 2 +- website/pom.xml | 2 +- website/script/build-to-docs | 2 +- website/siteConfig.js | 2 +- 269 files changed, 411 insertions(+), 572 deletions(-) delete mode 100644 DISCLAIMER delete mode 100755 docs/_bin/deploy-docs.sh diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 79623f11a98f..6893cab217ae 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,11 +1,11 @@ - + Fixes #XXXX. +https://github.com/apache/druid/blob/master/dev/committer-instructions.md#pr-and-issue-action-item-checklist-for-committers. --> ### Description @@ -39,10 +39,10 @@ In each section, please describe design decisions made, including: This PR has: - [ ] been self-reviewed. - - [ ] using the [concurrency checklist](https://github.com/apache/incubator-druid/blob/master/dev/code-review/concurrency.md) (Remove this item if the PR doesn't have any relation to concurrency.) + - [ ] using the [concurrency checklist](https://github.com/apache/druid/blob/master/dev/code-review/concurrency.md) (Remove this item if the PR doesn't have any relation to concurrency.) - [ ] added documentation for new or modified features or behaviors. - [ ] added Javadocs for most classes and all non-trivial methods. Linked related entities via Javadoc links. -- [ ] added or updated version, license, or notice information in [licenses.yaml](https://github.com/apache/incubator-druid/blob/master/licenses.yaml) +- [ ] added or updated version, license, or notice information in [licenses.yaml](https://github.com/apache/druid/blob/master/licenses.yaml) - [ ] added comments explaining the "why" and the intent of the code wherever would not be obvious for an unfamiliar reader. - [ ] added unit tests or modified existing tests to cover new code paths. - [ ] added integration tests. 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1c8f3f3a5898..bd806bb9dd56 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,9 +24,9 @@ When submitting a pull request (PR), please use the following guidelines: - Make sure your code respects existing formatting conventions. In general, follow the same coding style as the code that you are modifying. - For Intellij you can import our code style settings xml: [`druid_intellij_formatting.xml`]( - https://github.com/apache/incubator-druid/raw/master/dev/druid_intellij_formatting.xml). + https://github.com/apache/druid/raw/master/dev/druid_intellij_formatting.xml). - For Eclipse you can import our code style settings xml: [`eclipse_formatting.xml`]( - https://github.com/apache/incubator-druid/raw/master/dev/eclipse_formatting.xml). + https://github.com/apache/druid/raw/master/dev/eclipse_formatting.xml). - Do add/update documentation appropriately for the change you are making. - If you are introducing a new feature you may want to first write about your idea for feedback to [dev@druid.apache.org](https://lists.apache.org/list.html?dev@druid.apache.org). Or create an issue @@ -37,21 +37,21 @@ When submitting a pull request (PR), please use the following guidelines: - Try to keep pull requests short and submit separate ones for unrelated features, but feel free to combine simple bugfixes/tests into one pull request. - If you are adding or updating a dependency, be sure to update the version, license, or notice information in - [licenses.yaml](https://github.com/apache/incubator-druid/blob/master/licenses.yaml) as appropriate to help ease + [licenses.yaml](https://github.com/apache/druid/blob/master/licenses.yaml) as appropriate to help ease LICENSE and NOTICE management for ASF releases. You can find more developers' resources in [`dev/`](dev) directory. ## GitHub Workflow -1. Fork the apache/incubator-druid repository into your GitHub account +1. Fork the apache/druid repository into your GitHub account - https://github.com/apache/incubator-druid/fork + https://github.com/apache/druid/fork 1. Clone your fork of the GitHub repository ```sh - git clone git@github.com:/incubator-druid.git + git clone git@github.com:/druid.git ``` replace `` with your GitHub username. @@ -59,7 +59,7 @@ You can find more developers' resources in [`dev/`](dev) directory. 1. Add a remote to keep up with upstream changes ``` - git remote add upstream https://github.com/apache/incubator-druid.git + git remote add upstream https://github.com/apache/druid.git ``` If you already have a copy, fetch upstream changes @@ -100,13 +100,13 @@ You can find more developers' resources in [`dev/`](dev) directory. Go to your Druid fork main page ``` - https://github.com//incubator-druid + https://github.com//druid ``` If you recently pushed your changes GitHub will automatically pop up a `Compare & pull request` button for any branches you recently pushed to. If you click that button it will automatically offer you to submit your pull-request - to the apache/incubator-druid repository. + to the apache/druid repository. - Give your pull-request a meaningful title. - In the description, explain your changes and the problem they are solving. diff --git a/DISCLAIMER b/DISCLAIMER deleted file mode 100644 index fc4c54dcd237..000000000000 --- a/DISCLAIMER +++ /dev/null @@ -1,10 +0,0 @@ -Apache Druid (incubating) is an effort undergoing incubation at the Apache Software -Foundation (ASF), sponsored by the Apache Incubator PMC. 
- -Incubation is required of all newly accepted projects until a further review -indicates that the infrastructure, communications, and decision making process -have stabilized in a manner consistent with other successful ASF projects. - -While incubation status is not necessarily a reflection of the completeness -or stability of the code, it does indicate that the project has yet to be -fully endorsed by the ASF. diff --git a/LICENSE b/LICENSE index 7e54613a180a..c747d409671a 100644 --- a/LICENSE +++ b/LICENSE @@ -201,9 +201,9 @@ See the License for the specific language governing permissions and limitations under the License. - APACHE DRUID (INCUBATING) SUBCOMPONENTS: + APACHE DRUID SUBCOMPONENTS: - Apache Druid (incubating) includes a number of subcomponents with + Apache Druid includes a number of subcomponents with separate copyright notices and license terms. Your use of the source code for these subcomponents is subject to the terms and conditions of the following licenses. diff --git a/NOTICE b/NOTICE index 752e2d646438..64bfc0677782 100644 --- a/NOTICE +++ b/NOTICE @@ -1,4 +1,4 @@ -Apache Druid (incubating) +Apache Druid Copyright 2019 The Apache Software Foundation This product includes software developed at diff --git a/README.template b/README.template index a390fcc3c565..11c97fd10622 100644 --- a/README.template +++ b/README.template @@ -17,7 +17,7 @@ under the License. -Apache Druid (incubating) is a high performance analytics data store for event-driven data. More information about Druid +Apache Druid is a high performance analytics data store for event-driven data. More information about Druid can be found on https://druid.apache.org. Documentation @@ -29,7 +29,7 @@ You can get started with Druid with our quickstart at https://druid.apache.org/d Build from Source ----------------- -You can build Apache Druid (incubating) directly from source. +You can build Apache Druid directly from source. Prerequisites: JDK 8, 8u92+ @@ -71,13 +71,6 @@ dev-subscribe@druid.apache.org. Contributing ------------ -If you find any bugs, please file a GitHub issue at https://github.com/apache/incubator-druid/issues. +If you find any bugs, please file a GitHub issue at https://github.com/apache/druid/issues. If you wish to contribute, please follow the guidelines listed at https://druid.apache.org/community/. - - -Disclaimer: Apache Druid is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the -Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the -infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful -ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it -does indicate that the project has yet to be fully endorsed by the ASF. 
diff --git a/cloud/aws-common/src/main/java/org/apache/druid/common/aws/LazyFileSessionCredentialsProvider.java b/cloud/aws-common/src/main/java/org/apache/druid/common/aws/LazyFileSessionCredentialsProvider.java index 483b32a777a0..a41f75705b71 100644 --- a/cloud/aws-common/src/main/java/org/apache/druid/common/aws/LazyFileSessionCredentialsProvider.java +++ b/cloud/aws-common/src/main/java/org/apache/druid/common/aws/LazyFileSessionCredentialsProvider.java @@ -33,8 +33,8 @@ public class LazyFileSessionCredentialsProvider implements AWSCredentialsProvide * in {@link #getUnderlyingProvider()} without worrying about final modifiers * on the fields of the created object * - * @see - * https://github.com/apache/incubator-druid/pull/6662#discussion_r237013157 + * @see + * https://github.com/apache/druid/pull/6662#discussion_r237013157 */ @MonotonicNonNull private volatile FileSessionCredentialsProvider provider; diff --git a/codestyle/spotbugs-exclude.xml b/codestyle/spotbugs-exclude.xml index f4d03ae87fb2..d0b7ee2de312 100644 --- a/codestyle/spotbugs-exclude.xml +++ b/codestyle/spotbugs-exclude.xml @@ -25,7 +25,7 @@ Some of the bugs will only occur once or twice on the codebase, while others will occur potentially a lot. - Reference: https://github.com/apache/incubator-druid/pull/7894/files + Reference: https://github.com/apache/druid/pull/7894/files --> diff --git a/core/src/main/java/org/apache/druid/common/config/NullHandling.java b/core/src/main/java/org/apache/druid/common/config/NullHandling.java index f5c46e6de64a..de381554ee4a 100644 --- a/core/src/main/java/org/apache/druid/common/config/NullHandling.java +++ b/core/src/main/java/org/apache/druid/common/config/NullHandling.java @@ -27,7 +27,7 @@ /** * Helper class for NullHandling. This class is used to switch between SQL compatible Null Handling behavior - * introduced as part of https://github.com/apache/incubator-druid/issues/4349 and the old druid behavior + * introduced as part of https://github.com/apache/druid/issues/4349 and the old druid behavior * where null values are replaced with default values e.g Null Strings are replaced with empty values. */ public class NullHandling diff --git a/core/src/main/java/org/apache/druid/data/input/impl/NewSpatialDimensionSchema.java b/core/src/main/java/org/apache/druid/data/input/impl/NewSpatialDimensionSchema.java index 181d26fceb06..00bb6b82f1af 100644 --- a/core/src/main/java/org/apache/druid/data/input/impl/NewSpatialDimensionSchema.java +++ b/core/src/main/java/org/apache/druid/data/input/impl/NewSpatialDimensionSchema.java @@ -28,7 +28,7 @@ /** * NOTE: * This class should be deprecated after Druid supports configurable index types on dimensions. - * When that exists, this should be the implementation: https://github.com/apache/incubator-druid/issues/2622 + * When that exists, this should be the implementation: https://github.com/apache/druid/issues/2622 * * This is a stop-gap solution to consolidate the dimension specs and remove the separate spatial * section in DimensionsSpec. 
diff --git a/core/src/main/java/org/apache/druid/java/util/common/FileUtils.java b/core/src/main/java/org/apache/druid/java/util/common/FileUtils.java index 7b038dcb0c6a..225b5730bf18 100644 --- a/core/src/main/java/org/apache/druid/java/util/common/FileUtils.java +++ b/core/src/main/java/org/apache/druid/java/util/common/FileUtils.java @@ -227,7 +227,7 @@ public static T writeAtomically(final File file, final File tmpDir, OutputSt // fsync to avoid write-then-rename-then-crash causing empty files on some filesystems. // Must do this before "out" or "fileChannel" is closed. No need to flush "out" first, since // Channels.newOutputStream is unbuffered. - // See also https://github.com/apache/incubator-druid/pull/5187#pullrequestreview-85188984 + // See also https://github.com/apache/druid/pull/5187#pullrequestreview-85188984 fileChannel.force(true); } diff --git a/core/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java b/core/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java index 173d721fec86..7e4518947787 100644 --- a/core/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java +++ b/core/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java @@ -50,7 +50,7 @@ * Artisanal, locally-sourced, hand-crafted, gluten and GMO free, bespoke, free-range, organic, small-batch parallel * merge combining sequence. * - * See proposal: https://github.com/apache/incubator-druid/issues/8577 + * See proposal: https://github.com/apache/druid/issues/8577 * * Functionally equivalent to wrapping {@link org.apache.druid.common.guava.CombiningSequence} around a * {@link MergeSequence}, but done in parallel on a {@link ForkJoinPool} running in 'async' mode. @@ -60,7 +60,7 @@ public class ParallelMergeCombiningSequence extends YieldingSequenceBase private static final Logger LOG = new Logger(ParallelMergeCombiningSequence.class); // these values were chosen carefully via feedback from benchmarks, - // see PR https://github.com/apache/incubator-druid/pull/8578 for details + // see PR https://github.com/apache/druid/pull/8578 for details public static final int DEFAULT_TASK_TARGET_RUN_TIME_MILLIS = 100; public static final int DEFAULT_TASK_INITIAL_YIELD_NUM_ROWS = 16384; public static final int DEFAULT_TASK_SMALL_BATCH_NUM_ROWS = 4096; diff --git a/core/src/main/java/org/apache/druid/java/util/common/lifecycle/Lifecycle.java b/core/src/main/java/org/apache/druid/java/util/common/lifecycle/Lifecycle.java index b5a3d895f7c9..15146106edc0 100644 --- a/core/src/main/java/org/apache/druid/java/util/common/lifecycle/Lifecycle.java +++ b/core/src/main/java/org/apache/druid/java/util/common/lifecycle/Lifecycle.java @@ -302,7 +302,7 @@ public void addMaybeStartHandler(Handler handler, Stage stage) throws Exception { if (!startStopLock.tryLock()) { // (*) This check is why the state should be changed before startStopLock.lock() in stop(). This check allows to - // spot wrong use of Lifecycle instead of entering deadlock, like https://github.com/apache/incubator-druid/issues/3579. + // spot wrong use of Lifecycle instead of entering deadlock, like https://github.com/apache/druid/issues/3579. 
if (state.get().equals(State.STOP)) { throw new ISE("Cannot add a handler in the process of Lifecycle stopping"); } diff --git a/core/src/main/java/org/apache/druid/java/util/common/parsers/DelimitedParser.java b/core/src/main/java/org/apache/druid/java/util/common/parsers/DelimitedParser.java index 849d45e85579..0c0dccc1ec5a 100644 --- a/core/src/main/java/org/apache/druid/java/util/common/parsers/DelimitedParser.java +++ b/core/src/main/java/org/apache/druid/java/util/common/parsers/DelimitedParser.java @@ -85,7 +85,7 @@ protected List parseLine(String input) * Copied from Guava's {@link Splitter#splitToList(CharSequence)}. * This is to avoid the error of the missing method signature when using an old Guava library. * For example, it may happen when running Druid Hadoop indexing jobs, since we may inherit the version provided by - * the Hadoop cluster. See https://github.com/apache/incubator-druid/issues/6801. + * the Hadoop cluster. See https://github.com/apache/druid/issues/6801. */ private List splitToList(String input) { diff --git a/core/src/main/java/org/apache/druid/java/util/emitter/core/ParametrizedUriEmitter.java b/core/src/main/java/org/apache/druid/java/util/emitter/core/ParametrizedUriEmitter.java index b72572aff509..1aec8be0e4ca 100644 --- a/core/src/main/java/org/apache/druid/java/util/emitter/core/ParametrizedUriEmitter.java +++ b/core/src/main/java/org/apache/druid/java/util/emitter/core/ParametrizedUriEmitter.java @@ -120,7 +120,7 @@ public void emit(Event event) try { URI uri = uriExtractor.apply(event); // get() before computeIfAbsent() is an optimization to avoid locking in computeIfAbsent() if not needed. - // See https://github.com/apache/incubator-druid/pull/6898#discussion_r251384586. + // See https://github.com/apache/druid/pull/6898#discussion_r251384586. HttpPostEmitter emitter = emitters.get(uri); if (emitter == null) { try { diff --git a/core/src/main/java/org/apache/druid/timeline/Overshadowable.java b/core/src/main/java/org/apache/druid/timeline/Overshadowable.java index 69b4336f459a..62d6b32cdc4e 100644 --- a/core/src/main/java/org/apache/druid/timeline/Overshadowable.java +++ b/core/src/main/java/org/apache/druid/timeline/Overshadowable.java @@ -25,7 +25,7 @@ * which has the same major version in the same time chunk. * * An Overshadowable overshadows another if its root partition range contains that of another - * and has a higher minorVersion. For more details, check https://github.com/apache/incubator-druid/issues/7491. + * and has a higher minorVersion. For more details, check https://github.com/apache/druid/issues/7491. */ public interface Overshadowable { diff --git a/core/src/main/java/org/apache/druid/timeline/VersionedIntervalTimeline.java b/core/src/main/java/org/apache/druid/timeline/VersionedIntervalTimeline.java index 728bd1ae5d53..2f2646846ab8 100644 --- a/core/src/main/java/org/apache/druid/timeline/VersionedIntervalTimeline.java +++ b/core/src/main/java/org/apache/druid/timeline/VersionedIntervalTimeline.java @@ -364,7 +364,7 @@ private TimelineObjectHolder timelineEntryToObjectHolde /** * This method should be deduplicated with DataSourcesSnapshot.determineOvershadowedSegments(): see - * https://github.com/apache/incubator-druid/issues/8070. + * https://github.com/apache/druid/issues/8070. 
*/ public Set> findFullyOvershadowed() { diff --git a/core/src/test/java/org/apache/druid/common/utils/StringUtilsTest.java b/core/src/test/java/org/apache/druid/common/utils/StringUtilsTest.java index 30d4433d0dda..1ee6643e2f65 100644 --- a/core/src/test/java/org/apache/druid/common/utils/StringUtilsTest.java +++ b/core/src/test/java/org/apache/druid/common/utils/StringUtilsTest.java @@ -27,7 +27,7 @@ */ public class StringUtilsTest { - // copied from https://github.com/apache/incubator-druid/pull/2612 + // copied from https://github.com/apache/druid/pull/2612 public static final String[] TEST_STRINGS = new String[]{ "peach", "péché", "pêche", "sin", "", "☃", "C", "c", "Ç", "ç", "G", "g", "Ğ", "ğ", "I", "ı", "İ", "i", diff --git a/core/src/test/java/org/apache/druid/java/util/common/GranularityTest.java b/core/src/test/java/org/apache/druid/java/util/common/GranularityTest.java index b605ab49abc8..76fd93022580 100644 --- a/core/src/test/java/org/apache/druid/java/util/common/GranularityTest.java +++ b/core/src/test/java/org/apache/druid/java/util/common/GranularityTest.java @@ -775,7 +775,7 @@ public void testCustomNestedPeriodFail() } } - @Test // Regression test for https://github.com/apache/incubator-druid/issues/5200. + @Test // Regression test for https://github.com/apache/druid/issues/5200. public void testIncrementOverSpringForward() { // Sao Paulo daylight savings time in 2017 starts at midnight. When we spring forward, 00:00:00 doesn't exist. diff --git a/core/src/test/java/org/apache/druid/timeline/VersionedIntervalTimelineTest.java b/core/src/test/java/org/apache/druid/timeline/VersionedIntervalTimelineTest.java index e1ab76c506b6..c946bc88bf0a 100644 --- a/core/src/test/java/org/apache/druid/timeline/VersionedIntervalTimelineTest.java +++ b/core/src/test/java/org/apache/druid/timeline/VersionedIntervalTimelineTest.java @@ -1026,7 +1026,7 @@ public void testNotFoundReturnsEmpty() Assert.assertTrue(timeline.lookup(Intervals.of("1970/1980")).isEmpty()); } - /** https://github.com/apache/incubator-druid/issues/3010 */ + /** https://github.com/apache/druid/issues/3010 */ @Test public void testRemoveIncompleteKeepsComplete() { diff --git a/dev/code-review/concurrency.md b/dev/code-review/concurrency.md index 2637bdfb17ea..716617fbaceb 100644 --- a/dev/code-review/concurrency.md +++ b/dev/code-review/concurrency.md @@ -151,7 +151,7 @@ Improving scalability https://github.com/code-review-checklists/java-concurrency#long-adder-for-hot-fields) - [Considered queues from JCTools instead of the standard concurrent queues?]( https://github.com/code-review-checklists/java-concurrency#jctools) - - [Caffeine cache is used instead of Guava?](https://github.com/apache/incubator-druid/issues/8399) + - [Caffeine cache is used instead of Guava?](https://github.com/apache/druid/issues/8399) - [Can apply speculation (optimistic concurrency) technique?]( https://github.com/code-review-checklists/java-concurrency#speculation) diff --git a/dev/committer-instructions.md b/dev/committer-instructions.md index ba683c6281f1..029fa7d62f9d 100644 --- a/dev/committer-instructions.md +++ b/dev/committer-instructions.md @@ -26,7 +26,7 @@ committer who visits an issue or a PR authored by a non-committer. 1. 
Add appropriate labels to the PR, in particular: - - [**`Design Review`**](https://github.com/apache/incubator-druid/labels/Design%20Review) - for changes that will be + - [**`Design Review`**](https://github.com/apache/druid/labels/Design%20Review) - for changes that will be hard to undo after they appear in some Druid release, and/or changes that will have lasting consequences in the codebase. Examples: - Major architectural changes or API changes @@ -46,7 +46,7 @@ committer who visits an issue or a PR authored by a non-committer. `@ExtensionPoint`), configuration options, emitted metric names, HTTP endpoint paths and parameters that are added or changed in the PR. If they are not listed, ask the PR author to update the PR description. - - [**`Incompatible`**](https://github.com/apache/incubator-druid/labels/Incompatible) - for changes that alter public + - [**`Incompatible`**](https://github.com/apache/druid/labels/Incompatible) - for changes that alter public API elements (`@PublicApi` or `@ExtensionPoint`), runtime configuration options, emitted metric names, HTTP endpoint behavior, or server behavior in some way that affects one of the following: @@ -62,7 +62,7 @@ committer who visits an issue or a PR authored by a non-committer. All `Incompatible` PRs should be labelled `Design Review` too, but not vice versa: some `Design Review` issues, proposals and PRs may not be `Incompatible`. - - [**`Release Notes`**](https://github.com/apache/incubator-druid/labels/Release%20Notes) - for important changes + - [**`Release Notes`**](https://github.com/apache/druid/labels/Release%20Notes) - for important changes that should be reflected in the next Druid’s version release notes. Critically, those are changes that require some server or query configuration changes made by Druid cluster operators to preserve the former cluster behaviour, i. e. the majority of PRs labelled `Incompatible`. However, some `Incompatible` PRs may not need to be labelled @@ -72,18 +72,18 @@ committer who visits an issue or a PR authored by a non-committer. Secondarily, PRs that add new features, improve performance or improve Druid cluster operation experience could also be labelled `Release Notes` at your discretion. - - [**`Bug`**](https://github.com/apache/incubator-druid/labels/Bug) / [**`Security`**]( - https://github.com/apache/incubator-druid/labels/Security) / [**`Feature`**]( - https://github.com/apache/incubator-druid/labels/Feature) / [**`Performance`**]( - https://github.com/apache/incubator-druid/labels/Performance) / [**`Refactoring`**]( - https://github.com/apache/incubator-druid/labels/Refactoring) / [**`Improvement`**]( - https://github.com/apache/incubator-druid/labels/Improvement) - can be used to distinguish between types of changes. - [**`Compatibility`**](https://github.com/apache/incubator-druid/labels/Compatibility) label also falls into this + - [**`Bug`**](https://github.com/apache/druid/labels/Bug) / [**`Security`**]( + https://github.com/apache/druid/labels/Security) / [**`Feature`**]( + https://github.com/apache/druid/labels/Feature) / [**`Performance`**]( + https://github.com/apache/druid/labels/Performance) / [**`Refactoring`**]( + https://github.com/apache/druid/labels/Refactoring) / [**`Improvement`**]( + https://github.com/apache/druid/labels/Improvement) - can be used to distinguish between types of changes. 
+ [**`Compatibility`**](https://github.com/apache/druid/labels/Compatibility) label also falls into this category, it's specifically for PRs that restore or improve compatibility with previous Druid versions if it was inadvertently broken, or for changes that ensure forward compatibility with future Druid versions, forseening specific changes that would otherwise break the compatibility. - - [**`Development Blocker`**](https://github.com/apache/incubator-druid/labels/Development%20Blocker) - for changes + - [**`Development Blocker`**](https://github.com/apache/druid/labels/Development%20Blocker) - for changes that need to be merged before some other PRs could even be published. `Development Blocker` PRs should be prioritized by reviewers, so that they could be merged as soon as possible, thus not blocking somebody's work. @@ -94,46 +94,46 @@ the author of the PR) or in a comment (if you have added labels to a PR submitte #creating-a-new-label-on-github) if none of the existing `Area` labels is applicable to the PR or issue. - [`Area - Automation/Static Analysis`]( - https://github.com/apache/incubator-druid/labels/Area%20-%20Automation%2FStatic%20Analysis) - for any PRs and issues + https://github.com/apache/druid/labels/Area%20-%20Automation%2FStatic%20Analysis) - for any PRs and issues about Checkstyle, forbidden-apis, IntelliJ inspections, code style, etc. Should also be used for PRs and issue related to TeamCity CI problems. - - [`Area - Cache`](https://github.com/apache/incubator-druid/labels/Area%20-%20Cache) - for PRs and issues related to + - [`Area - Cache`](https://github.com/apache/druid/labels/Area%20-%20Cache) - for PRs and issues related to Druid's query results cache (local or remote). Don't use for PRs that anyhow relate to caching in different contexts. - - [`Area - Dev`](https://github.com/apache/incubator-druid/labels/Area%20-%20Dev) - for PRs and issues related to the + - [`Area - Dev`](https://github.com/apache/druid/labels/Area%20-%20Dev) - for PRs and issues related to the project itself, such as adding developer's docs and checklists, Github issue and PR templates, Github-related issues. Don't use for PRs and issues related to CI problems: use either `Area - Testing` for problems with Travis or `Area - Automation/Static Analysis` for problems with TeamCity. PRs with `Area - Dev` label should usually change files in `dev/` or `.github/` directories. - - [`Area - Documentation`](https://github.com/apache/incubator-druid/labels/Area%20-%20Documentation) - for PRs and + - [`Area - Documentation`](https://github.com/apache/druid/labels/Area%20-%20Documentation) - for PRs and issues about Druid's documentation for users and cluster operators. Don't use for PRs and issues about the documentation of the Druid's development process itself: use `Area - Dev` for that purpose. Don't use for issues and PR regarding adding internal design documentation and specification to code, usually, in the form of Javadocs or comments (there is no specialized label for this). - - [`Area - Lookups`](https://github.com/apache/incubator-druid/labels/Area%20-%20Lookups) - for PRs and issues + - [`Area - Lookups`](https://github.com/apache/druid/labels/Area%20-%20Lookups) - for PRs and issues related to Druid's Query Time Lookups (QTL) feature. 
- - [`Area - Metadata`](https://github.com/apache/incubator-druid/labels/Area%20-%20Metadata) - for PRs and issues + - [`Area - Metadata`](https://github.com/apache/druid/labels/Area%20-%20Metadata) - for PRs and issues related to the organization and contents of the metadata store, the metadata store itself, and managing the metadata in the memory of various Druid nodes. - - [`Area - Null Handling`](https://github.com/apache/incubator-druid/labels/Area%20-%20Null%20Handling) - for PRs and - issues related to the [Null Handling project](https://github.com/apache/incubator-druid/issues/4349). - - [`Area - Operations`](https://github.com/apache/incubator-druid/labels/Area%20-%20Operations) - for PRs and issues + - [`Area - Null Handling`](https://github.com/apache/druid/labels/Area%20-%20Null%20Handling) - for PRs and + issues related to the [Null Handling project](https://github.com/apache/druid/issues/4349). + - [`Area - Operations`](https://github.com/apache/druid/labels/Area%20-%20Operations) - for PRs and issues related to Druid cluster operation process, for example, PRs adding more alerting, logging, changing configuration options. - - [`Area - Query UI`](https://github.com/apache/incubator-druid/labels/Area%20-%20Query%20UI) - for issues that + - [`Area - Query UI`](https://github.com/apache/druid/labels/Area%20-%20Query%20UI) - for issues that mention or discuss the questions related to presenting Druid query results for human perception. - - [`Area - Querying`](https://github.com/apache/incubator-druid/labels/Area%20-%20Querying) - for any PRs and issues + - [`Area - Querying`](https://github.com/apache/druid/labels/Area%20-%20Querying) - for any PRs and issues related to the process of making data queries against Druid clusters, including the PRs and issues about query processing and aggregators. - [`Area - Segment Balancing/Coordination`]( - https://github.com/apache/incubator-druid/labels/Area%20-%20Segment%20Balancing%2FCoordination) - for PRs and issue + https://github.com/apache/druid/labels/Area%20-%20Segment%20Balancing%2FCoordination) - for PRs and issue related to the process of loading and dropping segments in Druid clusters according to specified *rules*, and balancing segments between Historical nodes in clusters. Coordinator node is responsible for both processes. This label is not called "Area - Coordinator" because Coordinator has some other duties that are not covered by this label, for example, compacting segments. - - [`Area - Testing`](https://github.com/apache/incubator-druid/labels/Area%20-%20Testing) - use for any PRs and + - [`Area - Testing`](https://github.com/apache/druid/labels/Area%20-%20Testing) - use for any PRs and issues related to testing (including integration testing), Travis CI issues, and flaky tests. For flaky tests, also - add [`Flaky test`](https://github.com/apache/incubator-druid/labels/Flaky%20test) label. - - [`Area - Zookeeper/Curator`](https://github.com/apache/incubator-druid/labels/Area%20-%20Zookeeper%2FCurator) - for + add [`Flaky test`](https://github.com/apache/druid/labels/Flaky%20test) label. + - [`Area - Zookeeper/Curator`](https://github.com/apache/druid/labels/Area%20-%20Zookeeper%2FCurator) - for any PRs and issues related to ZooKeeper, Curator, and node discovery in Druid. @@ -147,7 +147,7 @@ problems even if you don't plan to review the PR. 6. 
If you create an issue that is relatively small and self-contained and you don't plan to work on it in the near future, consider labelling it [**`Contributions Welcome`**]( -https://github.com/apache/incubator-druid/labels/Contributions%20Welcome) so that other people know that the issue is +https://github.com/apache/druid/labels/Contributions%20Welcome) so that other people know that the issue is free to pick up and is relatively easily doable even for those who are not very familiar with the codebase. ## PR merge action item checklist diff --git a/distribution/asf-release-process-guide.md b/distribution/asf-release-process-guide.md index 863799ffe806..8ce235330cbb 100644 --- a/distribution/asf-release-process-guide.md +++ b/distribution/asf-release-process-guide.md @@ -21,7 +21,7 @@ ### Announce intention to release -First up in performing an official release of Apache Druid (incubating) is to announce in the dev mailing list, dev@druid.apache.org, that it is about time for the next (approximately) quarterly release, or, that there is a critical bug that warrants doing a bug fix release, whatever the reason happens to be. Check for any critical bugs that are still open, or issues or PRs tagged with the release milestone, and give the community a bit of heads up to try and wrap up anything that _needs_ to be in the next release. +First up in performing an official release of Apache Druid is to announce in the dev mailing list, dev@druid.apache.org, that it is about time for the next (approximately) quarterly release, or, that there is a critical bug that warrants doing a bug fix release, whatever the reason happens to be. Check for any critical bugs that are still open, or issues or PRs tagged with the release milestone, and give the community a bit of heads up to try and wrap up anything that _needs_ to be in the next release. ### Create a release branch @@ -181,7 +181,7 @@ It is also the release managers responsibility for correctly assigning all PRs m | [find-missing-backports](bin/find-missing-backports.py) | Find PRs which have been back-ported to one release branch but not another. Useful if a bug fix release based on the previous release is required during a release cycle. | -Next create an issue in the Druid github to contain the release notes and allow the community to provide feedback prior to the release. Make sure to attach it to the release milestone in github. It is highly recommended to review [previous release notes for reference](https://github.com/apache/incubator-druid/issues?utf8=%E2%9C%93&q=is%3Aissue+%22incubating+release+notes%22+label%3A%22Release+Notes%22+is%3Aclosed+) of how to best structure them. Be sure to call out any exciting new features, important bug fixes, and any compatibility concerns for users or operators to consider when upgrading to this release. +Next create an issue in the Druid github to contain the release notes and allow the community to provide feedback prior to the release. Make sure to attach it to the release milestone in github. It is highly recommended to review [previous release notes for reference](https://github.com/apache/druid/issues?utf8=%E2%9C%93&q=is%3Aissue+%22incubating+release+notes%22+label%3A%22Release+Notes%22+is%3Aclosed+) of how to best structure them. Be sure to call out any exciting new features, important bug fixes, and any compatibility concerns for users or operators to consider when upgrading to this release. 
## Building a release candidate @@ -201,7 +201,7 @@ In this example it will create a tag, `druid-0.16.0-incubating-rc3`. If this rel ### Do a clean clone (so the source distribution does not pick up extra files) ```bash -$ git clone git@github.com:apache/incubator-druid.git druid-release +$ git clone git@github.com:apache/druid.git druid-release ``` ### Switch to tag @@ -263,18 +263,18 @@ $ svn commit -m 'add 0.16.0-incubating-rc3 artifacts' ### Update druid.staged.apache.org -1. Pull https://github.com/apache/incubator-druid-website and https://github.com/apache/incubator-druid-website-src. These repositories should be in the same directory as your Druid repository that should have the release tag checked out. +1. Pull https://github.com/apache/druid-website and https://github.com/apache/druid-website-src. These repositories should be in the same directory as your Druid repository that should have the release tag checked out. -2. From incubator-druid-website, checkout branch `asf-staging`. +2. From druid-website, checkout branch `asf-staging`. -3. From incubator-druid-website-src, run `./release.sh 0.16.0-incubating 0.16.0-incubating`, replacing `0.16.0-incubating` where the first argument is the release version and 2nd argument is commit-ish. This script will: +3. From druid-website-src, run `./release.sh 0.16.0-incubating 0.16.0-incubating`, replacing `0.16.0-incubating` where the first argument is the release version and 2nd argument is commit-ish. This script will: * checkout the tag of the Druid release version -* build the docs for that version into incubator-druid-website-src -* build incubator-druid-website-src into incubator-druid-website -* stage incubator-druid-website-src and incubator-druid-website repositories to git. +* build the docs for that version into druid-website-src +* build druid-website-src into druid-website +* stage druid-website-src and druid-website repositories to git. -4. Make a PR to the src repo (https://github.com/apache/incubator-druid-website-src) for the release branch. Once the website PR is pushed to `asf-site`, https://druid.staged.apache.org/ will be updated near immediately with the new docs. +4. Make a PR to the src repo (https://github.com/apache/druid-website-src) for the release branch. Once the website PR is pushed to `asf-site`, https://druid.staged.apache.org/ will be updated near immediately with the new docs. ## Release candidates and voting @@ -290,7 +290,7 @@ For the Druid community vote, send an email to dev@druid.apache.org, using somet ##### Subject ```plaintext -[VOTE] Release Apache Druid (incubating) 0.16.0 [RC3] +[VOTE] Release Apache Druid 0.16.0 [RC3] ``` ##### Body @@ -298,17 +298,17 @@ For the Druid community vote, send an email to dev@druid.apache.org, using somet ```plaintext Hi all, -I have created a build for Apache Druid (incubating) 0.16.0, release +I have created a build for Apache Druid 0.16.0, release candidate 3. Thanks for everyone who has helped contribute to the release! 
You can read the proposed release notes here: -https://github.com/apache/incubator-druid/issues/8369 +https://github.com/apache/druid/issues/8369 The release candidate has been tagged in GitHub as druid-0.16.0-incubating-rc3 (54d29e438a4df34d75e2385af6cefd1092c4ebb3), available here: -https://github.com/apache/incubator-druid/releases/tag/druid-0.16.0-incubating-rc3 +https://github.com/apache/druid/releases/tag/druid-0.16.0-incubating-rc3 The artifacts to be voted on are located here: https://dist.apache.org/repos/dist/dev/incubator/druid/0.16.0-incubating-rc3/ @@ -318,7 +318,7 @@ https://druid.staged.apache.org/docs/0.16.0-incubating/design/index.html A Docker image containing the binary of the release candidate can be retrieved via: -docker pull apache/incubator-druid:0.16.0-incubating-rc3 +docker pull apache/druid:0.16.0-incubating-rc3 artifact checksums src: @@ -372,13 +372,13 @@ majority of at least three +1 PMC votes are cast. Once the vote has passed, the second stage vote will be called on the Apache Incubator mailing list to get approval from the Incubator PMC. -[ ] +1 Release this package as Apache Druid (incubating) 0.16.0 +[ ] +1 Release this package as Apache Druid 0.16.0 [ ] 0 I don't feel strongly about it, but I'm okay with the release [ ] -1 Do not release this package because... Thanks! -Apache Druid (incubating) is an effort undergoing incubation at The Apache +Apache Druid is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have @@ -423,7 +423,7 @@ Once the Druid community vote passes (or fails), close the vote with a thread li ##### Subject ```plaintext -[RESULT][VOTE] Release Apache Druid (incubating) 0.16.0 [RC3] +[RESULT][VOTE] Release Apache Druid 0.16.0 [RC3] ``` ##### Body @@ -461,7 +461,7 @@ Here is the IPMC vote template. Please note that "6190EEFC" in the template is y ##### Subject ```plaintext -[VOTE] Release Apache Druid (incubating) 0.16.0 [RC3] +[VOTE] Release Apache Druid 0.16.0 [RC3] ``` ##### Body @@ -470,12 +470,12 @@ Here is the IPMC vote template. Please note that "6190EEFC" in the template is y Hi IPMC, The Apache Druid community has voted on and approved a proposal to release -Apache Druid (incubating) 0.16.0 (rc3). +Apache Druid 0.16.0 (rc3). We now kindly request the Incubator PMC members review and vote on this incubator release. -Apache Druid (incubating) is a high performance analytics data store for +Apache Druid is a high performance analytics data store for event-driven data. The community voting thread can be found here: @@ -495,12 +495,12 @@ distribute this file then we intend to switch to a different example data file in a future release. 
The release notes are available here: -https://github.com/apache/incubator-druid/issues/8369 +https://github.com/apache/druid/issues/8369 The release candidate has been tagged in GitHub as druid-0.16.0-incubating-rc3 (54d29e438a4df34d75e2385af6cefd1092c4ebb3), available here: -https://github.com/apache/incubator-druid/releases/tag/druid-0.16.0-incubating-rc3 +https://github.com/apache/druid/releases/tag/druid-0.16.0-incubating-rc3 The artifacts to be voted on are located here: https://dist.apache.org/repos/dist/dev/incubator/druid/0.16.0-incubating-rc3/ @@ -510,7 +510,7 @@ https://repository.apache.org/content/repositories/orgapachedruid-1009/ A Docker image containing the binary of the release candidate can be retrieved via: -docker pull apache/incubator-druid:0.16.0-incubating-rc3 +docker pull apache/druid:0.16.0-incubating-rc3 artifact checksums src: @@ -539,7 +539,7 @@ mvn apache-rat:check -Prat This vote will be open for at least 72 hours. The vote will pass if a majority of at least three +1 IPMC votes are cast. -[ ] +1 Release this package as Apache Druid (incubating) 0.16.0 +[ ] +1 Release this package as Apache Druid 0.16.0 [ ] 0 I don't feel strongly about it, but I'm okay with the release [ ] -1 Do not release this package because... @@ -549,7 +549,7 @@ community to validate this release. On behalf of the Apache Druid Community, Clint -Apache Druid (incubating) is an effort undergoing incubation at The Apache +Apache Druid is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have @@ -561,7 +561,7 @@ fully endorsed by the ASF. ### Cancelling a vote -If for any reason during the Druid PPMC or IPMC vote a blocking issue becomes apparent, a vote should be officially cancelled by sending an email with the following subject line: `[CANCEL][VOTE] Release Apache Druid (incubating) 0.15.1 [RC1]` and the reasons for the cancellation in the body. +If for any reason during the Druid PPMC or IPMC vote a blocking issue becomes apparent, a vote should be officially cancelled by sending an email with the following subject line: `[CANCEL][VOTE] Release Apache Druid 0.15.1 [RC1]` and the reasons for the cancellation in the body. ### Previous vote threads for additional examples @@ -585,7 +585,7 @@ Once a release candidate has passed the incubator PMC vote, you'll need to do th ##### Subject ```plaintext -[RESULT] [VOTE] Release Apache Druid (incubating) 0.16.0 [RC3] +[RESULT] [VOTE] Release Apache Druid 0.16.0 [RC3] ``` ##### Body @@ -593,7 +593,7 @@ Once a release candidate has passed the incubator PMC vote, you'll need to do th ```plaintext Hi all, -The vote to release Apache Druid (incubating) 0.16.0 has passed with 3 +1 +The vote to release Apache Druid 0.16.0 has passed with 3 +1 binding votes: Julian Hyde @@ -655,7 +655,7 @@ http://www.apache.org/legal/release-policy.html#release-announcements ### Update druid.apache.org -1. Pull https://github.com/apache/incubator-druid-website and https://github.com/apache/incubator-druid-website-src. These repositories should be in the same directory as your Druid repository that should have the release tag checked out. +1. Pull https://github.com/apache/druid-website and https://github.com/apache/druid-website-src. These repositories should be in the same directory as your Druid repository that should have the release tag checked out. 2. 
To update the downloads page of the website, update the _config.yml file in the root of the website src repo. Versions are grouped by release branch: @@ -671,14 +671,14 @@ druid_versions: date: 2019-08-15 ``` -3. From incubator-druid-website-src, run `./release.sh 0.16.0-incubating 0.16.0-incubating`, replacing `0.16.0-incubating` where the first argument is the release version and 2nd argument is commit-ish. This script will: +3. From druid-website-src, run `./release.sh 0.16.0-incubating 0.16.0-incubating`, replacing `0.16.0-incubating` where the first argument is the release version and 2nd argument is commit-ish. This script will: * checkout the tag of the Druid release version -* build the docs for that version into incubator-druid-website-src -* build incubator-druid-website-src into incubator-druid-website -* stage incubator-druid-website-src and incubator-druid-website repositories to git. +* build the docs for that version into druid-website-src +* build druid-website-src into druid-website +* stage druid-website-src and druid-website repositories to git. -4. Make a PR to the src repo (https://github.com/apache/incubator-druid-website-src) and to the website repo (https://github.com/apache/incubator-druid-website). Once the website PR is merged, https://druid.apache.org/ will be updated immediately. +4. Make a PR to the src repo (https://github.com/apache/druid-website-src) and to the website repo (https://github.com/apache/druid-website). Once the website PR is merged, https://druid.apache.org/ will be updated immediately. ### Draft a release on github @@ -696,7 +696,7 @@ Additionally, announce it to the Druid official ASF Slack channel, https://druid ##### subject ```plaintext -[ANNOUNCE] Apache Druid (incubating) 0.16.0 release +[ANNOUNCE] Apache Druid 0.16.0 release ``` ##### body @@ -722,7 +722,7 @@ Source and binary distributions can be downloaded from: https://druid.apache.org/downloads.html Release notes are at: -https://github.com/apache/incubator-druid/releases/tag/druid-0.16.0-incubating +https://github.com/apache/druid/releases/tag/druid-0.16.0-incubating A big thank you to all the contributors in this milestone release! 
diff --git a/distribution/bin/find-missing-backports.py b/distribution/bin/find-missing-backports.py index 60f374d49491..ec72538b6cbb 100755 --- a/distribution/bin/find-missing-backports.py +++ b/distribution/bin/find-missing-backports.py @@ -100,7 +100,7 @@ def find_next_url(links): release_pr_subjects.add(title) # Get all closed PRs and filter out with milestone -next_url = "https://api.github.com/repos/apache/incubator-druid/pulls?state=closed" +next_url = "https://api.github.com/repos/apache/druid/pulls?state=closed" while next_url is not None: resp = requests.get(next_url, auth=(github_username, os.environ["GIT_TOKEN"])) diff --git a/distribution/bin/get-milestone-contributors.py b/distribution/bin/get-milestone-contributors.py index 1ea62d568b94..ceef1a7c29e9 100755 --- a/distribution/bin/get-milestone-contributors.py +++ b/distribution/bin/get-milestone-contributors.py @@ -35,7 +35,7 @@ # Get all users who created a closed issue or merged PR for a given milestone while not done: - resp = requests.get("https://api.github.com/repos/apache/incubator-druid/issues?milestone=%s&state=closed&page=%s" % (milestone_num, page_counter)) + resp = requests.get("https://api.github.com/repos/apache/druid/issues?milestone=%s&state=closed&page=%s" % (milestone_num, page_counter)) if "Link" in resp.headers: pagination_link = resp.headers["Link"] diff --git a/distribution/bin/get-milestone-prs.py b/distribution/bin/get-milestone-prs.py index 0948c0238c09..f78a70e37c77 100755 --- a/distribution/bin/get-milestone-prs.py +++ b/distribution/bin/get-milestone-prs.py @@ -56,7 +56,7 @@ # wait 3 seconds between calls to avoid hitting the rate limit time.sleep(3) - search_url = "https://api.github.com/search/issues?q=type:pr+is:merged+is:closed+repo:apache/incubator-druid+SHA:{}" + search_url = "https://api.github.com/search/issues?q=type:pr+is:merged+is:closed+repo:apache/druid+SHA:{}" resp = requests.get(search_url.format(commit_id), auth=(github_username, os.environ["GIT_TOKEN"])) resp_json = resp.json() diff --git a/distribution/bin/tag-missing-milestones.py b/distribution/bin/tag-missing-milestones.py index 9f78d284ab88..0841bc00daa5 100755 --- a/distribution/bin/tag-missing-milestones.py +++ b/distribution/bin/tag-missing-milestones.py @@ -27,7 +27,6 @@ sys.stderr.write(" It is also necessary to set a GIT_TOKEN environment variable containing a personal access token.\n") sys.exit(1) - github_username = sys.argv[1] previous_release_commit = sys.argv[2] new_release_commit = sys.argv[3] @@ -40,7 +39,7 @@ for sha in all_commits.splitlines(): try: - url = "https://api.github.com/repos/apache/incubator-druid/commits/{}/pulls".format(sha) + url = "https://api.github.com/repos/apache/druid/commits/{}/pulls".format(sha) headers = {'Accept': 'application/vnd.github.groot-preview+json'} pull_requests = requests.get(url, headers=headers, auth=(github_username, os.environ["GIT_TOKEN"])).json() @@ -49,7 +48,7 @@ pr_number = pr['number'] if pr['milestone'] is None: print("Tagging Pull Request {} with milestone {}".format(pr_number, milestone)) - url = "https://api.github.com/repos/apache/incubator-druid/issues/{}".format(pr_number) + url = "https://api.github.com/repos/apache/druid/issues/{}".format(pr_number) requests.patch(url, json=milestone_json, auth=(github_username, os.environ["GIT_TOKEN"])) else: print("Skipping Pull Request {} since it's already tagged with milestone {}".format(pr_number, milestone)) diff --git a/distribution/docker/README.md b/distribution/docker/README.md index 09a1e7c0ca65..f91b4b34ad20 
100644 --- a/distribution/docker/README.md +++ b/distribution/docker/README.md @@ -19,7 +19,7 @@ ## Build -From the root of the repo, run `docker build -t apache/incubator-druid:tag -f distribution/docker/Dockerfile .` +From the root of the repo, run `docker build -t apache/druid:tag -f distribution/docker/Dockerfile .` ## Run @@ -31,6 +31,6 @@ This image contains solely the postgres metadata storage connector. If you need the mysql metadata storage connector, you can use Dockerfile.mysql to add it to the base image above. -`docker build -t apache/incubator-druid:tag-mysql --build-arg DRUID_RELEASE=apache/incubator-druid:tag -f distribution/docker/Dockerfile.mysql .` +`docker build -t apache/druid:tag-mysql --build-arg DRUID_RELEASE=apache/druid:tag -f distribution/docker/Dockerfile.mysql .` where `druid:tag` is the version to use as the base. diff --git a/distribution/docker/docker-compose.yml b/distribution/docker/docker-compose.yml index 7160dcd8b0eb..7a89cc6bec88 100644 --- a/distribution/docker/docker-compose.yml +++ b/distribution/docker/docker-compose.yml @@ -46,7 +46,7 @@ services: - ZOO_MY_ID=1 coordinator: - image: apache/incubator-druid + image: apache/druid container_name: coordinator volumes: - coordinator_var:/opt/druid/var @@ -61,7 +61,7 @@ services: - environment broker: - image: apache/incubator-druid + image: apache/druid container_name: broker volumes: - broker_var:/opt/druid/var @@ -77,7 +77,7 @@ services: - environment historical: - image: apache/incubator-druid + image: apache/druid container_name: historical volumes: - historical_var:/opt/druid/var @@ -93,7 +93,7 @@ services: - environment overlord: - image: apache/incubator-druid + image: apache/druid container_name: overlord volumes: - overlord_var:/opt/druid/var @@ -108,7 +108,7 @@ services: - environment middlemanager: - image: apache/incubator-druid + image: apache/druid container_name: middlemanager volumes: - middle_var:/opt/druid/var @@ -124,7 +124,7 @@ services: - environment router: - image: apache/incubator-druid + image: apache/druid container_name: router volumes: - router_var:/opt/druid/var diff --git a/distribution/src/assembly/assembly.xml b/distribution/src/assembly/assembly.xml index 854f41cac2bb..ff8e0d2fdd5b 100644 --- a/distribution/src/assembly/assembly.xml +++ b/distribution/src/assembly/assembly.xml @@ -98,7 +98,6 @@ ../ - DISCLAIMER licenses/** diff --git a/docs/_bin/deploy-docs.sh b/docs/_bin/deploy-docs.sh deleted file mode 100755 index e4503d0a2888..000000000000 --- a/docs/_bin/deploy-docs.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/bin/bash -e - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -opt_api=1 -opt_docs=1 -while getopts ":adn" opt; do - case $opt in - n) - opt_dryrun="1" - ;; - d) - opt_api= - ;; - a) - opt_docs= - ;; - \?) 
- echo "Invalid option: -$OPTARG" >&2 - ;; - esac -done -shift $((OPTIND-1)) - -# Set $version to Druid version (tag will be "druid-$version") -if [ -z "$1" ]; then - version="latest" -else - version=$1 -fi - -# Set $origin to name of origin remote -if [ -z "$2" ]; then - origin="origin" -else - origin=$2 -fi - -# Use s3cmd if available, otherwise try awscli -if command -v s3cmd >/dev/null 2>&1 -then - s3sync="s3cmd sync --delete-removed" -else - s3sync="aws s3 sync --delete" -fi - -# Location of git repository containing this script -druid=$(git -C "$(dirname "$0")" rev-parse --show-toplevel) - -if [ -n "$(git -C "$druid" status --porcelain --untracked-files=no)" ]; then - echo "Working directory is not clean, aborting" - exit 1 -fi - -branch=druid-$version -if [ "$version" == "latest" ]; then - branch=master -fi - -if [ -z "$(git tag -l "$branch")" ] && [ "$branch" != "master" ]; then - echo "Version tag does not exist: druid-$version" - exit 1; -fi - -tmp=$(mktemp -d -t druid-docs-deploy) -target=$tmp/docs -src=$tmp/druid - -echo "Using Version [$version]" -echo "Working directory [$tmp]" - -git clone -q --depth 1 git@github.com:apache/incubator-druid-io.github.io.git "$target" - -remote=$(git -C "$druid" config --local --get "remote.$origin.url") -git clone -q --depth 1 --branch $branch $remote "$src" - -if [ -n "$opt_docs" ] ; then - # Check for broken links - "$src/docs/_bin/broken-link-check.py" "$src/docs/content" "$src/docs/_redirects.json" - - # Copy docs - mkdir -p $target/docs/$version - rsync -a --delete "$src/docs/content/" $target/docs/$version - - # Replace #{DRUIDVERSION} with current Druid version - # Escaping of $version is weak here, but it should be fine for typical version strings - find "$target/docs/$version" -name "*.md" -print0 | xargs -0 perl -pi -e's/\#\{DRUIDVERSION\}/'"$version"'/g' - - # Create redirects - "$src/docs/_bin/make-redirects.py" "$target/docs/$version" "$src/docs/_redirects.json" -fi - -# generate javadocs for releases (not for master) -if [ "$version" != "latest" ] && [ -n "$opt_api" ] ; then - (cd $src && mvn javadoc:aggregate) - mkdir -p $target/api/$version - if [ -z "$opt_dryrun" ]; then - $s3sync "$src/target/site/apidocs/" "s3://static.druid.io/api/$version/" - fi -fi - -updatebranch=update-docs-$version - -git -C $target checkout -b $updatebranch -git -C $target add -A . -git -C $target commit -m "Update $version docs" -if [ -z "$opt_dryrun" ]; then - git -C $target push origin $updatebranch - - if [ -n "$GIT_TOKEN" ]; then - curl -u "$GIT_TOKEN:x-oauth-basic" -XPOST -d@- \ - https://api.github.com/repos/apache/incubator-druid-io.github.io/pulls < -Apache Druid (incubating) processes will emit logs that are useful for debugging to the console. Druid processes also emit periodic metrics about their state. For more about metrics, see [Configuration](../configuration/index.html#enabling-metrics). Metric logs are printed to the console by default, and can be disabled with `-Ddruid.emitter.logging.logLevel=debug`. +Apache Druid processes will emit logs that are useful for debugging to the console. Druid processes also emit periodic metrics about their state. For more about metrics, see [Configuration](../configuration/index.html#enabling-metrics). Metric logs are printed to the console by default, and can be disabled with `-Ddruid.emitter.logging.logLevel=debug`. Druid uses [log4j2](http://logging.apache.org/log4j/2.x/) for logging. Logging can be configured with a log4j2.xml file. 
Add the path to the directory containing the log4j2.xml file (e.g. the _common/ dir) to your classpath if you want to override default Druid log configuration. Note that this directory should be earlier in the classpath than the druid jars. The easiest way to do this is to prefix the classpath with the config dir. diff --git a/docs/dependencies/deep-storage.md b/docs/dependencies/deep-storage.md index ef338508693f..77ae9b27dab0 100644 --- a/docs/dependencies/deep-storage.md +++ b/docs/dependencies/deep-storage.md @@ -23,7 +23,7 @@ title: "Deep storage" --> -Deep storage is where segments are stored. It is a storage mechanism that Apache Druid (incubating) does not provide. This deep storage infrastructure defines the level of durability of your data, as long as Druid processes can see this storage infrastructure and get at the segments stored on it, you will not lose data no matter how many Druid nodes you lose. If segments disappear from this storage layer, then you will lose whatever data those segments represented. +Deep storage is where segments are stored. It is a storage mechanism that Apache Druid does not provide. This deep storage infrastructure defines the level of durability of your data, as long as Druid processes can see this storage infrastructure and get at the segments stored on it, you will not lose data no matter how many Druid nodes you lose. If segments disappear from this storage layer, then you will lose whatever data those segments represented. ## Local Mount diff --git a/docs/dependencies/metadata-storage.md b/docs/dependencies/metadata-storage.md index fab7641af4fe..072d00d8e92a 100644 --- a/docs/dependencies/metadata-storage.md +++ b/docs/dependencies/metadata-storage.md @@ -23,7 +23,7 @@ title: "Metadata storage" --> -The Metadata Storage is an external dependency of Apache Druid (incubating). Druid uses it to store +The Metadata Storage is an external dependency of Apache Druid. Druid uses it to store various metadata about the system, but not to store the actual data. There are a number of tables used for various purposes described below. diff --git a/docs/dependencies/zookeeper.md b/docs/dependencies/zookeeper.md index 300ef74481fa..0855a376fef8 100644 --- a/docs/dependencies/zookeeper.md +++ b/docs/dependencies/zookeeper.md @@ -23,7 +23,7 @@ title: "ZooKeeper" --> -Apache Druid (incubating) uses [Apache ZooKeeper](http://zookeeper.apache.org/) (ZK) for management of current cluster state. The operations that happen over ZK are +Apache Druid uses [Apache ZooKeeper](http://zookeeper.apache.org/) (ZK) for management of current cluster state. The operations that happen over ZK are 1. [Coordinator](../design/coordinator.md) leader election 2. Segment "publishing" protocol from [Historical](../design/historical.md) diff --git a/docs/design/auth.md b/docs/design/auth.md index a2ff534827c8..ac6e8188d977 100644 --- a/docs/design/auth.md +++ b/docs/design/auth.md @@ -23,7 +23,7 @@ title: "Authentication and Authorization" --> -This document describes non-extension specific Apache Druid (incubating) authentication and authorization configurations. +This document describes non-extension specific Apache Druid authentication and authorization configurations. 
|Property|Type|Description|Default|Required| |--------|-----------|--------|--------|--------| diff --git a/docs/design/broker.md b/docs/design/broker.md index c1c517d489b4..741dfc9a7cb7 100644 --- a/docs/design/broker.md +++ b/docs/design/broker.md @@ -25,7 +25,7 @@ title: "Broker" ### Configuration -For Apache Druid (incubating) Broker Process Configuration, see [Broker Configuration](../configuration/index.html#broker). +For Apache Druid Broker Process Configuration, see [Broker Configuration](../configuration/index.html#broker). ### HTTP endpoints diff --git a/docs/design/coordinator.md b/docs/design/coordinator.md index 80721bcace86..fbf5455cd0cf 100644 --- a/docs/design/coordinator.md +++ b/docs/design/coordinator.md @@ -25,7 +25,7 @@ title: "Coordinator Process" ### Configuration -For Apache Druid (incubating) Coordinator Process Configuration, see [Coordinator Configuration](../configuration/index.html#coordinator). +For Apache Druid Coordinator Process Configuration, see [Coordinator Configuration](../configuration/index.html#coordinator). ### HTTP endpoints diff --git a/docs/design/extensions-contrib/dropwizard.md b/docs/design/extensions-contrib/dropwizard.md index 393944efee75..41ec1007cd9e 100644 --- a/docs/design/extensions-contrib/dropwizard.md +++ b/docs/design/extensions-contrib/dropwizard.md @@ -91,7 +91,7 @@ druid.emitter.dropwizard.reporters=[{"type":"console","emitIntervalInSecs":30}"} ``` ### Default Metrics Mapping -Latest default metrics mapping can be found [here] (https://github.com/apache/incubator-druid/tree/master/extensions-contrib/dropwizard/src/main/resources/defaultMetricDimensions.json) +Latest default metrics mapping can be found [here] (https://github.com/apache/druid/tree/master/extensions-contrib/dropwizard/src/main/resources/defaultMetricDimensions.json) ```json { "query/time": { diff --git a/docs/design/historical.md b/docs/design/historical.md index 8abbfc6d8fd1..4a6768691fe4 100644 --- a/docs/design/historical.md +++ b/docs/design/historical.md @@ -25,7 +25,7 @@ title: "Historical Process" ### Configuration -For Apache Druid (incubating) Historical Process Configuration, see [Historical Configuration](../configuration/index.html#historical). +For Apache Druid Historical Process Configuration, see [Historical Configuration](../configuration/index.html#historical). ### HTTP endpoints diff --git a/docs/design/index.md b/docs/design/index.md index b53a506192c7..63c1db52ed4d 100644 --- a/docs/design/index.md +++ b/docs/design/index.md @@ -24,7 +24,7 @@ title: "Introduction to Apache Druid" ## What is Druid? -Apache Druid (incubating) is a real-time analytics database designed for fast slice-and-dice analytics +Apache Druid is a real-time analytics database designed for fast slice-and-dice analytics ("[OLAP](http://en.wikipedia.org/wiki/Online_analytical_processing)" queries) on large data sets. Druid is most often used as a database for powering use cases where real-time ingest, fast query performance, and high uptime are important. As such, Druid is commonly used for powering GUIs of analytical applications, or as a backend for highly-concurrent APIs diff --git a/docs/design/indexer.md b/docs/design/indexer.md index 93871d46aab8..791bde1a44b8 100644 --- a/docs/design/indexer.md +++ b/docs/design/indexer.md @@ -25,13 +25,13 @@ title: "Indexer Process" > The Indexer is an optional and experimental feature. > Its memory management system is still under development and will be significantly enhanced in later releases. 
-The Apache Druid (incubating) Indexer process is an alternative to the MiddleManager + Peon task execution system. Instead of forking a separate JVM process per-task, the Indexer runs tasks as separate threads within a single JVM process. +The Apache Druid Indexer process is an alternative to the MiddleManager + Peon task execution system. Instead of forking a separate JVM process per-task, the Indexer runs tasks as separate threads within a single JVM process. The Indexer is designed to be easier to configure and deploy compared to the MiddleManager + Peon system and to better enable resource sharing across tasks. ### Configuration -For Apache Druid (incubating) Indexer Process Configuration, see [Indexer Configuration](../configuration/index.html#indexer). +For Apache Druid Indexer Process Configuration, see [Indexer Configuration](../configuration/index.html#indexer). ### HTTP endpoints @@ -91,4 +91,4 @@ Separate task logs are not currently supported when using the Indexer; all task The Indexer currently imposes an identical memory limit on each task. In later releases, the per-task memory limit will be removed and only the global limit will apply. The limit on concurrent merges will also be removed. -In later releases, per-task memory usage will be dynamically managed. Please see https://github.com/apache/incubator-druid/issues/7900 for details on future enhancements to the Indexer. \ No newline at end of file +In later releases, per-task memory usage will be dynamically managed. Please see https://github.com/apache/druid/issues/7900 for details on future enhancements to the Indexer. \ No newline at end of file diff --git a/docs/design/indexing-service.md b/docs/design/indexing-service.md index 847f29af7d7d..d7bc46f89eae 100644 --- a/docs/design/indexing-service.md +++ b/docs/design/indexing-service.md @@ -23,7 +23,7 @@ title: "Indexing Service" --> -The Apache Druid (incubating) indexing service is a highly-available, distributed service that runs indexing related tasks. +The Apache Druid indexing service is a highly-available, distributed service that runs indexing related tasks. Indexing [tasks](../ingestion/tasks.md) create (and sometimes destroy) Druid [segments](../design/segments.md). The indexing service has a master/slave like architecture. diff --git a/docs/design/middlemanager.md b/docs/design/middlemanager.md index 694bab888c57..89301f494d94 100644 --- a/docs/design/middlemanager.md +++ b/docs/design/middlemanager.md @@ -25,7 +25,7 @@ title: "MiddleManager Process" ### Configuration -For Apache Druid (incubating) MiddleManager Process Configuration, see [Indexing Service Configuration](../configuration/index.html#middlemanager-and-peons). +For Apache Druid MiddleManager Process Configuration, see [Indexing Service Configuration](../configuration/index.html#middlemanager-and-peons). ### HTTP endpoints diff --git a/docs/design/overlord.md b/docs/design/overlord.md index f1346d7eedaa..b7a4a3f704e4 100644 --- a/docs/design/overlord.md +++ b/docs/design/overlord.md @@ -25,7 +25,7 @@ title: "Overlord Process" ### Configuration -For Apache Druid (incubating) Overlord Process Configuration, see [Overlord Configuration](../configuration/index.html#overlord). +For Apache Druid Overlord Process Configuration, see [Overlord Configuration](../configuration/index.html#overlord). 
### HTTP endpoints diff --git a/docs/design/peons.md b/docs/design/peons.md index f81162967741..72eb72e1a78b 100644 --- a/docs/design/peons.md +++ b/docs/design/peons.md @@ -25,7 +25,7 @@ title: "Peons" ### Configuration -For Apache Druid (incubating) Peon Configuration, see [Peon Query Configuration](../configuration/index.html#peon-query-configuration) and [Additional Peon Configuration](../configuration/index.html#additional-peon-configuration). +For Apache Druid Peon Configuration, see [Peon Query Configuration](../configuration/index.html#peon-query-configuration) and [Additional Peon Configuration](../configuration/index.html#additional-peon-configuration). ### HTTP endpoints diff --git a/docs/design/router.md b/docs/design/router.md index ec917cabc422..c5f73084a873 100644 --- a/docs/design/router.md +++ b/docs/design/router.md @@ -26,7 +26,7 @@ title: "Router Process" > The Router is an optional and [experimental](../development/experimental.md) feature due to the fact that its recommended place in the Druid cluster architecture is still evolving. > However, it has been battle-tested in production, and it hosts the powerful [Druid Console](../operations/management-uis.html#druid-console), so you should feel safe deploying it. -The Apache Druid (incubating) Router process can be used to route queries to different Broker processes. By default, the broker routes queries based on how [Rules](../operations/rule-configuration.md) are set up. For example, if 1 month of recent data is loaded into a `hot` cluster, queries that fall within the recent month can be routed to a dedicated set of brokers. Queries outside this range are routed to another set of brokers. This set up provides query isolation such that queries for more important data are not impacted by queries for less important data. +The Apache Druid Router process can be used to route queries to different Broker processes. By default, the broker routes queries based on how [Rules](../operations/rule-configuration.md) are set up. For example, if 1 month of recent data is loaded into a `hot` cluster, queries that fall within the recent month can be routed to a dedicated set of brokers. Queries outside this range are routed to another set of brokers. This set up provides query isolation such that queries for more important data are not impacted by queries for less important data. For query routing purposes, you should only ever need the Router process if you have a Druid cluster well into the terabyte range. @@ -34,7 +34,7 @@ In addition to query routing, the Router also runs the [Druid Console](../operat ### Configuration -For Apache Druid (incubating) Router Process Configuration, see [Router Configuration](../configuration/index.html#router). +For Apache Druid Router Process Configuration, see [Router Configuration](../configuration/index.html#router). ### HTTP endpoints diff --git a/docs/design/segments.md b/docs/design/segments.md index ede47a18f777..3d2939a72cc4 100644 --- a/docs/design/segments.md +++ b/docs/design/segments.md @@ -23,7 +23,7 @@ title: "Segments" --> -Apache Druid (incubating) stores its index in *segment files*, which are partitioned by +Apache Druid stores its index in *segment files*, which are partitioned by time. 
In a basic setup, one segment file is created for each time interval, where the time interval is configurable in the `segmentGranularity` parameter of the diff --git a/docs/development/build.md b/docs/development/build.md index ef5ec43a3f0f..a77bb35f9728 100644 --- a/docs/development/build.md +++ b/docs/development/build.md @@ -23,8 +23,8 @@ title: "Build from source" --> -You can build Apache Druid (incubating) directly from source. Please note that these instructions are for building the latest stable version of Druid. -For building the latest code in master, follow the instructions [here](https://github.com/apache/incubator-druid/blob/master/docs/content/development/build.md). +You can build Apache Druid directly from source. Please note that these instructions are for building the latest stable version of Druid. +For building the latest code in master, follow the instructions [here](https://github.com/apache/druid/blob/master/docs/content/development/build.md). #### Prerequisites @@ -39,7 +39,7 @@ For building the latest code in master, follow the instructions [here](https://g ##### Downloading the source: ```bash -git clone git@github.com:apache/incubator-druid.git +git clone git@github.com:apache/druid.git cd druid ``` diff --git a/docs/development/extensions-contrib/ambari-metrics-emitter.md b/docs/development/extensions-contrib/ambari-metrics-emitter.md index e8a182cabbab..77443f3e6dbf 100644 --- a/docs/development/extensions-contrib/ambari-metrics-emitter.md +++ b/docs/development/extensions-contrib/ambari-metrics-emitter.md @@ -23,7 +23,7 @@ title: "Ambari Metrics Emitter" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `ambari-metrics-emitter` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `ambari-metrics-emitter` extension. ## Introduction diff --git a/docs/development/extensions-contrib/azure.md b/docs/development/extensions-contrib/azure.md index fd6c6c81e577..c3f0d1639b0b 100644 --- a/docs/development/extensions-contrib/azure.md +++ b/docs/development/extensions-contrib/azure.md @@ -23,7 +23,7 @@ title: "Microsoft Azure" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-azure-extensions` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-azure-extensions` extension. ## Deep Storage diff --git a/docs/development/extensions-contrib/cassandra.md b/docs/development/extensions-contrib/cassandra.md index c8cc8d9433e1..d6ecc33174d7 100644 --- a/docs/development/extensions-contrib/cassandra.md +++ b/docs/development/extensions-contrib/cassandra.md @@ -23,7 +23,7 @@ title: "Apache Cassandra" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-cassandra-storage` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-cassandra-storage` extension. [Apache Cassandra](http://www.datastax.com/what-we-offer/products-services/datastax-enterprise/apache-cassandra) can also be leveraged for deep storage. 
This requires some additional Druid configuration as well as setting up the necessary diff --git a/docs/development/extensions-contrib/cloudfiles.md b/docs/development/extensions-contrib/cloudfiles.md index 1589234ee39b..d6ccccc301ad 100644 --- a/docs/development/extensions-contrib/cloudfiles.md +++ b/docs/development/extensions-contrib/cloudfiles.md @@ -23,7 +23,7 @@ title: "Rackspace Cloud Files" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-cloudfiles-extensions` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-cloudfiles-extensions` extension. ## Deep Storage diff --git a/docs/development/extensions-contrib/distinctcount.md b/docs/development/extensions-contrib/distinctcount.md index 4c3340295d6d..f6c06143e4d8 100644 --- a/docs/development/extensions-contrib/distinctcount.md +++ b/docs/development/extensions-contrib/distinctcount.md @@ -23,7 +23,7 @@ title: "DistinctCount Aggregator" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) the `druid-distinctcount` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) the `druid-distinctcount` extension. Additionally, follow these steps: diff --git a/docs/development/extensions-contrib/graphite.md b/docs/development/extensions-contrib/graphite.md index 124f7def458b..5729315e14b6 100644 --- a/docs/development/extensions-contrib/graphite.md +++ b/docs/development/extensions-contrib/graphite.md @@ -23,7 +23,7 @@ title: "Graphite Emitter" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `graphite-emitter` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `graphite-emitter` extension. ## Introduction diff --git a/docs/development/extensions-contrib/influx.md b/docs/development/extensions-contrib/influx.md index 28c8c9c7298b..e377bea94bfd 100644 --- a/docs/development/extensions-contrib/influx.md +++ b/docs/development/extensions-contrib/influx.md @@ -23,7 +23,7 @@ title: "InfluxDB Line Protocol Parser" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-influx-extensions`. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-influx-extensions`. This extension enables Druid to parse the [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_tutorial/), a popular text-based timeseries metric serialization format. diff --git a/docs/development/extensions-contrib/influxdb-emitter.md b/docs/development/extensions-contrib/influxdb-emitter.md index 7a2a8fed8ec1..3b1c84c30ca0 100644 --- a/docs/development/extensions-contrib/influxdb-emitter.md +++ b/docs/development/extensions-contrib/influxdb-emitter.md @@ -23,7 +23,7 @@ title: "InfluxDB Emitter" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-influxdb-emitter` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-influxdb-emitter` extension. 
## Introduction diff --git a/docs/development/extensions-contrib/kafka-emitter.md b/docs/development/extensions-contrib/kafka-emitter.md index 5460c8d8b4b7..15c975bab4d8 100644 --- a/docs/development/extensions-contrib/kafka-emitter.md +++ b/docs/development/extensions-contrib/kafka-emitter.md @@ -23,7 +23,7 @@ title: "Kafka Emitter" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `kafka-emitter` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `kafka-emitter` extension. ## Introduction diff --git a/docs/development/extensions-contrib/materialized-view.md b/docs/development/extensions-contrib/materialized-view.md index 484b9558d400..0b2eb3d571bb 100644 --- a/docs/development/extensions-contrib/materialized-view.md +++ b/docs/development/extensions-contrib/materialized-view.md @@ -23,7 +23,7 @@ title: "Materialized View" --> -To use this Apache Druid (incubating) feature, make sure to only load `materialized-view-selection` on Broker and load `materialized-view-maintenance` on Overlord. In addition, this feature currently requires a Hadoop cluster. +To use this Apache Druid feature, make sure to only load `materialized-view-selection` on Broker and load `materialized-view-maintenance` on Overlord. In addition, this feature currently requires a Hadoop cluster. This feature enables Druid to greatly improve the query performance, especially when the query dataSource has a very large number of dimensions but the query only required several dimensions. This feature includes two parts. One is `materialized-view-maintenance`, and the other is `materialized-view-selection`. diff --git a/docs/development/extensions-contrib/momentsketch-quantiles.md b/docs/development/extensions-contrib/momentsketch-quantiles.md index 0f9ae21b00b7..3f3bff5641c2 100644 --- a/docs/development/extensions-contrib/momentsketch-quantiles.md +++ b/docs/development/extensions-contrib/momentsketch-quantiles.md @@ -26,7 +26,7 @@ title: "Moment Sketches for Approximate Quantiles module" This module provides aggregators for approximate quantile queries using the [momentsketch](https://github.com/stanford-futuredata/momentsketch) library. The momentsketch provides coarse quantile estimates with less space and aggregation time overheads than traditional sketches, approaching the performance of counts and sums by reconstructing distributions from computed statistics. -To use this Apache Druid (incubating) extension, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file: +To use this Apache Druid extension, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file: ``` druid.extensions.loadList=["druid-momentsketch"] diff --git a/docs/development/extensions-contrib/moving-average-query.md b/docs/development/extensions-contrib/moving-average-query.md index af8d902743cf..9378f4c7f86e 100644 --- a/docs/development/extensions-contrib/moving-average-query.md +++ b/docs/development/extensions-contrib/moving-average-query.md @@ -57,7 +57,7 @@ There are currently no configuration properties specific to Moving Average. ## Limitations * movingAverage is missing support for the following groupBy properties: `subtotalsSpec`, `virtualColumns`. * movingAverage is missing support for the following timeseries properties: `descending`. 
-* movingAverage is missing support for [SQL-compatible null handling](https://github.com/apache/incubator-druid/issues/4349) (So setting druid.generic.useDefaultValueForNull in configuration will give an error). +* movingAverage is missing support for [SQL-compatible null handling](https://github.com/apache/druid/issues/4349) (So setting druid.generic.useDefaultValueForNull in configuration will give an error). ##Query spec: * Most properties in the query spec derived from [groupBy query](../../querying/groupbyquery.md) / [timeseries](../../querying/timeseriesquery.md), see documentation for these query types. diff --git a/docs/development/extensions-contrib/opentsdb-emitter.md b/docs/development/extensions-contrib/opentsdb-emitter.md index 7167427cd80d..de481470433d 100644 --- a/docs/development/extensions-contrib/opentsdb-emitter.md +++ b/docs/development/extensions-contrib/opentsdb-emitter.md @@ -23,7 +23,7 @@ title: "OpenTSDB Emitter" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `opentsdb-emitter` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `opentsdb-emitter` extension. ## Introduction diff --git a/docs/development/extensions-contrib/redis-cache.md b/docs/development/extensions-contrib/redis-cache.md index 0d201be83cd2..e8e39e34ef38 100644 --- a/docs/development/extensions-contrib/redis-cache.md +++ b/docs/development/extensions-contrib/redis-cache.md @@ -23,7 +23,7 @@ title: "Druid Redis Cache" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-redis-cache` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-redis-cache` extension. A cache implementation for Druid based on [Redis](https://github.com/antirez/redis). diff --git a/docs/development/extensions-contrib/sqlserver.md b/docs/development/extensions-contrib/sqlserver.md index 84dc06801f47..103897f0de46 100644 --- a/docs/development/extensions-contrib/sqlserver.md +++ b/docs/development/extensions-contrib/sqlserver.md @@ -23,7 +23,7 @@ title: "Microsoft SQLServer" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `sqlserver-metadata-storage` as an extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `sqlserver-metadata-storage` as an extension. ## Setting up SQLServer diff --git a/docs/development/extensions-contrib/statsd.md b/docs/development/extensions-contrib/statsd.md index aa7c1283e0c6..7a6dd6bccd35 100644 --- a/docs/development/extensions-contrib/statsd.md +++ b/docs/development/extensions-contrib/statsd.md @@ -23,7 +23,7 @@ title: "StatsD Emitter" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `statsd-emitter` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `statsd-emitter` extension. 
## Introduction diff --git a/docs/development/extensions-contrib/tdigestsketch-quantiles.md b/docs/development/extensions-contrib/tdigestsketch-quantiles.md index 2c624c68b027..705bbc2edb12 100644 --- a/docs/development/extensions-contrib/tdigestsketch-quantiles.md +++ b/docs/development/extensions-contrib/tdigestsketch-quantiles.md @@ -23,7 +23,7 @@ title: "T-Digest Quantiles Sketch module" --> -This module provides Apache Druid (incubating) approximate sketch aggregators based on T-Digest. +This module provides Apache Druid approximate sketch aggregators based on T-Digest. T-Digest (https://github.com/tdunning/t-digest) is a popular data structure for accurate on-line accumulation of rank-based statistics such as quantiles and trimmed means. The data structure is also designed for parallel programming use cases like distributed aggregations or map reduce jobs by making combining two intermediate t-digests easy and efficient. diff --git a/docs/development/extensions-contrib/thrift.md b/docs/development/extensions-contrib/thrift.md index 051c6e9a5fab..dd3f3db98114 100644 --- a/docs/development/extensions-contrib/thrift.md +++ b/docs/development/extensions-contrib/thrift.md @@ -23,7 +23,7 @@ title: "Thrift" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-thrift-extensions`. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-thrift-extensions`. This extension enables Druid to ingest thrift compact data online (`ByteBuffer`) and offline (SequenceFile of type `` or LzoThriftBlock File). diff --git a/docs/development/extensions-contrib/time-min-max.md b/docs/development/extensions-contrib/time-min-max.md index 1e37e9a56e65..70822336787f 100644 --- a/docs/development/extensions-contrib/time-min-max.md +++ b/docs/development/extensions-contrib/time-min-max.md @@ -23,7 +23,7 @@ title: "Timestamp Min/Max aggregators" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-time-min-max`. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-time-min-max`. These aggregators enable more precise calculation of min and max time of given events than `__time` column whose granularity is sparse, the same as query granularity. To use this feature, a "timeMin" or "timeMax" aggregator must be included at indexing time. diff --git a/docs/development/extensions-core/approximate-histograms.md b/docs/development/extensions-core/approximate-histograms.md index b235c450951b..508e3279dc4a 100644 --- a/docs/development/extensions-core/approximate-histograms.md +++ b/docs/development/extensions-core/approximate-histograms.md @@ -23,7 +23,7 @@ title: "Approximate Histogram aggregators" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-histogram` as an extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-histogram` as an extension. The `druid-histogram` extension provides an approximate histogram aggregator and a fixed buckets histogram aggregator. 
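For illustration, an ingestion-time aggregator spec for the `druid-histogram` extension described above might look like the sketch below. It is not taken from this patch: the `approxHistogram` type and the `resolution`/`numBuckets` parameters are assumptions based on the extension's general shape, and the column names are hypothetical.

```python
import json

# Sketch of an approximate histogram aggregator spec (assumed field names,
# hypothetical column names) for the druid-histogram extension.
approx_histogram_aggregator = {
    "type": "approxHistogram",   # assumed aggregator type provided by druid-histogram
    "name": "latency_histogram",
    "fieldName": "latency_ms",   # hypothetical metric column
    "resolution": 50,            # assumed: number of centroids kept per histogram
    "numBuckets": 7,             # assumed: bucket count used when rendering output
}

# Emit the spec as JSON, ready to paste into the metricsSpec of an ingestion spec.
print(json.dumps(approx_histogram_aggregator, indent=2))
```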
diff --git a/docs/development/extensions-core/avro.md b/docs/development/extensions-core/avro.md index da11af606acc..006044d912dc 100644 --- a/docs/development/extensions-core/avro.md +++ b/docs/development/extensions-core/avro.md @@ -22,7 +22,7 @@ title: "Apache Avro" ~ under the License. --> -This Apache Druid (incubating) extension enables Druid to ingest and understand the Apache Avro data format. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-avro-extensions` as an extension. +This Apache Druid extension enables Druid to ingest and understand the Apache Avro data format. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-avro-extensions` as an extension. ### Avro Stream Parser diff --git a/docs/development/extensions-core/bloom-filter.md b/docs/development/extensions-core/bloom-filter.md index 60031b31de5e..602f1f231869 100644 --- a/docs/development/extensions-core/bloom-filter.md +++ b/docs/development/extensions-core/bloom-filter.md @@ -23,7 +23,7 @@ title: "Bloom Filter" --> -This Apache Druid (incubating) extension adds the ability to both construct bloom filters from query results, and filter query results by testing +This Apache Druid extension adds the ability to both construct bloom filters from query results, and filter query results by testing against a bloom filter. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-bloom-filter` as an extension. diff --git a/docs/development/extensions-core/datasketches-extension.md b/docs/development/extensions-core/datasketches-extension.md index 29844dd1f2c9..996649a897d2 100644 --- a/docs/development/extensions-core/datasketches-extension.md +++ b/docs/development/extensions-core/datasketches-extension.md @@ -23,7 +23,7 @@ title: "DataSketches extension" --> -Apache Druid (incubating) aggregators based on [datasketches](https://datasketches.github.io/) library. Sketches are data structures implementing approximate streaming mergeable algorithms. Sketches can be ingested from the outside of Druid or built from raw data at ingestion time. Sketches can be stored in Druid segments as additive metrics. +Apache Druid aggregators based on [datasketches](https://datasketches.github.io/) library. Sketches are data structures implementing approximate streaming mergeable algorithms. Sketches can be ingested from the outside of Druid or built from raw data at ingestion time. Sketches can be stored in Druid segments as additive metrics. To use the datasketches aggregators, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file: diff --git a/docs/development/extensions-core/datasketches-hll.md b/docs/development/extensions-core/datasketches-hll.md index 508d02806f60..b27a031dfe28 100644 --- a/docs/development/extensions-core/datasketches-hll.md +++ b/docs/development/extensions-core/datasketches-hll.md @@ -23,7 +23,7 @@ title: "DataSketches HLL Sketch module" --> -This module provides Apache Druid (incubating) aggregators for distinct counting based on HLL sketch from [datasketches](https://datasketches.github.io/) library. At ingestion time, this aggregator creates the HLL sketch objects to be stored in Druid segments. At query time, sketches are read and merged together. In the end, by default, you receive the estimate of the number of distinct values presented to the sketch. Also, you can use post aggregator to produce a union of sketch columns in the same row. 
+This module provides Apache Druid aggregators for distinct counting based on HLL sketch from [datasketches](https://datasketches.github.io/) library. At ingestion time, this aggregator creates the HLL sketch objects to be stored in Druid segments. At query time, sketches are read and merged together. In the end, by default, you receive the estimate of the number of distinct values presented to the sketch. Also, you can use post aggregator to produce a union of sketch columns in the same row. You can use the HLL sketch aggregator on columns of any identifiers. It will return estimated cardinality of the column. To use this aggregator, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file: diff --git a/docs/development/extensions-core/datasketches-quantiles.md b/docs/development/extensions-core/datasketches-quantiles.md index e91c33371a94..88b406c4503e 100644 --- a/docs/development/extensions-core/datasketches-quantiles.md +++ b/docs/development/extensions-core/datasketches-quantiles.md @@ -23,7 +23,7 @@ title: "DataSketches Quantiles Sketch module" --> -This module provides Apache Druid (incubating) aggregators based on numeric quantiles DoublesSketch from [datasketches](https://datasketches.github.io/) library. Quantiles sketch is a mergeable streaming algorithm to estimate the distribution of values, and approximately answer queries about the rank of a value, probability mass function of the distribution (PMF) or histogram, cumulative distribution function (CDF), and quantiles (median, min, max, 95th percentile and such). See [Quantiles Sketch Overview](https://datasketches.github.io/docs/Quantiles/QuantilesOverview.html). +This module provides Apache Druid aggregators based on numeric quantiles DoublesSketch from [datasketches](https://datasketches.github.io/) library. Quantiles sketch is a mergeable streaming algorithm to estimate the distribution of values, and approximately answer queries about the rank of a value, probability mass function of the distribution (PMF) or histogram, cumulative distribution function (CDF), and quantiles (median, min, max, 95th percentile and such). See [Quantiles Sketch Overview](https://datasketches.github.io/docs/Quantiles/QuantilesOverview.html). There are three major modes of operation: diff --git a/docs/development/extensions-core/datasketches-theta.md b/docs/development/extensions-core/datasketches-theta.md index 57eb62f6b8bd..10118617a557 100644 --- a/docs/development/extensions-core/datasketches-theta.md +++ b/docs/development/extensions-core/datasketches-theta.md @@ -23,7 +23,7 @@ title: "DataSketches Theta Sketch module" --> -This module provides Apache Druid (incubating) aggregators based on Theta sketch from [datasketches](https://datasketches.github.io/) library. Note that sketch algorithms are approximate; see details in the "Accuracy" section of the datasketches doc. +This module provides Apache Druid aggregators based on Theta sketch from [datasketches](https://datasketches.github.io/) library. Note that sketch algorithms are approximate; see details in the "Accuracy" section of the datasketches doc. At ingestion time, this aggregator creates the Theta sketch objects which get stored in Druid segments. Logically speaking, a Theta sketch object can be thought of as a Set data structure. At query time, sketches are read and aggregated (set unioned) together. In the end, by default, you receive the estimate of the number of unique entries in the sketch object. 
Also, you can use post aggregators to do union, intersection or difference on sketch columns in the same row. Note that you can use `thetaSketch` aggregator on columns which were not ingested using the same. It will return estimated cardinality of the column. It is recommended to use it at ingestion time as well to make querying faster. diff --git a/docs/development/extensions-core/datasketches-tuple.md b/docs/development/extensions-core/datasketches-tuple.md index 202a231ac05f..462c0c778f3c 100644 --- a/docs/development/extensions-core/datasketches-tuple.md +++ b/docs/development/extensions-core/datasketches-tuple.md @@ -23,7 +23,7 @@ title: "DataSketches Tuple Sketch module" --> -This module provides Apache Druid (incubating) aggregators based on Tuple sketch from [datasketches](https://datasketches.github.io/) library. ArrayOfDoublesSketch sketches extend the functionality of the count-distinct Theta sketches by adding arrays of double values associated with unique keys. +This module provides Apache Druid aggregators based on Tuple sketch from [datasketches](https://datasketches.github.io/) library. ArrayOfDoublesSketch sketches extend the functionality of the count-distinct Theta sketches by adding arrays of double values associated with unique keys. To use this aggregator, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file: diff --git a/docs/development/extensions-core/druid-basic-security.md b/docs/development/extensions-core/druid-basic-security.md index 3194ef2d00df..892306e4a07b 100644 --- a/docs/development/extensions-core/druid-basic-security.md +++ b/docs/development/extensions-core/druid-basic-security.md @@ -23,7 +23,7 @@ title: "Basic Security" --> -This Apache Druid (incubating) extension adds: +This Apache Druid extension adds: - an Authenticator which supports [HTTP Basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) using the Druid metadata store or LDAP as its credentials store - an Authorizer which implements basic role-based access control for Druid metadata store or LDAP users and groups diff --git a/docs/development/extensions-core/druid-kerberos.md b/docs/development/extensions-core/druid-kerberos.md index bfce6fb55395..7217f68858ce 100644 --- a/docs/development/extensions-core/druid-kerberos.md +++ b/docs/development/extensions-core/druid-kerberos.md @@ -23,7 +23,7 @@ title: "Kerberos" --> -Apache Druid (incubating) Extension to enable Authentication for Druid Processes using Kerberos. +Apache Druid Extension to enable Authentication for Druid Processes using Kerberos. This extension adds an Authenticator which is used to protect HTTP Endpoints using the simple and protected GSSAPI negotiation mechanism [SPNEGO](https://en.wikipedia.org/wiki/SPNEGO). Make sure to [include](../../development/extensions.md#loading-extensions) `druid-kerberos` as an extension. diff --git a/docs/development/extensions-core/druid-lookups.md b/docs/development/extensions-core/druid-lookups.md index e15b438b7724..04c34b6e1016 100644 --- a/docs/development/extensions-core/druid-lookups.md +++ b/docs/development/extensions-core/druid-lookups.md @@ -26,7 +26,7 @@ title: "Cached Lookup Module" > Please note that this is an experimental module and the development/testing still at early stage. Feel free to try it and give us your feedback. ## Description -This Apache Druid (incubating) module provides a per-lookup caching mechanism for JDBC data sources. 
+This Apache Druid module provides a per-lookup caching mechanism for JDBC data sources. The main goal of this cache is to speed up the access to a high latency lookup sources and to provide a caching isolation for every lookup source. Thus user can define various caching strategies or and implementation per lookup, even if the source is the same. This module can be used side to side with other lookup module like the global cached lookup module. diff --git a/docs/development/extensions-core/examples.md b/docs/development/extensions-core/examples.md index eff21ddc8126..577ee30f65f9 100644 --- a/docs/development/extensions-core/examples.md +++ b/docs/development/extensions-core/examples.md @@ -23,4 +23,4 @@ title: "Extension Examples" --> -This extension was removed in Apache Druid (incubating) 0.16.0. In prior versions, the extension provided obsolete facilities to ingest data from the Twitter 'Spritzer' data stream as well as the Wikipedia changes IRC channel. +This extension was removed in Apache Druid 0.16.0. In prior versions, the extension provided obsolete facilities to ingest data from the Twitter 'Spritzer' data stream as well as the Wikipedia changes IRC channel. diff --git a/docs/development/extensions-core/google.md b/docs/development/extensions-core/google.md index 9e5c63abf72b..e16769b2a47f 100644 --- a/docs/development/extensions-core/google.md +++ b/docs/development/extensions-core/google.md @@ -23,7 +23,7 @@ title: "Google Cloud Storage" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-google-extensions` extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-google-extensions` extension. ## Deep Storage diff --git a/docs/development/extensions-core/hdfs.md b/docs/development/extensions-core/hdfs.md index d98e5c4e00d6..2421c2f15404 100644 --- a/docs/development/extensions-core/hdfs.md +++ b/docs/development/extensions-core/hdfs.md @@ -23,7 +23,7 @@ title: "HDFS" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-hdfs-storage` as an extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-hdfs-storage` as an extension. ## Deep Storage diff --git a/docs/development/extensions-core/kafka-extraction-namespace.md b/docs/development/extensions-core/kafka-extraction-namespace.md index 1fc2d75f383d..b72472ac5c1a 100644 --- a/docs/development/extensions-core/kafka-extraction-namespace.md +++ b/docs/development/extensions-core/kafka-extraction-namespace.md @@ -24,7 +24,7 @@ title: "Apache Kafka Lookups" > Lookups are an [experimental](../experimental.md) feature. -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-global` and `druid-kafka-extraction-namespace` as an extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-global` and `druid-kafka-extraction-namespace` as an extension. If you need updates to populate as promptly as possible, it is possible to plug into a Kafka topic whose key is the old value and message is the desired new value (both in UTF-8) as a LookupExtractorFactory. 
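As a rough sketch of what such a Kafka-backed lookup might look like, the snippet below builds a lookup extractor factory spec. The `kafka` type name, the `kafkaTopic` field, and the `kafkaProperties` connection settings are assumptions about this extension's configuration shape, and the topic and host names are hypothetical.

```python
import json

# Sketch of a Kafka lookup extractor factory spec (assumed field names,
# hypothetical topic and connection values) for druid-kafka-extraction-namespace.
kafka_lookup_spec = {
    "type": "kafka",                       # assumed extractor factory type
    "kafkaTopic": "country_code_lookup",   # hypothetical topic; key = old value, message = new value
    "kafkaProperties": {
        # assumed consumer properties; adjust to the actual cluster
        "zookeeper.connect": "zk.example.com:2181/kafka"
    },
}

print(json.dumps(kafka_lookup_spec, indent=2))
```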
diff --git a/docs/development/extensions-core/kafka-ingestion.md b/docs/development/extensions-core/kafka-ingestion.md index fe85504828c4..de6504212b12 100644 --- a/docs/development/extensions-core/kafka-ingestion.md +++ b/docs/development/extensions-core/kafka-ingestion.md @@ -30,7 +30,7 @@ partition and offset mechanism and are therefore able to provide guarantees of e The supervisor oversees the state of the indexing tasks to coordinate handoffs, manage failures, and ensure that the scalability and replication requirements are maintained. -This service is provided in the `druid-kafka-indexing-service` core Apache Druid (incubating) extension (see +This service is provided in the `druid-kafka-indexing-service` core Apache Druid extension (see [Including Extensions](../../development/extensions.md#loading-extensions)). > The Kafka indexing service supports transactional topics which were introduced in Kafka 0.11.x. These changes make the @@ -399,4 +399,4 @@ events for the interval 13:00 - 14:00 may be split across previous and new set o one can schedule re-indexing tasks be run to merge segments together into new segments of an ideal size (in the range of ~500-700 MB per segment). Details on how to optimize the segment size can be found on [Segment size optimization](../../operations/segment-optimization.md). There is also ongoing work to support automatic segment compaction of sharded segments as well as compaction not requiring -Hadoop (see [here](https://github.com/apache/incubator-druid/pull/5102)). +Hadoop (see [here](https://github.com/apache/druid/pull/5102)). diff --git a/docs/development/extensions-core/kinesis-ingestion.md b/docs/development/extensions-core/kinesis-ingestion.md index 9ede4f303170..f0452a4fb1be 100644 --- a/docs/development/extensions-core/kinesis-ingestion.md +++ b/docs/development/extensions-core/kinesis-ingestion.md @@ -30,7 +30,7 @@ Shards and Sequence Number mechanism and are therefore able to provide guarantee The supervisor oversees the state of the indexing tasks to coordinate handoffs, manage failures, and ensure that the scalability and replication requirements are maintained. -The Kinesis indexing service is provided as the `druid-kinesis-indexing-service` core Apache Druid (incubating) extension (see +The Kinesis indexing service is provided as the `druid-kinesis-indexing-service` core Apache Druid extension (see [Including Extensions](../../development/extensions.md#loading-extensions)). Please note that this is currently designated as an *experimental feature* and is subject to the usual [experimental caveats](../experimental.md). @@ -158,7 +158,7 @@ The tuningConfig is optional and default parameters will be used if no tuningCon | `maxParseExceptions` | Integer | The maximum number of parse exceptions that can occur before the task halts ingestion and fails. Overridden if `reportParseExceptions` is set. | no, unlimited default | | `maxSavedParseExceptions` | Integer | When a parse exception occurs, Druid can keep track of the most recent parse exceptions. "maxSavedParseExceptions" limits how many exception instances will be saved. These saved exceptions will be made available after the task finishes in the [task completion report](../../ingestion/tasks.md#reports). Overridden if `reportParseExceptions` is set. | no, default == 0 | | `maxRecordsPerPoll` | Integer | The maximum number of records/events to be fetched from buffer per poll. 
The actual maximum will be `Max(maxRecordsPerPoll, Max(bufferSize, 1))` | no, default == 100 | -| `repartitionTransitionDuration` | ISO8601 Period | When shards are split or merged, the supervisor will recompute shard -> task group mappings, and signal any running tasks created under the old mappings to stop early at (current time + `repartitionTransitionDuration`). Stopping the tasks early allows Druid to begin reading from the new shards more quickly. The repartition transition wait time controlled by this property gives the stream additional time to write records to the new shards after the split/merge, which helps avoid the issues with empty shard handling described at https://github.com/apache/incubator-druid/issues/7600. | no, (default == PT2M) | +| `repartitionTransitionDuration` | ISO8601 Period | When shards are split or merged, the supervisor will recompute shard -> task group mappings, and signal any running tasks created under the old mappings to stop early at (current time + `repartitionTransitionDuration`). Stopping the tasks early allows Druid to begin reading from the new shards more quickly. The repartition transition wait time controlled by this property gives the stream additional time to write records to the new shards after the split/merge, which helps avoid the issues with empty shard handling described at https://github.com/apache/druid/issues/7600. | no, (default == PT2M) | #### IndexSpec @@ -397,7 +397,7 @@ events for the interval 13:00 - 14:00 may be split across previous and new set o one can schedule re-indexing tasks be run to merge segments together into new segments of an ideal size (in the range of ~500-700 MB per segment). Details on how to optimize the segment size can be found on [Segment size optimization](../../operations/segment-optimization.md). There is also ongoing work to support automatic segment compaction of sharded segments as well as compaction not requiring -Hadoop (see [here](https://github.com/apache/incubator-druid/pull/5102)). +Hadoop (see [here](https://github.com/apache/druid/pull/5102)). ### Determining Fetch Settings Internally, the Kinesis Indexing Service uses the Kinesis Record Supplier abstraction for fetching Kinesis data records and storing the records @@ -431,7 +431,7 @@ control this behavior. The number of records that the indexing task fetch from t determines the number of records to be processed per each ingestion loop in the task. ## Deaggregation -See [issue](https://github.com/apache/incubator-druid/issues/6714) +See [issue](https://github.com/apache/druid/issues/6714) The Kinesis indexing service supports de-aggregation of multiple rows packed into a single record by the Kinesis Producer Library's aggregate method for more efficient data transfer. Currently, enabling the de-aggregate functionality diff --git a/docs/development/extensions-core/lookups-cached-global.md b/docs/development/extensions-core/lookups-cached-global.md index a2b699502125..9f9900a30104 100644 --- a/docs/development/extensions-core/lookups-cached-global.md +++ b/docs/development/extensions-core/lookups-cached-global.md @@ -25,7 +25,7 @@ title: "Globally Cached Lookups" > Lookups are an [experimental](../experimental.md) feature. -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-global` as an extension. 
+To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-global` as an extension. ## Configuration > Static configuration is no longer supported. Lookups can be configured through diff --git a/docs/development/extensions-core/mysql.md b/docs/development/extensions-core/mysql.md index eea72eb91ffa..5445e1b47587 100644 --- a/docs/development/extensions-core/mysql.md +++ b/docs/development/extensions-core/mysql.md @@ -23,7 +23,7 @@ title: "MySQL Metadata Store" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `mysql-metadata-storage` as an extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `mysql-metadata-storage` as an extension. > The MySQL extension requires the MySQL Connector/J library which is not included in the Druid distribution. > Refer to the following section for instructions on how to install this library. diff --git a/docs/development/extensions-core/orc.md b/docs/development/extensions-core/orc.md index 1195d905cc18..4510250d1e22 100644 --- a/docs/development/extensions-core/orc.md +++ b/docs/development/extensions-core/orc.md @@ -23,7 +23,7 @@ title: "ORC Extension" --> -This Apache Druid (incubating) module extends [Druid Hadoop based indexing](../../ingestion/hadoop.md) to ingest data directly from offline +This Apache Druid module extends [Druid Hadoop based indexing](../../ingestion/hadoop.md) to ingest data directly from offline Apache ORC files. To use this extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-orc-extensions`. diff --git a/docs/development/extensions-core/parquet.md b/docs/development/extensions-core/parquet.md index b2629b5db391..9fbdb4401f26 100644 --- a/docs/development/extensions-core/parquet.md +++ b/docs/development/extensions-core/parquet.md @@ -23,7 +23,7 @@ title: "Apache Parquet Extension" --> -This Apache Druid (incubating) module extends [Druid Hadoop based indexing](../../ingestion/hadoop.md) to ingest data directly from offline +This Apache Druid module extends [Druid Hadoop based indexing](../../ingestion/hadoop.md) to ingest data directly from offline Apache Parquet files. Note: If using the `parquet-avro` parser for Apache Hadoop based indexing, `druid-parquet-extensions` depends on the `druid-avro-extensions` module, so be sure to diff --git a/docs/development/extensions-core/postgresql.md b/docs/development/extensions-core/postgresql.md index 51977433fd33..a51112b0cbcc 100644 --- a/docs/development/extensions-core/postgresql.md +++ b/docs/development/extensions-core/postgresql.md @@ -23,7 +23,7 @@ title: "PostgreSQL Metadata Store" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `postgresql-metadata-storage` as an extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `postgresql-metadata-storage` as an extension. ## Setting up PostgreSQL diff --git a/docs/development/extensions-core/protobuf.md b/docs/development/extensions-core/protobuf.md index 0882d6839ffe..c90e597b63c2 100644 --- a/docs/development/extensions-core/protobuf.md +++ b/docs/development/extensions-core/protobuf.md @@ -23,7 +23,7 @@ title: "Protobuf" --> -This Apache Druid (incubating) extension enables Druid to ingest and understand the Protobuf data format. 
Make sure to [include](../../development/extensions.md#loading-extensions) `druid-protobuf-extensions` as an extension. +This Apache Druid extension enables Druid to ingest and understand the Protobuf data format. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-protobuf-extensions` as an extension. ## Protobuf Parser diff --git a/docs/development/extensions-core/s3.md b/docs/development/extensions-core/s3.md index fd82b37e8eb3..1f59f4c36420 100644 --- a/docs/development/extensions-core/s3.md +++ b/docs/development/extensions-core/s3.md @@ -23,7 +23,7 @@ title: "S3-compatible" --> -To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-s3-extensions` as an extension. +To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-s3-extensions` as an extension. ## Deep Storage diff --git a/docs/development/extensions-core/simple-client-sslcontext.md b/docs/development/extensions-core/simple-client-sslcontext.md index ca957bb2a95e..7452a7b5bb1b 100644 --- a/docs/development/extensions-core/simple-client-sslcontext.md +++ b/docs/development/extensions-core/simple-client-sslcontext.md @@ -23,7 +23,7 @@ title: "Simple SSLContext Provider Module" --> -This Apache Druid (incubating) module contains a simple implementation of [SSLContext](http://docs.oracle.com/javase/8/docs/api/javax/net/ssl/SSLContext.html) +This Apache Druid module contains a simple implementation of [SSLContext](http://docs.oracle.com/javase/8/docs/api/javax/net/ssl/SSLContext.html) that will be injected to be used with HttpClient that Druid processes use internally to communicate with each other. To learn more about Java's SSL support, please refer to [this](http://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html) guide. diff --git a/docs/development/extensions-core/stats.md b/docs/development/extensions-core/stats.md index 0dbbf3a0ffee..fa704ea30188 100644 --- a/docs/development/extensions-core/stats.md +++ b/docs/development/extensions-core/stats.md @@ -23,7 +23,7 @@ title: "Stats aggregator" --> -This Apache Druid (incubating) extension includes stat-related aggregators, including variance and standard deviations, etc. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-stats` as an extension. +This Apache Druid extension includes stat-related aggregators, including variance and standard deviations, etc. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-stats` as an extension. ## Variance aggregator diff --git a/docs/development/extensions-core/test-stats.md b/docs/development/extensions-core/test-stats.md index e517b356525f..9f069d87bf54 100644 --- a/docs/development/extensions-core/test-stats.md +++ b/docs/development/extensions-core/test-stats.md @@ -23,7 +23,7 @@ title: "Test Stats Aggregators" --> -This Apache Druid (incubating) extension incorporates test statistics related aggregators, including z-score and p-value. Please refer to [https://www.paypal-engineering.com/2017/06/29/democratizing-experimentation-data-for-product-innovations/](https://www.paypal-engineering.com/2017/06/29/democratizing-experimentation-data-for-product-innovations/) for math background and details. +This Apache Druid extension incorporates test statistics related aggregators, including z-score and p-value. 
Please refer to [https://www.paypal-engineering.com/2017/06/29/democratizing-experimentation-data-for-product-innovations/](https://www.paypal-engineering.com/2017/06/29/democratizing-experimentation-data-for-product-innovations/) for math background and details. Make sure to include `druid-stats` extension in order to use these aggregators. diff --git a/docs/development/extensions.md b/docs/development/extensions.md index d7638ccd0ad8..045fba8f8b10 100644 --- a/docs/development/extensions.md +++ b/docs/development/extensions.md @@ -102,7 +102,7 @@ For information how to create your own extension, please see [here](../developme ### Loading core extensions -Apache Druid (incubating) bundles all [core extensions](../development/extensions.md#core-extensions) out of the box. +Apache Druid bundles all [core extensions](../development/extensions.md#core-extensions) out of the box. See the [list of extensions](../development/extensions.md#core-extensions) for your options. You can load bundled extensions by adding their names to your common.runtime.properties `druid.extensions.loadList` property. For example, to load the *postgresql-metadata-storage* and diff --git a/docs/development/geo.md b/docs/development/geo.md index 1f411f603389..40c0d3a79cbd 100644 --- a/docs/development/geo.md +++ b/docs/development/geo.md @@ -22,7 +22,7 @@ title: "Spatial filters" ~ under the License. --> -Apache Druid (incubating) supports filtering specially spatially indexed columns based on an origin and a bound. +Apache Druid supports filtering specially spatially indexed columns based on an origin and a bound. ## Spatial indexing diff --git a/docs/development/javascript.md b/docs/development/javascript.md index 3e4019bb34f7..0802c93c912f 100644 --- a/docs/development/javascript.md +++ b/docs/development/javascript.md @@ -24,7 +24,7 @@ sidebar_label: "JavaScript functionality" --> -This page discusses how to use JavaScript to extend Apache Druid (incubating). +This page discusses how to use JavaScript to extend Apache Druid. ## Examples diff --git a/docs/development/modules.md b/docs/development/modules.md index b424e12ae3bc..de2bbc1e6feb 100644 --- a/docs/development/modules.md +++ b/docs/development/modules.md @@ -228,7 +228,7 @@ where `SomePasswordProvider` is the implementation of `PasswordProvider` interfa When you do `mvn install`, Druid extensions will be packaged within the Druid tarball and `extensions` directory, which are both underneath `distribution/target/`. If you want your extension to be included, you can add your extension's maven coordinate as an argument at -[distribution/pom.xml](https://github.com/apache/incubator-druid/blob/master/distribution/pom.xml#L95) +[distribution/pom.xml](https://github.com/apache/druid/blob/master/distribution/pom.xml#L95) During `mvn install`, maven will install your extension to the local maven repository, and then call [pull-deps](../operations/pull-deps.md) to pull your extension from there. In the end, you should see your extension underneath `distribution/target/extensions` and within Druid tarball. diff --git a/docs/development/versioning.md b/docs/development/versioning.md index aa48ce677a1a..b01c28cc1bff 100644 --- a/docs/development/versioning.md +++ b/docs/development/versioning.md @@ -43,4 +43,4 @@ For external deployments, we recommend running the stable release tag. Releases Tagging strategy ---------------- -Tags of the codebase are equivalent to release candidates. 
We tag the code every time we want to take it through our release process, which includes some QA cycles and deployments. So, it is not safe to assume that a tag is a stable release, it is a solidification of the code as it goes through our production QA cycle and deployment. Tags will never change, but we often go through a number of iterations of tags before actually getting a stable release onto production. So, it is recommended that if you are not aware of what is on a tag, to stick to the stable releases listed on the [Release](https://github.com/apache/incubator-druid/releases) page. +Tags of the codebase are equivalent to release candidates. We tag the code every time we want to take it through our release process, which includes some QA cycles and deployments. So, it is not safe to assume that a tag is a stable release, it is a solidification of the code as it goes through our production QA cycle and deployment. Tags will never change, but we often go through a number of iterations of tags before actually getting a stable release onto production. So, it is recommended that if you are not aware of what is on a tag, to stick to the stable releases listed on the [Release](https://github.com/apache/druid/releases) page. diff --git a/docs/ingestion/data-formats.md b/docs/ingestion/data-formats.md index 1649b5b79b5b..b20f54a97e82 100644 --- a/docs/ingestion/data-formats.md +++ b/docs/ingestion/data-formats.md @@ -22,7 +22,7 @@ title: "Data formats" ~ under the License. --> -Apache Druid (incubating) can ingest denormalized data in JSON, CSV, or a delimited form such as TSV, or any custom format. While most examples in the documentation use data in JSON format, it is not difficult to configure Druid to ingest any other delimited data. +Apache Druid can ingest denormalized data in JSON, CSV, or a delimited form such as TSV, or any custom format. While most examples in the documentation use data in JSON format, it is not difficult to configure Druid to ingest any other delimited data. We welcome any contributions to new formats. For additional data formats, please see our [extensions list](../development/extensions.md). diff --git a/docs/ingestion/data-management.md b/docs/ingestion/data-management.md index 054e9f00ae5f..35ebcf4e67c3 100644 --- a/docs/ingestion/data-management.md +++ b/docs/ingestion/data-management.md @@ -27,7 +27,7 @@ title: "Data management" ## Schema changes -Schemas for datasources can change at any time and Apache Druid (incubating) supports different schemas among segments. +Schemas for datasources can change at any time and Apache Druid supports different schemas among segments. ### Replacing segments @@ -151,7 +151,7 @@ data segments loaded in it (or if the interval you specify is empty). The output segment can have different metadata from the input segments unless all input segments have the same metadata. -- Dimensions: since Apache Druid (incubating) supports schema change, the dimensions can be different across segments even if they are a part of the same dataSource. +- Dimensions: since Apache Druid supports schema change, the dimensions can be different across segments even if they are a part of the same dataSource. If the input segments have different dimensions, the output segment basically includes all dimensions of the input segments. However, even if the input segments have the same set of dimensions, the dimension order or the data type of dimensions can be different. 
For example, the data type of some dimensions can be changed from `string` to primitive types, or the order of dimensions can be changed for better locality. @@ -199,7 +199,7 @@ Druid does not support single-record updates by primary key. ## Updating existing data -Once you ingest some data in a dataSource for an interval and create Apache Druid (incubating) segments, you might want to make changes to +Once you ingest some data in a dataSource for an interval and create Apache Druid segments, you might want to make changes to the ingested data. There are several ways this can be done. ### Using lookups @@ -249,7 +249,7 @@ The Kill Task deletes unused segments within a specified interval from metadata For more information, please see [Kill Task](../ingestion/tasks.html#kill). -Permanent deletion of a segment in Apache Druid (incubating) has two steps: +Permanent deletion of a segment in Apache Druid has two steps: 1. The segment must first be marked as "unused". This occurs when a segment is dropped by retention rules, and when a user manually disables a segment through the Coordinator API. 2. After segments have been marked as "unused", a Kill Task will delete any "unused" segments from Druid's metadata store as well as deep storage. diff --git a/docs/ingestion/hadoop.md b/docs/ingestion/hadoop.md index 81a5ce2e844e..f2f58279a4e0 100644 --- a/docs/ingestion/hadoop.md +++ b/docs/ingestion/hadoop.md @@ -23,7 +23,7 @@ sidebar_label: "Hadoop-based" ~ under the License. --> -Apache Hadoop-based batch ingestion in Apache Druid (incubating) is supported via a Hadoop-ingestion task. These tasks can be posted to a running +Apache Hadoop-based batch ingestion in Apache Druid is supported via a Hadoop-ingestion task. These tasks can be posted to a running instance of a Druid [Overlord](../design/overlord.md). Please refer to our [Hadoop-based vs. native batch comparison table](index.md#batch) for comparisons between Hadoop-based, native batch (simple), and native batch (parallel) ingestion. @@ -448,7 +448,7 @@ java -Xmx256m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -classpath lib/*: -Apache Druid (incubating) currently has two types of native batch indexing tasks, `index_parallel` which can run +Apache Druid currently has two types of native batch indexing tasks, `index_parallel` which can run multiple tasks in parallel, and `index` which will run a single indexing task. Please refer to our [Hadoop-based vs. native batch comparison table](index.md#batch) for comparisons between Hadoop-based, native batch (simple), and native batch (parallel) ingestion. diff --git a/docs/ingestion/standalone-realtime.md b/docs/ingestion/standalone-realtime.md index 4fbfcc7fd286..62f31259964b 100644 --- a/docs/ingestion/standalone-realtime.md +++ b/docs/ingestion/standalone-realtime.md @@ -22,7 +22,7 @@ title: "Realtime Process" ~ under the License. --> -Older versions of Apache Druid (incubating) supported a standalone 'Realtime' process to query and index 'stream pull' +Older versions of Apache Druid supported a standalone 'Realtime' process to query and index 'stream pull' modes of real-time ingestion. These processes would periodically build segments for the data they had collected over some span of time and then set up hand-off to [Historical](../design/historical.html) servers. 
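As a companion to the Kill Task mentioned in the data-management.md hunk above, a minimal kill task payload might look like the sketch below; the datasource name and interval are placeholders.

```json
{
  "type": "kill",
  "dataSource": "wikipedia",
  "interval": "2013-08-31/2013-09-01"
}
```

Submitted to the Overlord, this deletes any segments in that interval that have already been marked "unused", from both the metadata store and deep storage.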
diff --git a/docs/operations/basic-cluster-tuning.md b/docs/operations/basic-cluster-tuning.md index a376ad97c941..ffa9d6795ac4 100644 --- a/docs/operations/basic-cluster-tuning.md +++ b/docs/operations/basic-cluster-tuning.md @@ -23,7 +23,7 @@ title: "Basic cluster tuning" --> -This document provides basic guidelines for configuration properties and cluster architecture considerations related to performance tuning of an Apache Druid (incubating) deployment. +This document provides basic guidelines for configuration properties and cluster architecture considerations related to performance tuning of an Apache Druid deployment. Please note that this document provides general guidelines and rules-of-thumb: these are not absolute, universal rules for cluster tuning, and this introductory guide is not an exhaustive description of all Druid tuning properties, which are described in the [configuration reference](../configuration/index.md). diff --git a/docs/operations/dump-segment.md b/docs/operations/dump-segment.md index 6da2415a4d3e..cc6eef107f44 100644 --- a/docs/operations/dump-segment.md +++ b/docs/operations/dump-segment.md @@ -23,7 +23,7 @@ title: "dump-segment tool" --> -The DumpSegment tool can be used to dump the metadata or contents of an Apache Druid (incubating) segment for debugging purposes. Note that the +The DumpSegment tool can be used to dump the metadata or contents of an Apache Druid segment for debugging purposes. Note that the dump is not necessarily a full-fidelity translation of the segment. In particular, not all metadata is included, and complex metric values may not be complete. diff --git a/docs/operations/high-availability.md b/docs/operations/high-availability.md index 0a1140eaa7d8..801d50c76014 100644 --- a/docs/operations/high-availability.md +++ b/docs/operations/high-availability.md @@ -31,7 +31,7 @@ and configuring ZooKeeper on them appropriately. See the [ZooKeeper admin guide] - For highly-available metadata storage, we recommend MySQL or PostgreSQL with replication and failover enabled. See [MySQL HA/Scalability Guide](https://dev.mysql.com/doc/mysql-ha-scalability/en/) and [PostgreSQL's High Availability, Load Balancing, and Replication](https://www.postgresql.org/docs/9.5/high-availability.html) for MySQL and PostgreSQL, respectively. -- For highly-available Apache Druid (incubating) Coordinators and Overlords, we recommend to run multiple servers. +- For highly-available Apache Druid Coordinators and Overlords, we recommend to run multiple servers. If they are all configured to use the same ZooKeeper cluster and metadata storage, then they will automatically failover between each other as necessary. Only one will be active at a time, but inactive servers will redirect to the currently active server. diff --git a/docs/operations/http-compression.md b/docs/operations/http-compression.md index bce4dd2fda75..58fd5aef367c 100644 --- a/docs/operations/http-compression.md +++ b/docs/operations/http-compression.md @@ -23,7 +23,7 @@ title: "HTTP compression" --> -Apache Druid (incubating) supports http request decompression and response compression, to use this, http request header `Content-Encoding:gzip` and `Accept-Encoding:gzip` is needed to be set. +Apache Druid supports http request decompression and response compression, to use this, http request header `Content-Encoding:gzip` and `Accept-Encoding:gzip` is needed to be set. 
|Property|Description|Default| |--------|-----------|-------| diff --git a/docs/operations/insert-segment-to-db.md b/docs/operations/insert-segment-to-db.md index f559c907cc09..4de93a76f00f 100644 --- a/docs/operations/insert-segment-to-db.md +++ b/docs/operations/insert-segment-to-db.md @@ -23,7 +23,7 @@ title: "insert-segment-to-db tool" --> -In older versions of Apache Druid (incubating), `insert-segment-to-db` was a tool that could scan deep storage and +In older versions of Apache Druid, `insert-segment-to-db` was a tool that could scan deep storage and insert data from there into Druid metadata storage. It was intended to be used to update the segment table in the metadata storage after manually migrating segments from one place to another, or even to recover lost metadata storage by telling it where the segments are stored. diff --git a/docs/operations/kubernetes.md b/docs/operations/kubernetes.md index 2a90e82a0a10..cba2bf044562 100644 --- a/docs/operations/kubernetes.md +++ b/docs/operations/kubernetes.md @@ -23,10 +23,10 @@ title: "kubernetes" --> -Apache Druid (incubating) distribution is also available as [Docker](https://www.docker.com/) image from [Docker Hub](https://hub.docker.com/r/apache/incubator-druid) . For example, you can obtain release 0.16.0-incubating using the command below. +Apache Druid distribution is also available as [Docker](https://www.docker.com/) image from [Docker Hub](https://hub.docker.com/r/apache/druid) . For example, you can obtain release 0.16.0-incubating using the command below. ``` -$docker pull apache/incubator-druid:0.16.0-incubating +$docker pull apache/druid:0.16.0-incubating ``` [druid-operator](https://github.com/druid-io/druid-operator) can be used to manage a Druid cluster on [Kubernetes](https://kubernetes.io/) . diff --git a/docs/operations/other-hadoop.md b/docs/operations/other-hadoop.md index f76c0eb98926..60a9cd811474 100644 --- a/docs/operations/other-hadoop.md +++ b/docs/operations/other-hadoop.md @@ -23,7 +23,7 @@ title: "Working with different versions of Apache Hadoop" --> -Apache Druid (incubating) can interact with Hadoop in two ways: +Apache Druid can interact with Hadoop in two ways: 1. [Use HDFS for deep storage](../development/extensions-core/hdfs.md) using the druid-hdfs-storage extension. 2. [Batch-load data from Hadoop](../ingestion/hadoop.md) using Map/Reduce jobs. diff --git a/docs/operations/password-provider.md b/docs/operations/password-provider.md index bbb1e75e87c3..4a28e64aae8b 100644 --- a/docs/operations/password-provider.md +++ b/docs/operations/password-provider.md @@ -23,7 +23,7 @@ title: "Password providers" --> -Apache Druid (incubating) needs some passwords for accessing various secured systems like metadata store, Key Store containing server certificates etc. +Apache Druid needs some passwords for accessing various secured systems like metadata store, Key Store containing server certificates etc. All these passwords have corresponding runtime properties associated with them, for example `druid.metadata.storage.connector.password` corresponds to the metadata store password. 
By default users can directly set the passwords in plaintext for these runtime properties, for example `druid.metadata.storage.connector.password=pwd` sets the metadata store password diff --git a/docs/operations/pull-deps.md b/docs/operations/pull-deps.md index f95be2a407f0..5028fa112535 100644 --- a/docs/operations/pull-deps.md +++ b/docs/operations/pull-deps.md @@ -23,7 +23,7 @@ title: "pull-deps tool" --> -`pull-deps` is an Apache Druid (incubating) tool that can pull down dependencies to the local repository and lay dependencies out into the extension directory as needed. +`pull-deps` is an Apache Druid tool that can pull down dependencies to the local repository and lay dependencies out into the extension directory as needed. `pull-deps` has several command line options, they are as follows: diff --git a/docs/operations/reset-cluster.md b/docs/operations/reset-cluster.md index 598ba8d31d24..80db68436fe8 100644 --- a/docs/operations/reset-cluster.md +++ b/docs/operations/reset-cluster.md @@ -23,7 +23,7 @@ title: "reset-cluster tool" --> -The `reset-cluster` tool can be used to completely wipe out Apache Druid (incubating) cluster state stored on Metadata and Deep storage. This is +The `reset-cluster` tool can be used to completely wipe out Apache Druid cluster state stored on Metadata and Deep storage. This is intended to be used in dev/test environments where you typically want to reset the cluster before running the test suite. `reset-cluster` automatically figures out necessary information from Druid cluster configuration. So the java classpath diff --git a/docs/operations/rolling-updates.md b/docs/operations/rolling-updates.md index e205f25b82a9..445849030d50 100644 --- a/docs/operations/rolling-updates.md +++ b/docs/operations/rolling-updates.md @@ -23,7 +23,7 @@ title: "Rolling updates" --> -For rolling Apache Druid (incubating) cluster updates with no downtime, we recommend updating Druid processes in the +For rolling Apache Druid cluster updates with no downtime, we recommend updating Druid processes in the following order: 1. Historical diff --git a/docs/operations/rule-configuration.md b/docs/operations/rule-configuration.md index 5eac7858f58f..e66eef02be7f 100644 --- a/docs/operations/rule-configuration.md +++ b/docs/operations/rule-configuration.md @@ -23,7 +23,7 @@ title: "Retaining or automatically dropping data" --> -In Apache Druid (incubating), Coordinator processes use rules to determine what data should be loaded to or dropped from the cluster. Rules are used for data retention and query execution, and are set on the Coordinator console (http://coordinator_ip:port). +In Apache Druid, Coordinator processes use rules to determine what data should be loaded to or dropped from the cluster. Rules are used for data retention and query execution, and are set on the Coordinator console (http://coordinator_ip:port). There are three types of rules, i.e., load rules, drop rules, and broadcast rules. Load rules indicate how segments should be assigned to different historical process tiers and how many replicas of a segment should exist in each tier. Drop rules indicate when segments should be dropped entirely from the cluster. Finally, broadcast rules indicate how segments of different data sources should be co-located in Historical processes. 
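To illustrate the rule types described in the rule-configuration.md hunk above, a datasource's rule chain might combine a load rule and a drop rule as in this sketch (the period and tier name are placeholders):

```json
[
  {
    "type": "loadByPeriod",
    "period": "P1M",
    "tieredReplicants": { "_default_tier": 2 }
  },
  { "type": "dropForever" }
]
```

Rules are evaluated in order, so this keeps two replicas of the most recent month in the default tier and drops everything older.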
diff --git a/docs/operations/segment-optimization.md b/docs/operations/segment-optimization.md index 4f8e501ec3b0..9c3b903b74b1 100644 --- a/docs/operations/segment-optimization.md +++ b/docs/operations/segment-optimization.md @@ -23,7 +23,7 @@ title: "Segment Size Optimization" --> -In Apache Druid (incubating), it's important to optimize the segment size because +In Apache Druid, it's important to optimize the segment size because 1. Druid stores data in segments. If you're using the [best-effort roll-up](../ingestion/index.md#rollup) mode, increasing the segment size might introduce further aggregation which reduces the dataSource size. diff --git a/docs/operations/tls-support.md b/docs/operations/tls-support.md index 15e781a640d1..4eb07d13eb0c 100644 --- a/docs/operations/tls-support.md +++ b/docs/operations/tls-support.md @@ -34,7 +34,7 @@ and `druid.tlsPort` properties on each process. Please see `Configuration` secti ## Jetty server configuration -Apache Druid (incubating) uses Jetty as an embedded web server. To get familiar with TLS/SSL in general and related concepts like Certificates etc. +Apache Druid uses Jetty as an embedded web server. To get familiar with TLS/SSL in general and related concepts like Certificates etc. reading this [Jetty documentation](http://www.eclipse.org/jetty/documentation/9.4.x/configuring-ssl.html) might be helpful. To get more in depth knowledge of TLS/SSL support in Java in general, please refer to this [guide](http://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html). The documentation [here](http://www.eclipse.org/jetty/documentation/9.4.x/configuring-ssl.html#configuring-sslcontextfactory) @@ -82,7 +82,7 @@ Since, there are various ways to configure SSLContext, by default, Druid looks f while creating the HttpClient. This binding can be achieved writing a [Druid extension](../development/extensions.md) which can provide an instance of SSLContext. Druid comes with a simple extension present [here](../development/extensions-core/simple-client-sslcontext.md) which should be useful enough for most simple cases, see [this](../development/extensions.md#loading-extensions) for how to include extensions. -If this extension does not satisfy the requirements then please follow the extension [implementation](https://github.com/apache/incubator-druid/tree/master/extensions-core/simple-client-sslcontext) +If this extension does not satisfy the requirements then please follow the extension [implementation](https://github.com/apache/druid/tree/master/extensions-core/simple-client-sslcontext) to create your own extension. When Druid Coordinator/Overlord have both HTTP and HTTPS enabled and Client sends request to non-leader process, then Client is always redirected to the HTTPS endpoint on leader process. diff --git a/docs/querying/aggregations.md b/docs/querying/aggregations.md index 4bc760529493..c1d8d6537c91 100644 --- a/docs/querying/aggregations.md +++ b/docs/querying/aggregations.md @@ -23,7 +23,7 @@ title: "Aggregations" --> -Aggregations can be provided at ingestion time as part of the ingestion spec as a way of summarizing data before it enters Apache Druid (incubating). +Aggregations can be provided at ingestion time as part of the ingestion spec as a way of summarizing data before it enters Apache Druid. Aggregations can also be specified as part of many queries at query time. 
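For example, the `aggregations` section of a query or ingestion spec is simply a JSON array of aggregator objects, as in this sketch (the metric and column names are placeholders):

```json
[
  { "type": "count", "name": "count" },
  { "type": "longSum", "name": "added", "fieldName": "added" },
  { "type": "doubleSum", "name": "delta", "fieldName": "delta" }
]
```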
Available aggregations are: diff --git a/docs/querying/caching.md b/docs/querying/caching.md index 6b6811abd211..d6ac90250e91 100644 --- a/docs/querying/caching.md +++ b/docs/querying/caching.md @@ -23,7 +23,7 @@ title: "Query caching" --> -Apache Druid (incubating) supports query result caching at both the segment and whole-query result level. Cache data can be stored in the +Apache Druid supports query result caching at both the segment and whole-query result level. Cache data can be stored in the local JVM heap or in an external distributed key/value store. In all cases, the Druid cache is a query result cache. The only difference is whether the result is a _partial result_ for a particular segment, or the result for an entire query. In both cases, the cache is invalidated as soon as any underlying data changes; it will never return a stale diff --git a/docs/querying/datasource.md b/docs/querying/datasource.md index a38263ee545d..25ab8e0ccc5a 100644 --- a/docs/querying/datasource.md +++ b/docs/querying/datasource.md @@ -23,7 +23,7 @@ title: "Datasources" --> -A data source is the Apache Druid (incubating) equivalent of a database table. However, a query can also masquerade as a data source, providing subquery-like functionality. Query data sources are currently supported only by [GroupBy](../querying/groupbyquery.md) queries. +A data source is the Apache Druid equivalent of a database table. However, a query can also masquerade as a data source, providing subquery-like functionality. Query data sources are currently supported only by [GroupBy](../querying/groupbyquery.md) queries. ### Table datasource The table data source is the most common type. It's represented by a string, or by the full structure: diff --git a/docs/querying/datasourcemetadataquery.md b/docs/querying/datasourcemetadataquery.md index ecd73afa026c..9d86d027d45b 100644 --- a/docs/querying/datasourcemetadataquery.md +++ b/docs/querying/datasourcemetadataquery.md @@ -41,7 +41,7 @@ There are 2 main parts to a Data Source Metadata query: |property|description|required?| |--------|-----------|---------| -|queryType|This String should always be "dataSourceMetadata"; this is the first thing Apache Druid (incubating) looks at to figure out how to interpret the query|yes| +|queryType|This String should always be "dataSourceMetadata"; this is the first thing Apache Druid looks at to figure out how to interpret the query|yes| |dataSource|A String or Object defining the data source to query, very similar to a table in a relational database. See [DataSource](../querying/datasource.md) for more information.|yes| |context|See [Context](../querying/query-context.md)|no| diff --git a/docs/querying/dimensionspecs.md b/docs/querying/dimensionspecs.md index 0120813c1e83..a60cb86722ef 100644 --- a/docs/querying/dimensionspecs.md +++ b/docs/querying/dimensionspecs.md @@ -66,7 +66,7 @@ Please refer to the [Output Types](#output-types) section for more details. ### Filtered DimensionSpecs -These are only useful for multi-value dimensions. If you have a row in Apache Druid (incubating) that has a multi-value dimension with values ["v1", "v2", "v3"] and you send a groupBy/topN query grouping by that dimension with [query filter](filters.html) for value "v1". In the response you will get 3 rows containing "v1", "v2" and "v3". This behavior might be unintuitive for some use cases. +These are only useful for multi-value dimensions. 
If you have a row in Apache Druid that has a multi-value dimension with values ["v1", "v2", "v3"] and you send a groupBy/topN query grouping by that dimension with [query filter](filters.html) for value "v1". In the response you will get 3 rows containing "v1", "v2" and "v3". This behavior might be unintuitive for some use cases. It happens because "query filter" is internally used on the bitmaps and only used to match the row to be included in the query result processing. With multi-value dimensions, "query filter" behaves like a contains check, which will match the row with dimension value ["v1", "v2", "v3"]. Please see the section on "Multi-value columns" in [segment](../design/segments.md) for more details. Then groupBy/topN processing pipeline "explodes" all multi-value dimensions resulting 3 rows for "v1", "v2" and "v3" each. diff --git a/docs/querying/filters.md b/docs/querying/filters.md index 5fed8712ad1d..0d9ad6e30ac0 100644 --- a/docs/querying/filters.md +++ b/docs/querying/filters.md @@ -23,7 +23,7 @@ title: "Query Filters" --> -A filter is a JSON object indicating which rows of data should be included in the computation for a query. It’s essentially the equivalent of the WHERE clause in SQL. Apache Druid (incubating) supports the following types of filters. +A filter is a JSON object indicating which rows of data should be included in the computation for a query. It’s essentially the equivalent of the WHERE clause in SQL. Apache Druid supports the following types of filters. ### Selector filter diff --git a/docs/querying/granularities.md b/docs/querying/granularities.md index 838b00d16e7d..d98413fa4109 100644 --- a/docs/querying/granularities.md +++ b/docs/querying/granularities.md @@ -38,7 +38,7 @@ Supported granularity strings are: `all`, `none`, `second`, `minute`, `fifteen_m #### Example: -Suppose you have data below stored in Apache Druid (incubating) with millisecond ingestion granularity, +Suppose you have data below stored in Apache Druid with millisecond ingestion granularity, ``` json {"timestamp": "2013-08-31T01:02:33Z", "page": "AAA", "language" : "en"} diff --git a/docs/querying/groupbyquery.md b/docs/querying/groupbyquery.md index 6297c16abc59..a8ffb64f62ba 100644 --- a/docs/querying/groupbyquery.md +++ b/docs/querying/groupbyquery.md @@ -24,7 +24,7 @@ sidebar_label: "GroupBy" --> -These types of Apache Druid (incubating) queries take a groupBy query object and return an array of JSON objects where each object represents a +These types of Apache Druid queries take a groupBy query object and return an array of JSON objects where each object represents a grouping asked for by the query. > Note: If you are doing aggregations with time as your only grouping, or an ordered groupBy over a single dimension, diff --git a/docs/querying/having.md b/docs/querying/having.md index 9780c55b9b4e..2627af42e4c5 100644 --- a/docs/querying/having.md +++ b/docs/querying/having.md @@ -27,7 +27,7 @@ A having clause is a JSON object identifying which rows from a groupBy query sho It is essentially the equivalent of the HAVING clause in SQL. -Apache Druid (incubating) supports the following types of having clauses. +Apache Druid supports the following types of having clauses. 
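For instance, a having clause that keeps only groups whose aggregated row count exceeds a threshold could look like the following sketch (the aggregator name and threshold value are placeholders):

```json
{
  "type": "greaterThan",
  "aggregation": "count",
  "value": 100
}
```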
### Query filters diff --git a/docs/querying/hll-old.md b/docs/querying/hll-old.md index 2b6ea1ff83a0..8e40c999fdc2 100644 --- a/docs/querying/hll-old.md +++ b/docs/querying/hll-old.md @@ -25,7 +25,7 @@ title: "Cardinality/HyperUnique aggregators" ## Cardinality aggregator -Computes the cardinality of a set of Apache Druid (incubating) dimensions, using HyperLogLog to estimate the cardinality. Please note that this +Computes the cardinality of a set of Apache Druid dimensions, using HyperLogLog to estimate the cardinality. Please note that this aggregator will be much slower than indexing a column with the hyperUnique aggregator. This aggregator also runs over a dimension column, which means the string dimension cannot be removed from the dataset to improve rollup. In general, we strongly recommend using the hyperUnique aggregator instead of the cardinality aggregator if you do not care about the individual values of a dimension. diff --git a/docs/querying/joins.md b/docs/querying/joins.md index a8bd757dee23..ef444882e735 100644 --- a/docs/querying/joins.md +++ b/docs/querying/joins.md @@ -23,7 +23,7 @@ title: "Joins" --> -Apache Druid (incubating) has limited support for joins through [query-time lookups](../querying/lookups.md). The common use case of +Apache Druid has limited support for joins through [query-time lookups](../querying/lookups.md). The common use case of query-time lookups is to replace one dimension value (e.g. a String ID) with another value (e.g. a human-readable String value). This is similar to a star-schema join. Druid does not yet have full support for joins. Although Druid’s storage format would allow for the implementation diff --git a/docs/querying/lookups.md b/docs/querying/lookups.md index 31f9323e220a..e70bb329917c 100644 --- a/docs/querying/lookups.md +++ b/docs/querying/lookups.md @@ -25,7 +25,7 @@ title: "Lookups" > Lookups are an [experimental](../development/experimental.md) feature. -Lookups are a concept in Apache Druid (incubating) where dimension values are (optionally) replaced with new values, allowing join-like +Lookups are a concept in Apache Druid where dimension values are (optionally) replaced with new values, allowing join-like functionality. Applying lookups in Druid is similar to joining a dimension table in a data warehouse. See [dimension specs](../querying/dimensionspecs.md) for more information. For the purpose of these documents, a "key" refers to a dimension value to match, and a "value" refers to its replacement. So if you wanted to map diff --git a/docs/querying/multi-value-dimensions.md b/docs/querying/multi-value-dimensions.md index f2df3e32764a..2c4784298265 100644 --- a/docs/querying/multi-value-dimensions.md +++ b/docs/querying/multi-value-dimensions.md @@ -23,7 +23,7 @@ title: "Multi-value dimensions" --> -Apache Druid (incubating) supports "multi-value" string dimensions. These are generated when an input field contains an +Apache Druid supports "multi-value" string dimensions. These are generated when an input field contains an array of values instead of a single value (e.g. JSON arrays, or a TSV field containing one or more `listDelimiter` characters). diff --git a/docs/querying/multitenancy.md b/docs/querying/multitenancy.md index 645cc924f2c4..d02373159e1e 100644 --- a/docs/querying/multitenancy.md +++ b/docs/querying/multitenancy.md @@ -23,7 +23,7 @@ title: "Multitenancy considerations" --> -Apache Druid (incubating) is often used to power user-facing data applications, where multitenancy is an important requirement. 
This +Apache Druid is often used to power user-facing data applications, where multitenancy is an important requirement. This document outlines Druid's multitenant storage and querying features. ## Shared datasources or datasource-per-tenant? diff --git a/docs/querying/post-aggregations.md b/docs/querying/post-aggregations.md index fa89e534801f..bc5935a046d6 100644 --- a/docs/querying/post-aggregations.md +++ b/docs/querying/post-aggregations.md @@ -23,7 +23,7 @@ title: "Post-Aggregations" --> -Post-aggregations are specifications of processing that should happen on aggregated values as they come out of Apache Druid (incubating). If you include a post aggregation as part of a query, make sure to include all aggregators the post-aggregator requires. +Post-aggregations are specifications of processing that should happen on aggregated values as they come out of Apache Druid. If you include a post aggregation as part of a query, make sure to include all aggregators the post-aggregator requires. There are several post-aggregators available. diff --git a/docs/querying/query-context.md b/docs/querying/query-context.md index 573aec5a6716..0ba16e8d6abd 100644 --- a/docs/querying/query-context.md +++ b/docs/querying/query-context.md @@ -30,7 +30,7 @@ The query context is used for various query configuration parameters. The follow |timeout | `druid.server.http.defaultQueryTimeout`| Query timeout in millis, beyond which unfinished queries will be cancelled. 0 timeout means `no timeout`. To set the default timeout, see [Broker configuration](../configuration/index.html#broker) | |priority | `0` | Query Priority. Queries with higher priority get precedence for computational resources.| |queryId | auto-generated | Unique identifier given to this query. If a query ID is set or known, this can be used to cancel the query | -|useCache | `true` | Flag indicating whether to leverage the query cache for this query. When set to false, it disables reading from the query cache for this query. When set to true, Apache Druid (incubating) uses `druid.broker.cache.useCache` or `druid.historical.cache.useCache` to determine whether or not to read from the query cache | +|useCache | `true` | Flag indicating whether to leverage the query cache for this query. When set to false, it disables reading from the query cache for this query. When set to true, Apache Druid uses `druid.broker.cache.useCache` or `druid.historical.cache.useCache` to determine whether or not to read from the query cache | |populateCache | `true` | Flag indicating whether to save the results of the query to the query cache. Primarily used for debugging. When set to false, it disables saving the results of this query to the query cache. When set to true, Druid uses `druid.broker.cache.populateCache` or `druid.historical.cache.populateCache` to determine whether or not to save the results of this query to the query cache | |useResultLevelCache | `true` | Flag indicating whether to leverage the result level cache for this query. When set to false, it disables reading from the query cache for this query. When set to true, Druid uses `druid.broker.cache.useResultLevelCache` to determine whether or not to read from the result-level query cache | |populateResultLevelCache | `true` | Flag indicating whether to save the results of the query to the result level cache. Primarily used for debugging. When set to false, it disables saving the results of this query to the query cache. 
When set to true, Druid uses `druid.broker.cache.populateResultLevelCache` to determine whether or not to save the results of this query to the result-level query cache | diff --git a/docs/querying/querying.md b/docs/querying/querying.md index 29b925dda975..f102173263ab 100644 --- a/docs/querying/querying.md +++ b/docs/querying/querying.md @@ -24,7 +24,7 @@ sidebar_label: "Making native queries" --> -> Apache Druid (incubating) supports two query languages: [Druid SQL](sql.md) and native queries. Druid SQL +> Apache Druid supports two query languages: [Druid SQL](sql.md) and native queries. Druid SQL > queries are planned into native queries. This document describes the native query language. Native queries in Druid are JSON objects and are typically issued to the Broker or Router processes. Queries can be diff --git a/docs/querying/scan-query.md b/docs/querying/scan-query.md index 934f5eb6ae58..fd1698886b4f 100644 --- a/docs/querying/scan-query.md +++ b/docs/querying/scan-query.md @@ -24,7 +24,7 @@ sidebar_label: "Scan" --> -The Scan query returns raw Apache Druid (incubating) rows in streaming mode. +The Scan query returns raw Apache Druid rows in streaming mode. In addition to straightforward usage where a Scan query is issued to the Broker, the Scan query can also be issued directly to Historical processes or streaming ingestion tasks. This can be useful if you want to retrieve large diff --git a/docs/querying/searchquery.md b/docs/querying/searchquery.md index a0246e8c7855..e88bdec5d28a 100644 --- a/docs/querying/searchquery.md +++ b/docs/querying/searchquery.md @@ -52,7 +52,7 @@ There are several main parts to a search query: |property|description|required?| |--------|-----------|---------| -|queryType|This String should always be "search"; this is the first thing Apache Druid (incubating) looks at to figure out how to interpret the query.|yes| +|queryType|This String should always be "search"; this is the first thing Apache Druid looks at to figure out how to interpret the query.|yes| |dataSource|A String or Object defining the data source to query, very similar to a table in a relational database. See [DataSource](../querying/datasource.md) for more information.|yes| |granularity|Defines the granularity of the query. See [Granularities](../querying/granularities.md).|yes| |filter|See [Filters](../querying/filters.md).|no| diff --git a/docs/querying/segmentmetadataquery.md b/docs/querying/segmentmetadataquery.md index e6a2651b30bf..9a02b29a9b1b 100644 --- a/docs/querying/segmentmetadataquery.md +++ b/docs/querying/segmentmetadataquery.md @@ -48,7 +48,7 @@ There are several main parts to a segment metadata query: |property|description|required?| |--------|-----------|---------| -|queryType|This String should always be "segmentMetadata"; this is the first thing Apache Druid (incubating) looks at to figure out how to interpret the query|yes| +|queryType|This String should always be "segmentMetadata"; this is the first thing Apache Druid looks at to figure out how to interpret the query|yes| |dataSource|A String or Object defining the data source to query, very similar to a table in a relational database. See [DataSource](../querying/datasource.md) for more information.|yes| |intervals|A JSON Object representing ISO-8601 Intervals. This defines the time ranges to run the query over.|no| |toInclude|A JSON Object representing what columns should be included in the result. 
Defaults to "all".|no| diff --git a/docs/querying/select-query.md b/docs/querying/select-query.md index 13da7cee9db6..be7ede132b09 100644 --- a/docs/querying/select-query.md +++ b/docs/querying/select-query.md @@ -24,6 +24,6 @@ sidebar_label: "Select" --> -Older versions of Apache Druid (incubating) included a Select query type. Since Druid 0.17.0, it has been removed and replaced by the [Scan query](../querying/scan-query.md), which offers improved memory usage and performance. This solves issues that users had with Select queries causing Druid to run out of memory or slow down. +Older versions of Apache Druid included a Select query type. Since Druid 0.17.0, it has been removed and replaced by the [Scan query](../querying/scan-query.md), which offers improved memory usage and performance. This solves issues that users had with Select queries causing Druid to run out of memory or slow down. The Scan query has a different syntax, but supports many of the features of the Select query, including time ordering and limiting. Scan does not include the Select query's pagination feature; however, in many cases pagination is unnecessary with Scan due to its ability to return a virtually unlimited number of results in one call. diff --git a/docs/querying/sql.md b/docs/querying/sql.md index 8d6a04840407..8aa1367d14cc 100644 --- a/docs/querying/sql.md +++ b/docs/querying/sql.md @@ -31,7 +31,7 @@ sidebar_label: "Druid SQL" --> -> Apache Druid (incubating) supports two query languages: Druid SQL and [native queries](querying.md), which +> Apache Druid supports two query languages: Druid SQL and [native queries](querying.md), which > SQL queries are planned into, and which end users can also issue directly. This document describes the SQL language. Druid SQL is a built-in SQL layer and an alternative to Druid's native JSON-based query language, and is powered by a @@ -750,7 +750,7 @@ GROUP BY 1 ORDER BY 2 DESC ``` -*Caveat:* Note that a segment can be served by more than one stream ingestion tasks or Historical processes, in that case it would have multiple replicas. These replicas are weakly consistent with each other when served by multiple ingestion tasks, until a segment is eventually served by a Historical, at that point the segment is immutable. Broker prefers to query a segment from Historical over an ingestion task. But if a segment has multiple realtime replicas, for e.g.. Kafka index tasks, and one task is slower than other, then the sys.segments query results can vary for the duration of the tasks because only one of the ingestion tasks is queried by the Broker and it is not guaranteed that the same task gets picked every time. The `num_rows` column of segments table can have inconsistent values during this period. There is an open [issue](https://github.com/apache/incubator-druid/issues/5915) about this inconsistency with stream ingestion tasks. +*Caveat:* Note that a segment can be served by more than one stream ingestion tasks or Historical processes, in that case it would have multiple replicas. These replicas are weakly consistent with each other when served by multiple ingestion tasks, until a segment is eventually served by a Historical, at that point the segment is immutable. Broker prefers to query a segment from Historical over an ingestion task. But if a segment has multiple realtime replicas, for e.g.. 
Kafka index tasks, and one task is slower than other, then the sys.segments query results can vary for the duration of the tasks because only one of the ingestion tasks is queried by the Broker and it is not guaranteed that the same task gets picked every time. The `num_rows` column of segments table can have inconsistent values during this period. There is an open [issue](https://github.com/apache/druid/issues/5915) about this inconsistency with stream ingestion tasks. #### SERVERS table diff --git a/docs/querying/timeboundaryquery.md b/docs/querying/timeboundaryquery.md index 34aaae393d94..5e096a59441b 100644 --- a/docs/querying/timeboundaryquery.md +++ b/docs/querying/timeboundaryquery.md @@ -39,7 +39,7 @@ There are 3 main parts to a time boundary query: |property|description|required?| |--------|-----------|---------| -|queryType|This String should always be "timeBoundary"; this is the first thing Apache Druid (incubating) looks at to figure out how to interpret the query|yes| +|queryType|This String should always be "timeBoundary"; this is the first thing Apache Druid looks at to figure out how to interpret the query|yes| |dataSource|A String or Object defining the data source to query, very similar to a table in a relational database. See [DataSource](../querying/datasource.md) for more information.|yes| |bound | Optional, set to `maxTime` or `minTime` to return only the latest or earliest timestamp. Default to returning both if not set| no | |filter|See [Filters](../querying/filters.md)|no| diff --git a/docs/querying/timeseriesquery.md b/docs/querying/timeseriesquery.md index 69fdaa2a4485..2ea2210d1175 100644 --- a/docs/querying/timeseriesquery.md +++ b/docs/querying/timeseriesquery.md @@ -68,7 +68,7 @@ There are 7 main parts to a timeseries query: |property|description|required?| |--------|-----------|---------| -|queryType|This String should always be "timeseries"; this is the first thing Apache Druid (incubating) looks at to figure out how to interpret the query|yes| +|queryType|This String should always be "timeseries"; this is the first thing Apache Druid looks at to figure out how to interpret the query|yes| |dataSource|A String or Object defining the data source to query, very similar to a table in a relational database. See [DataSource](../querying/datasource.md) for more information.|yes| |descending|Whether to make descending ordered result. Default is `false`(ascending).|no| |intervals|A JSON Object representing ISO-8601 Intervals. This defines the time ranges to run the query over.|yes| diff --git a/docs/querying/topnmetricspec.md b/docs/querying/topnmetricspec.md index 8c113f760e42..74917a359539 100644 --- a/docs/querying/topnmetricspec.md +++ b/docs/querying/topnmetricspec.md @@ -23,7 +23,7 @@ title: "TopNMetricSpec" --> -In Apache Druid (incubating), the topN metric spec specifies how topN values should be sorted. +In Apache Druid, the topN metric spec specifies how topN values should be sorted. ## Numeric TopNMetricSpec diff --git a/docs/querying/topnquery.md b/docs/querying/topnquery.md index 1e6609b871a8..2f57068e36e3 100644 --- a/docs/querying/topnquery.md +++ b/docs/querying/topnquery.md @@ -24,7 +24,7 @@ sidebar_label: "TopN" --> -Apache Druid (incubating) TopN queries return a sorted set of results for the values in a given dimension according to some criteria. Conceptually, they can be thought of as an approximate [GroupByQuery](../querying/groupbyquery.md) over a single dimension with an [Ordering](../querying/limitspec.md) spec. 
TopNs are much faster and resource efficient than GroupBys for this use case. These types of queries take a topN query object and return an array of JSON objects where each object represents a value asked for by the topN query. +Apache Druid TopN queries return a sorted set of results for the values in a given dimension according to some criteria. Conceptually, they can be thought of as an approximate [GroupByQuery](../querying/groupbyquery.md) over a single dimension with an [Ordering](../querying/limitspec.md) spec. TopNs are much faster and resource efficient than GroupBys for this use case. These types of queries take a topN query object and return an array of JSON objects where each object represents a value asked for by the topN query. TopNs are approximate in that each data process will rank their top K results and only return those top K results to the Broker. K, by default in Druid, is `max(1000, threshold)`. In practice, this means that if you ask for the top 1000 items ordered, the correctness of the first ~900 items will be 100%, and the ordering of the results after that is not guaranteed. TopNs can be made more accurate by increasing the threshold. diff --git a/docs/querying/virtual-columns.md b/docs/querying/virtual-columns.md index 6b4259832827..0918a2fe3bac 100644 --- a/docs/querying/virtual-columns.md +++ b/docs/querying/virtual-columns.md @@ -29,7 +29,7 @@ A virtual column can potentially draw from multiple underlying columns, although Virtual columns can be used as dimensions or as inputs to aggregators. -Each Apache Druid (incubating) query can accept a list of virtual columns as a parameter. The following scan query is provided as an example: +Each Apache Druid query can accept a list of virtual columns as a parameter. The following scan query is provided as an example: ``` { diff --git a/docs/tutorials/cluster.md b/docs/tutorials/cluster.md index b795227884f2..74856562286b 100644 --- a/docs/tutorials/cluster.md +++ b/docs/tutorials/cluster.md @@ -23,7 +23,7 @@ title: "Clustered deployment" --> -Apache Druid (incubating) is designed to be deployed as a scalable, fault-tolerant cluster. +Apache Druid is designed to be deployed as a scalable, fault-tolerant cluster. In this document, we'll set up a simple cluster and discuss how it can be further configured to meet your needs. 
@@ -160,7 +160,7 @@ cd apache-druid-{{DRUIDVERSION}} In the package, you should find: -* `DISCLAIMER`, `LICENSE`, and `NOTICE` files +* `LICENSE` and `NOTICE` files * `bin/*` - scripts related to the [single-machine quickstart](index.html) * `conf/druid/cluster/*` - template configurations for a clustered setup * `extensions/*` - core Druid extensions diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index 3d12fcc6bbd9..928262b61f99 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -67,7 +67,7 @@ cd apache-druid-{{DRUIDVERSION}} In the package, you should find: -* `DISCLAIMER`, `LICENSE`, and `NOTICE` files +* `LICENSE` and `NOTICE` files * `bin/*` - scripts useful for this quickstart * `conf/*` - example configurations for single-server and clustered setup * `extensions/*` - core Druid extensions diff --git a/docs/tutorials/tutorial-batch-hadoop.md b/docs/tutorials/tutorial-batch-hadoop.md index d9731e498246..38abbfabdd3e 100644 --- a/docs/tutorials/tutorial-batch-hadoop.md +++ b/docs/tutorials/tutorial-batch-hadoop.md @@ -24,7 +24,7 @@ sidebar_label: "Load from Apache Hadoop" --> -This tutorial shows you how to load data files into Apache Druid (incubating) using a remote Hadoop cluster. +This tutorial shows you how to load data files into Apache Druid using a remote Hadoop cluster. For this tutorial, we'll assume that you've already completed the previous [batch ingestion tutorial](tutorial-batch.html) using Druid's native batch ingestion system and are using the diff --git a/docs/tutorials/tutorial-batch.md b/docs/tutorials/tutorial-batch.md index e85613667af6..e175d4a272c2 100644 --- a/docs/tutorials/tutorial-batch.md +++ b/docs/tutorials/tutorial-batch.md @@ -24,7 +24,7 @@ sidebar_label: "Loading files natively" --> -This tutorial demonstrates how to perform a batch file load, using Apache Druid (incubating)'s native batch ingestion. +This tutorial demonstrates how to perform a batch file load, using Apache Druid's native batch ingestion. For this tutorial, we'll assume you've already downloaded Druid as described in the [quickstart](index.html) using the `micro-quickstart` single-machine configuration and have it diff --git a/docs/tutorials/tutorial-compaction.md b/docs/tutorials/tutorial-compaction.md index bec0cafc15ae..8da333b0d735 100644 --- a/docs/tutorials/tutorial-compaction.md +++ b/docs/tutorials/tutorial-compaction.md @@ -29,7 +29,7 @@ This tutorial demonstrates how to compact existing segments into fewer but large Because there is some per-segment memory and processing overhead, it can sometimes be beneficial to reduce the total number of segments. Please check [Segment size optimization](../operations/segment-optimization.md) for details. -For this tutorial, we'll assume you've already downloaded Apache Druid (incubating) as described in +For this tutorial, we'll assume you've already downloaded Apache Druid as described in the [single-machine quickstart](index.html) and have it running on your local machine. It will also be helpful to have finished [Tutorial: Loading a file](../tutorials/tutorial-batch.md) and [Tutorial: Querying data](../tutorials/tutorial-query.md). diff --git a/docs/tutorials/tutorial-delete-data.md b/docs/tutorials/tutorial-delete-data.md index cd1e054c52d2..4f08b0ebdaf3 100644 --- a/docs/tutorials/tutorial-delete-data.md +++ b/docs/tutorials/tutorial-delete-data.md @@ -26,7 +26,7 @@ sidebar_label: "Deleting data" This tutorial demonstrates how to delete existing data. 
-For this tutorial, we'll assume you've already downloaded Apache Druid (incubating) as described in +For this tutorial, we'll assume you've already downloaded Apache Druid as described in the [single-machine quickstart](index.html) and have it running on your local machine. ## Load initial data diff --git a/docs/tutorials/tutorial-ingestion-spec.md b/docs/tutorials/tutorial-ingestion-spec.md index a39424529242..b722ed736750 100644 --- a/docs/tutorials/tutorial-ingestion-spec.md +++ b/docs/tutorials/tutorial-ingestion-spec.md @@ -26,7 +26,7 @@ sidebar_label: "Writing an ingestion spec" This tutorial will guide the reader through the process of defining an ingestion spec, pointing out key considerations and guidelines. -For this tutorial, we'll assume you've already downloaded Apache Druid (incubating) as described in +For this tutorial, we'll assume you've already downloaded Apache Druid as described in the [single-machine quickstart](index.html) and have it running on your local machine. It will also be helpful to have finished [Tutorial: Loading a file](../tutorials/tutorial-batch.md), [Tutorial: Querying data](../tutorials/tutorial-query.md), and [Tutorial: Rollup](../tutorials/tutorial-rollup.md). diff --git a/docs/tutorials/tutorial-kafka.md b/docs/tutorials/tutorial-kafka.md index 6a7b8e30eb8e..36194d19c91f 100644 --- a/docs/tutorials/tutorial-kafka.md +++ b/docs/tutorials/tutorial-kafka.md @@ -26,7 +26,7 @@ sidebar_label: "Load from Apache Kafka" ## Getting started -This tutorial demonstrates how to load data into Apache Druid (incubating) from a Kafka stream, using Druid's Kafka indexing service. +This tutorial demonstrates how to load data into Apache Druid from a Kafka stream, using Druid's Kafka indexing service. For this tutorial, we'll assume you've already downloaded Druid as described in the [quickstart](index.html) using the `micro-quickstart` single-machine configuration and have it diff --git a/docs/tutorials/tutorial-query.md b/docs/tutorials/tutorial-query.md index 4f496c9c9bf0..4b19c92b89b3 100644 --- a/docs/tutorials/tutorial-query.md +++ b/docs/tutorials/tutorial-query.md @@ -24,7 +24,7 @@ sidebar_label: "Querying data" --> -This tutorial will demonstrate how to query data in Apache Druid (incubating), with examples for Druid SQL and Druid's native query format. +This tutorial will demonstrate how to query data in Apache Druid, with examples for Druid SQL and Druid's native query format. The tutorial assumes that you've already completed one of the 4 ingestion tutorials, as we will be querying the sample Wikipedia edits data. diff --git a/docs/tutorials/tutorial-retention.md b/docs/tutorials/tutorial-retention.md index cd1912a8cfeb..e4ff42895273 100644 --- a/docs/tutorials/tutorial-retention.md +++ b/docs/tutorials/tutorial-retention.md @@ -26,7 +26,7 @@ sidebar_label: "Configuring data retention" This tutorial demonstrates how to configure retention rules on a datasource to set the time intervals of data that will be retained or dropped. -For this tutorial, we'll assume you've already downloaded Apache Druid (incubating) as described in +For this tutorial, we'll assume you've already downloaded Apache Druid as described in the [single-machine quickstart](index.html) and have it running on your local machine. It will also be helpful to have finished [Tutorial: Loading a file](../tutorials/tutorial-batch.md) and [Tutorial: Querying data](../tutorials/tutorial-query.md). 
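The topnquery.md hunk above states that TopN results are approximate because each data process ranks its own values and returns only its top K (K = max(1000, threshold)) to the Broker. The standalone Java sketch below is not Druid code: the class and variable names are invented and K is shrunk to 2 purely so the effect is visible with a handful of rows. It shows how values near the per-process cutoff can be under-counted after the Broker-side merge, which is why ordering beyond roughly the first ~900 of 1000 requested items is not guaranteed.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TopNApproximationSketch
{
  // Per-process top-K: in Druid the cutoff is max(1000, threshold); a tiny K is
  // used here only so the effect shows up with a handful of rows.
  static Map<String, Long> localTopK(Map<String, Long> counts, int k)
  {
    List<Map.Entry<String, Long>> sorted = new ArrayList<>(counts.entrySet());
    sorted.sort((e1, e2) -> Long.compare(e2.getValue(), e1.getValue()));
    Map<String, Long> top = new HashMap<>();
    for (Map.Entry<String, Long> e : sorted.subList(0, Math.min(k, sorted.size()))) {
      top.put(e.getKey(), e.getValue());
    }
    return top;
  }

  public static void main(String[] args)
  {
    // Hypothetical per-process counts for one dimension.
    Map<String, Long> process1 = Map.of("a", 10L, "b", 9L, "c", 8L);
    Map<String, Long> process2 = Map.of("a", 10L, "c", 9L, "b", 8L);
    int k = 2;

    // Broker-side merge of the partial top-K results returned by each process.
    Map<String, Long> merged = new HashMap<>();
    for (Map<String, Long> partial : List.of(localTopK(process1, k), localTopK(process2, k))) {
      partial.forEach((dim, count) -> merged.merge(dim, count, Long::sum));
    }

    // True totals are a=20, b=17, c=17, but the merged result is {a=20, b=9, c=9}
    // because b and c each fell outside the top K on one of the two processes.
    System.out.println(merged);
  }
}
```

Raising the threshold (and therefore K) narrows the window in which this under-counting can happen, which matches the docs' advice that TopNs can be made more accurate by increasing the threshold.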
diff --git a/docs/tutorials/tutorial-rollup.md b/docs/tutorials/tutorial-rollup.md index f344c0df2110..8b4f1ad1d6f5 100644 --- a/docs/tutorials/tutorial-rollup.md +++ b/docs/tutorials/tutorial-rollup.md @@ -24,7 +24,7 @@ sidebar_label: "Roll-up" --> -Apache Druid (incubating) can summarize raw data at ingestion time using a process we refer to as "roll-up". Roll-up is a first-level aggregation operation over a selected set of columns that reduces the size of stored data. +Apache Druid can summarize raw data at ingestion time using a process we refer to as "roll-up". Roll-up is a first-level aggregation operation over a selected set of columns that reduces the size of stored data. This tutorial will demonstrate the effects of roll-up on an example dataset. diff --git a/docs/tutorials/tutorial-transform-spec.md b/docs/tutorials/tutorial-transform-spec.md index b917df81e985..c90ca6077e33 100644 --- a/docs/tutorials/tutorial-transform-spec.md +++ b/docs/tutorials/tutorial-transform-spec.md @@ -26,7 +26,7 @@ sidebar_label: "Transforming input data" This tutorial will demonstrate how to use transform specs to filter and transform input data during ingestion. -For this tutorial, we'll assume you've already downloaded Apache Druid (incubating) as described in +For this tutorial, we'll assume you've already downloaded Apache Druid as described in the [single-machine quickstart](index.html) and have it running on your local machine. It will also be helpful to have finished [Tutorial: Loading a file](../tutorials/tutorial-batch.md) and [Tutorial: Querying data](../tutorials/tutorial-query.md). diff --git a/docs/tutorials/tutorial-update-data.md b/docs/tutorials/tutorial-update-data.md index e9435cfcb925..804385028cfa 100644 --- a/docs/tutorials/tutorial-update-data.md +++ b/docs/tutorials/tutorial-update-data.md @@ -26,7 +26,7 @@ sidebar_label: "Updating existing data" This tutorial demonstrates how to update existing data, showing both overwrites and appends. -For this tutorial, we'll assume you've already downloaded Apache Druid (incubating) as described in +For this tutorial, we'll assume you've already downloaded Apache Druid as described in the [single-machine quickstart](index.html) and have it running on your local machine. It will also be helpful to have finished [Tutorial: Loading a file](../tutorials/tutorial-batch.md), [Tutorial: Querying data](../tutorials/tutorial-query.md), and [Tutorial: Rollup](../tutorials/tutorial-rollup.md). 
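The tutorial-rollup.md hunk above describes roll-up as a first-level aggregation over a selected set of columns at ingestion time. As a rough standalone sketch, not Druid's implementation, with an invented row type, hour granularity, and metric, the Java snippet below collapses rows that share a truncated timestamp and dimension value into one stored row with a summed metric.

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RollupSketch
{
  // Invented input row: one timestamp, one dimension ("page"), one metric ("edits").
  record InputRow(long timestampMillis, String page, long edits) {}

  public static void main(String[] args)
  {
    long hour = 3_600_000L;
    List<InputRow> rows = List.of(
        new InputRow(10 * hour + 100, "Druid", 1),
        new InputRow(10 * hour + 2_000, "Druid", 3),
        new InputRow(10 * hour + 5_000, "Kafka", 2),
        new InputRow(11 * hour + 1_000, "Druid", 5)
    );

    // Truncate timestamps to HOUR granularity and sum the metric per
    // (truncated timestamp, dimension) key.
    Map<String, Long> rolledUp = new HashMap<>();
    for (InputRow row : rows) {
      long bucket = (row.timestampMillis() / hour) * hour;
      String key = bucket + "|" + row.page();
      rolledUp.merge(key, row.edits(), Long::sum);
    }

    // Four input rows become three stored rows:
    // {36000000|Druid=4, 36000000|Kafka=2, 39600000|Druid=5}
    System.out.println(rolledUp);
  }
}
```

The size reduction comes from discarding the individual events and keeping only the aggregated rows, which is the trade-off the roll-up tutorial goes on to demonstrate.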
diff --git a/extensions-contrib/distinctcount/src/main/java/org/apache/druid/query/aggregation/distinctcount/DistinctCountAggregatorFactory.java b/extensions-contrib/distinctcount/src/main/java/org/apache/druid/query/aggregation/distinctcount/DistinctCountAggregatorFactory.java index 08f5136bc821..b74aba1e24bd 100644 --- a/extensions-contrib/distinctcount/src/main/java/org/apache/druid/query/aggregation/distinctcount/DistinctCountAggregatorFactory.java +++ b/extensions-contrib/distinctcount/src/main/java/org/apache/druid/query/aggregation/distinctcount/DistinctCountAggregatorFactory.java @@ -124,7 +124,7 @@ public Object combine(Object lhs, Object rhs) @Override public AggregateCombiner makeAggregateCombiner() { - // This is likely wrong as well as combine(), see https://github.com/apache/incubator-druid/pull/2602#issuecomment-321224202 + // This is likely wrong as well as combine(), see https://github.com/apache/druid/pull/2602#issuecomment-321224202 return new LongSumAggregateCombiner(); } diff --git a/extensions-contrib/moving-average-query/README.md b/extensions-contrib/moving-average-query/README.md index 8b5a07874c6b..bfd0284be7a3 100644 --- a/extensions-contrib/moving-average-query/README.md +++ b/extensions-contrib/moving-average-query/README.md @@ -26,4 +26,4 @@ Overview Documentation ============= -See the druid.apache.org website or under [Druid Github Repo](https://github.com/apache/incubator-druid/tree/master/docs/content/development/extensions-contrib/moving-average-query.md). +See the druid.apache.org website or under [Druid Github Repo](https://github.com/apache/druid/tree/master/docs/content/development/extensions-contrib/moving-average-query.md). diff --git a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/hll/HllSketchBuildBufferAggregator.java b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/hll/HllSketchBuildBufferAggregator.java index 5b566febbb04..4c39259a6a97 100644 --- a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/hll/HllSketchBuildBufferAggregator.java +++ b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/hll/HllSketchBuildBufferAggregator.java @@ -217,7 +217,7 @@ public void inspectRuntimeShape(RuntimeShapeInspector inspector) inspector.visit("selector", selector); // lgK should be inspected because different execution paths exist in HllSketch.update() that is called from // @CalledFromHotLoop-annotated aggregate() depending on the lgK. 
- // See https://github.com/apache/incubator-druid/pull/6893#discussion_r250726028 + // See https://github.com/apache/druid/pull/6893#discussion_r250726028 inspector.visit("lgK", lgK); } } diff --git a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/hll/HllSketchMergeBufferAggregator.java b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/hll/HllSketchMergeBufferAggregator.java index 7f9c8bff0d3e..7161c25fb6bc 100644 --- a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/hll/HllSketchMergeBufferAggregator.java +++ b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/hll/HllSketchMergeBufferAggregator.java @@ -162,7 +162,7 @@ public void inspectRuntimeShape(RuntimeShapeInspector inspector) inspector.visit("selector", selector); // lgK should be inspected because different execution paths exist in Union.update() that is called from // @CalledFromHotLoop-annotated aggregate() depending on the lgK. - // See https://github.com/apache/incubator-druid/pull/6893#discussion_r250726028 + // See https://github.com/apache/druid/pull/6893#discussion_r250726028 inspector.visit("lgK", lgK); } } diff --git a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchBuildAggregator.java b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchBuildAggregator.java index bc0b551f883b..2365e07d08d1 100644 --- a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchBuildAggregator.java +++ b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchBuildAggregator.java @@ -61,7 +61,7 @@ public ArrayOfDoublesSketchBuildAggregator( /** * This method uses synchronization because it can be used during indexing, * and Druid can call aggregate() and get() concurrently - * https://github.com/apache/incubator-druid/pull/3956 + * https://github.com/apache/druid/pull/3956 */ @Override public void aggregate() @@ -81,7 +81,7 @@ public void aggregate() /** * This method uses synchronization because it can be used during indexing, * and Druid can call aggregate() and get() concurrently - * https://github.com/apache/incubator-druid/pull/3956 + * https://github.com/apache/druid/pull/3956 * The returned sketch is a separate instance of ArrayOfDoublesCompactSketch * representing the current state of the aggregation, and is not affected by consequent * aggregate() calls diff --git a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchBuildBufferAggregator.java b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchBuildBufferAggregator.java index 6aedd96187e5..4108a49dc41c 100644 --- a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchBuildBufferAggregator.java +++ b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchBuildBufferAggregator.java @@ -83,7 +83,7 @@ public void init(final ByteBuffer buf, final int position) /** * This method uses locks because it can be used during indexing, * and Druid can call aggregate() and get() concurrently - * 
https://github.com/apache/incubator-druid/pull/3956 + * https://github.com/apache/druid/pull/3956 */ @Override public void aggregate(final ByteBuffer buf, final int position) @@ -114,7 +114,7 @@ public void aggregate(final ByteBuffer buf, final int position) /** * This method uses locks because it can be used during indexing, * and Druid can call aggregate() and get() concurrently - * https://github.com/apache/incubator-druid/pull/3956 + * https://github.com/apache/druid/pull/3956 * The returned sketch is a separate instance of ArrayOfDoublesCompactSketch * representing the current state of the aggregation, and is not affected by consequent * aggregate() calls diff --git a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchMergeAggregator.java b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchMergeAggregator.java index 5e51cf676a0e..cb2bfc91636d 100644 --- a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchMergeAggregator.java +++ b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchMergeAggregator.java @@ -53,7 +53,7 @@ public ArrayOfDoublesSketchMergeAggregator( /** * This method uses synchronization because it can be used during indexing, * and Druid can call aggregate() and get() concurrently - * https://github.com/apache/incubator-druid/pull/3956 + * https://github.com/apache/druid/pull/3956 */ @Override public void aggregate() @@ -70,7 +70,7 @@ public void aggregate() /** * This method uses synchronization because it can be used during indexing, * and Druid can call aggregate() and get() concurrently - * https://github.com/apache/incubator-druid/pull/3956 + * https://github.com/apache/druid/pull/3956 * The returned sketch is a separate instance of ArrayOfDoublesCompactSketch * representing the current state of the aggregation, and is not affected by consequent * aggregate() calls diff --git a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchMergeBufferAggregator.java b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchMergeBufferAggregator.java index 51aae2ab9939..eca94d0c1c04 100644 --- a/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchMergeBufferAggregator.java +++ b/extensions-core/datasketches/src/main/java/org/apache/druid/query/aggregation/datasketches/tuple/ArrayOfDoublesSketchMergeBufferAggregator.java @@ -75,7 +75,7 @@ public void init(final ByteBuffer buf, final int position) /** * This method uses locks because it can be used during indexing, * and Druid can call aggregate() and get() concurrently - * https://github.com/apache/incubator-druid/pull/3956 + * https://github.com/apache/druid/pull/3956 */ @Override public void aggregate(final ByteBuffer buf, final int position) @@ -103,7 +103,7 @@ public void aggregate(final ByteBuffer buf, final int position) /** * This method uses locks because it can be used during indexing, * and Druid can call aggregate() and get() concurrently - * https://github.com/apache/incubator-druid/pull/3956 + * https://github.com/apache/druid/pull/3956 * The returned sketch is a separate instance of ArrayOfDoublesCompactSketch * representing the current state of the aggregation, and 
is not affected by consequent * aggregate() calls diff --git a/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authentication/BasicHTTPEscalator.java b/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authentication/BasicHTTPEscalator.java index 5aa83dde4f10..c81afc92954e 100644 --- a/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authentication/BasicHTTPEscalator.java +++ b/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authentication/BasicHTTPEscalator.java @@ -66,7 +66,7 @@ public AuthenticationResult createEscalatedAuthenticationResult() { LOG.debug("----------- Creating escalated authentication result. username: %s", this.internalClientUsername); // if you found your self asking why the authenticatedBy field is set to null please read this: - // https://github.com/apache/incubator-druid/pull/5706#discussion_r185940889 + // https://github.com/apache/druid/pull/5706#discussion_r185940889 return new AuthenticationResult(internalClientUsername, authorizerName, null, null); } } diff --git a/extensions-core/druid-kerberos/src/main/java/org/apache/druid/security/kerberos/KerberosEscalator.java b/extensions-core/druid-kerberos/src/main/java/org/apache/druid/security/kerberos/KerberosEscalator.java index b4797e674380..2ca51edb06fc 100644 --- a/extensions-core/druid-kerberos/src/main/java/org/apache/druid/security/kerberos/KerberosEscalator.java +++ b/extensions-core/druid-kerberos/src/main/java/org/apache/druid/security/kerberos/KerberosEscalator.java @@ -58,7 +58,7 @@ public HttpClient createEscalatedClient(HttpClient baseClient) public AuthenticationResult createEscalatedAuthenticationResult() { // if you found your self asking why the authenticatedBy field is set to null please read this: - // https://github.com/apache/incubator-druid/pull/5706#discussion_r185940889 + // https://github.com/apache/druid/pull/5706#discussion_r185940889 return new AuthenticationResult(internalClientPrincipal, authorizerName, null, null); } diff --git a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsDataSegmentPusher.java b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsDataSegmentPusher.java index bde39d49992c..0354e3c34688 100644 --- a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsDataSegmentPusher.java +++ b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsDataSegmentPusher.java @@ -57,7 +57,7 @@ public class HdfsDataSegmentPusher implements DataSegmentPusher private final ObjectMapper jsonMapper; // We lazily initialize fullQualifiedStorageDirectory to avoid potential issues with Hadoop namenode HA. 
- // Please see https://github.com/apache/incubator-druid/pull/5684 + // Please see https://github.com/apache/druid/pull/5684 private final Supplier fullyQualifiedStorageDirectory; @Inject diff --git a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsStorageDruidModule.java b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsStorageDruidModule.java index d4242bcddcfc..e89bb0d8000e 100644 --- a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsStorageDruidModule.java +++ b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsStorageDruidModule.java @@ -88,7 +88,7 @@ public void configure(Binder binder) conf.setClassLoader(getClass().getClassLoader()); // Ensure that FileSystem class level initialization happens with correct CL - // See https://github.com/apache/incubator-druid/issues/1714 + // See https://github.com/apache/druid/issues/1714 ClassLoader currCtxCl = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); diff --git a/extensions-core/hdfs-storage/src/main/java/org/apache/hadoop/fs/HadoopFsWrapper.java b/extensions-core/hdfs-storage/src/main/java/org/apache/hadoop/fs/HadoopFsWrapper.java index fa0acf0a7cba..a57677115db5 100644 --- a/extensions-core/hdfs-storage/src/main/java/org/apache/hadoop/fs/HadoopFsWrapper.java +++ b/extensions-core/hdfs-storage/src/main/java/org/apache/hadoop/fs/HadoopFsWrapper.java @@ -55,7 +55,7 @@ public static boolean rename(FileSystem fs, Path from, Path to) try { // Note: Using reflection instead of simpler // fs.rename(from, to, Options.Rename.NONE); - // due to the issues discussed in https://github.com/apache/incubator-druid/pull/3787 + // due to the issues discussed in https://github.com/apache/druid/pull/3787 Method renameMethod = findRenameMethodRecursively(fs.getClass()); renameMethod.invoke(fs, from, to, new Options.Rename[]{Options.Rename.NONE}); return true; diff --git a/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTest.java b/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTest.java index e8bde11469c6..aba8bbcdcad5 100644 --- a/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTest.java +++ b/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTest.java @@ -2365,7 +2365,7 @@ public void testCanStartFromLaterThanEarliestOffset() throws Exception @Test public void testSerde() throws Exception { - // This is both a serde test and a regression test for https://github.com/apache/incubator-druid/issues/7724. + // This is both a serde test and a regression test for https://github.com/apache/druid/issues/7724. 
final KafkaIndexTask task = createTask( "taskid", diff --git a/extensions-core/orc-extensions/src/main/java/org/apache/druid/data/input/orc/OrcExtensionsModule.java b/extensions-core/orc-extensions/src/main/java/org/apache/druid/data/input/orc/OrcExtensionsModule.java index 78082cba5a82..77997eaea95a 100644 --- a/extensions-core/orc-extensions/src/main/java/org/apache/druid/data/input/orc/OrcExtensionsModule.java +++ b/extensions-core/orc-extensions/src/main/java/org/apache/druid/data/input/orc/OrcExtensionsModule.java @@ -69,7 +69,7 @@ public void configure(Binder binder) conf.setClassLoader(getClass().getClassLoader()); // Ensure that FileSystem class level initialization happens with correct CL - // See https://github.com/apache/incubator-druid/issues/1714 + // See https://github.com/apache/druid/issues/1714 ClassLoader currCtxCl = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); diff --git a/extensions-core/parquet-extensions/src/main/java/org/apache/druid/data/input/parquet/ParquetExtensionsModule.java b/extensions-core/parquet-extensions/src/main/java/org/apache/druid/data/input/parquet/ParquetExtensionsModule.java index 17fe50d42870..31a1e90f8eef 100644 --- a/extensions-core/parquet-extensions/src/main/java/org/apache/druid/data/input/parquet/ParquetExtensionsModule.java +++ b/extensions-core/parquet-extensions/src/main/java/org/apache/druid/data/input/parquet/ParquetExtensionsModule.java @@ -78,7 +78,7 @@ public void configure(Binder binder) conf.setClassLoader(getClass().getClassLoader()); // Ensure that FileSystem class level initialization happens with correct CL - // See https://github.com/apache/incubator-druid/issues/1714 + // See https://github.com/apache/druid/issues/1714 ClassLoader currCtxCl = Thread.currentThread().getContextClassLoader(); try { Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); diff --git a/extensions-core/parquet-extensions/src/main/java/org/apache/parquet/avro/DruidParquetAvroReadSupport.java b/extensions-core/parquet-extensions/src/main/java/org/apache/parquet/avro/DruidParquetAvroReadSupport.java index 9493420448dc..0937fdb214a8 100755 --- a/extensions-core/parquet-extensions/src/main/java/org/apache/parquet/avro/DruidParquetAvroReadSupport.java +++ b/extensions-core/parquet-extensions/src/main/java/org/apache/parquet/avro/DruidParquetAvroReadSupport.java @@ -110,7 +110,7 @@ public RecordMaterializer prepareForRead( ) { // coercing this value to false by default here to be friendlier default behavior - // see https://github.com/apache/incubator-druid/issues/5433#issuecomment-388539306 + // see https://github.com/apache/druid/issues/5433#issuecomment-388539306 String jobProp = "parquet.avro.add-list-element-records"; Boolean explicitlySet = configuration.getBoolean(jobProp, false); if (!explicitlySet) { diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java index c841b9dd83d1..2b80d28f101a 100644 --- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java +++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java @@ -117,7 +117,7 @@ public HadoopTuningConfig( final @JsonProperty("jobProperties") @Nullable Map jobProperties, final @JsonProperty("combineText") boolean combineText, final @JsonProperty("useCombiner") @Nullable Boolean useCombiner, - // See 
https://github.com/apache/incubator-druid/pull/1922 + // See https://github.com/apache/druid/pull/1922 final @JsonProperty("rowFlushBoundary") @Nullable Integer maxRowsInMemoryCOMPAT, // This parameter is left for compatibility when reading existing configs, to be removed in Druid 0.12. final @JsonProperty("buildV9Directly") Boolean buildV9Directly, diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java index 091033b30960..6ab20d210fd1 100644 --- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java +++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java @@ -162,7 +162,7 @@ public void testReindexing() throws Exception /** * By default re-indexing expects same aggregators as used by original indexing job. But, with additional flag * "useNewAggs" in DatasourcePathSpec, user can optionally have any set of aggregators. - * See https://github.com/apache/incubator-druid/issues/5277 . + * See https://github.com/apache/druid/issues/5277 . */ @Test public void testReindexingWithNewAggregators() throws Exception diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/Counters.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/Counters.java index a93e6eb93847..ab3b58f2e9aa 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/Counters.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/Counters.java @@ -28,7 +28,7 @@ public final class Counters public static int getAndIncrementInt(ConcurrentHashMap counters, K key) { // get() before computeIfAbsent() is an optimization to avoid locking in computeIfAbsent() if not needed. - // See https://github.com/apache/incubator-druid/pull/6898#discussion_r251384586. + // See https://github.com/apache/druid/pull/6898#discussion_r251384586. AtomicInteger counter = counters.get(key); if (counter == null) { counter = counters.computeIfAbsent(key, k -> new AtomicInteger()); @@ -39,7 +39,7 @@ public static int getAndIncrementInt(ConcurrentHashMap cou public static long incrementAndGetLong(ConcurrentHashMap counters, K key) { // get() before computeIfAbsent() is an optimization to avoid locking in computeIfAbsent() if not needed. - // See https://github.com/apache/incubator-druid/pull/6898#discussion_r251384586. + // See https://github.com/apache/druid/pull/6898#discussion_r251384586. AtomicLong counter = counters.get(key); if (counter == null) { counter = counters.computeIfAbsent(key, k -> new AtomicLong()); diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/actions/TaskAuditLogConfig.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/actions/TaskAuditLogConfig.java index cfabcf18a6e9..c78aec4f39b5 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/actions/TaskAuditLogConfig.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/actions/TaskAuditLogConfig.java @@ -24,7 +24,7 @@ /** * The configuration for task audit logging. - * This class will be removed in future releases. See https://github.com/apache/incubator-druid/issues/5859. + * This class will be removed in future releases. See https://github.com/apache/druid/issues/5859. 
*/ @Deprecated public class TaskAuditLogConfig diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java index 047ec343c9ae..1985c1299c7a 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java @@ -49,7 +49,7 @@ public MoveTask( @JsonProperty("interval") Interval interval, @JsonProperty("target") Map targetLoadSpec, @JsonProperty("context") Map context, - // See https://github.com/apache/incubator-druid/pull/1922 + // See https://github.com/apache/druid/pull/1922 @JsonProperty("targetLoadSpec") Map targetLoadSpecCOMPAT ) { diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/SubTaskSpec.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/SubTaskSpec.java index f23f260b2086..3a2e69607a6a 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/SubTaskSpec.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/SubTaskSpec.java @@ -88,7 +88,7 @@ public InputSplit getInputSplit() /** * Creates a new task but with a backward compatible type for this SubTaskSpec. This is to support to rolling update * for parallel indexing task and subclasses override this method properly if its type name has changed between - * releases. See https://github.com/apache/incubator-druid/issues/8836 for more details. + * releases. See https://github.com/apache/druid/issues/8836 for more details. * * This method will be called if {@link #newSubTask} fails with an {@link IllegalStateException} with an error * message starting with "Could not resolve type id". The failure of {@link #newSubTask} with this error is NOT diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java index 610d151f813a..2f1abc1d7f53 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java @@ -215,7 +215,7 @@ public TaskStatus call() for (String propName : props.stringPropertyNames()) { for (String allowedPrefix : config.getAllowedPrefixes()) { - // See https://github.com/apache/incubator-druid/issues/1841 + // See https://github.com/apache/druid/issues/1841 if (propName.startsWith(allowedPrefix) && !ForkingTaskRunnerConfig.JAVA_OPTS_PROPERTY.equals(propName) && !ForkingTaskRunnerConfig.JAVA_OPTS_ARRAY_PROPERTY.equals(propName) diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java index dcbc64baa3ee..9352f6516a04 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java @@ -720,7 +720,7 @@ private void runPendingTasks() try { //this can still be null due to race from explicit task shutdown request //or if another thread steals and completes this task right after this thread makes copy - //of pending tasks. See https://github.com/apache/incubator-druid/issues/2842 . + //of pending tasks. 
See https://github.com/apache/druid/issues/2842 . Task task = pendingTaskPayloads.get(taskId); if (task != null && tryAssignTask(task, taskRunnerWorkItem)) { pendingTaskPayloads.remove(taskId); diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskQueue.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskQueue.java index 3d802ed281ab..16ad0864106f 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskQueue.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskQueue.java @@ -279,7 +279,7 @@ private void manage() throws InterruptedException } else if (isTaskPending(task)) { // if the taskFutures contain this task and this task is pending, also let the taskRunner // to run it to guarantee it will be assigned to run - // see https://github.com/apache/incubator-druid/pull/6991 + // see https://github.com/apache/druid/pull/6991 taskRunner.run(task); } } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java index e5549f6c032d..56010d61304d 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java @@ -421,7 +421,7 @@ private boolean runTaskOnWorker( // CAUTION: This method calls RemoteTaskRunnerWorkItem.setResult(..) which results in TaskQueue.notifyStatus() being called // because that is attached by TaskQueue to task result future. So, this method must not be called with "statusLock" - // held. See https://github.com/apache/incubator-druid/issues/6201 + // held. See https://github.com/apache/druid/issues/6201 private void taskComplete( HttpRemoteTaskRunnerWorkItem taskRunnerWorkItem, WorkerHolder workerHolder, diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTask.java index dedba07d2238..8c790f2112d2 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTask.java @@ -79,7 +79,7 @@ public abstract class SeekableStreamIndexTask> runnerSupplier; diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java index 90d95b2ceb72..d3a15cde8c4e 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java @@ -2006,7 +2006,7 @@ private boolean updatePartitionDataFromStream() // This allows time for the stream to start writing to the new partitions after repartitioning. 
// For Kinesis ingestion, this cooldown time is particularly useful, lowering the possibility of // the new shards being empty, which can cause issues presently - // (see https://github.com/apache/incubator-druid/issues/7600) + // (see https://github.com/apache/druid/issues/7600) earlyStopTime = DateTimes.nowUtc().plus(tuningConfig.getRepartitionTransitionDuration()); log.info( "Previous partition set [%s] has changed to [%s] - requesting that tasks stop after [%s] at [%s]", diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/overlord/RemoteTaskRunnerRunPendingTasksConcurrencyTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/overlord/RemoteTaskRunnerRunPendingTasksConcurrencyTest.java index 3abe8edc779c..5e46af0c93cc 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/overlord/RemoteTaskRunnerRunPendingTasksConcurrencyTest.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/overlord/RemoteTaskRunnerRunPendingTasksConcurrencyTest.java @@ -54,7 +54,7 @@ public void tearDown() throws Exception rtrTestUtils.tearDown(); } - // This task reproduces the races described in https://github.com/apache/incubator-druid/issues/2842 + // This task reproduces the races described in https://github.com/apache/druid/issues/2842 @Test(timeout = 60_000L) public void testConcurrency() throws Exception { diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/overlord/http/OverlordResourceTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/overlord/http/OverlordResourceTest.java index a2971cecb888..e41a6c9b462e 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/overlord/http/OverlordResourceTest.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/overlord/http/OverlordResourceTest.java @@ -957,7 +957,7 @@ public void testGetTaskPayload() throws Exception { // This is disabled since OverlordResource.getTaskStatus() is annotated with TaskResourceFilter which is supposed to // set authorization token properly, but isn't called in this test. - // This should be fixed in https://github.com/apache/incubator-druid/issues/6685. + // This should be fixed in https://github.com/apache/druid/issues/6685. // expectAuthorizationTokenCheck(); final NoopTask task = NoopTask.create("mydatasource"); EasyMock.expect(taskStorageQueryAdapter.getTask("mytask")) @@ -995,7 +995,7 @@ public void testGetTaskStatus() throws Exception { // This is disabled since OverlordResource.getTaskStatus() is annotated with TaskResourceFilter which is supposed to // set authorization token properly, but isn't called in this test. - // This should be fixed in https://github.com/apache/incubator-druid/issues/6685. + // This should be fixed in https://github.com/apache/druid/issues/6685. // expectAuthorizationTokenCheck(); final Task task = NoopTask.create("mytask", 0); final TaskStatus status = TaskStatus.running("mytask"); @@ -1063,7 +1063,7 @@ public void testGetTaskStatus() throws Exception public void testShutdownTask() { // This is disabled since OverlordResource.doShutdown is annotated with TaskResourceFilter - // This should be fixed in https://github.com/apache/incubator-druid/issues/6685. + // This should be fixed in https://github.com/apache/druid/issues/6685. 
// expectAuthorizationTokenCheck(); TaskQueue mockQueue = EasyMock.createMock(TaskQueue.class); EasyMock.expect(taskMaster.isLeader()).andReturn(true).anyTimes(); @@ -1096,7 +1096,7 @@ public void testShutdownTask() public void testShutdownAllTasks() { // This is disabled since OverlordResource.shutdownTasksForDataSource is annotated with DatasourceResourceFilter - // This should be fixed in https://github.com/apache/incubator-druid/issues/6685. + // This should be fixed in https://github.com/apache/druid/issues/6685. // expectAuthorizationTokenCheck(); TaskQueue mockQueue = EasyMock.createMock(TaskQueue.class); EasyMock.expect(taskMaster.isLeader()).andReturn(true).anyTimes(); diff --git a/integration-tests/src/test/java/org/apache/druid/tests/indexer/AbstractIndexerTest.java b/integration-tests/src/test/java/org/apache/druid/tests/indexer/AbstractIndexerTest.java index 8723f75c6b71..88f6b0088dfb 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/indexer/AbstractIndexerTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/indexer/AbstractIndexerTest.java @@ -81,7 +81,7 @@ protected void unloadAndKillData(final String dataSource) private void unloadAndKillData(final String dataSource, String start, String end) { // Wait for any existing index tasks to complete before disabling the datasource otherwise - // realtime tasks can get stuck waiting for handoff. https://github.com/apache/incubator-druid/issues/1729 + // realtime tasks can get stuck waiting for handoff. https://github.com/apache/druid/issues/1729 waitForAllTasksToComplete(); Interval interval = Intervals.of(start + "/" + end); coordinator.unloadSegmentsForDataSource(dataSource); diff --git a/licenses.yaml b/licenses.yaml index c8f6df6d893b..cc0a4031e607 100644 --- a/licenses.yaml +++ b/licenses.yaml @@ -3145,7 +3145,7 @@ libraries: - org.apache.parquet: parquet-jackson notices: - parquet-avro: | - Apache Parquet MR (Incubating) + Apache Parquet MR Copyright 2014 The Apache Software Foundation diff --git a/licenses/APACHE2 b/licenses/APACHE2 index eeb51e0f55be..44b709288a1e 100644 --- a/licenses/APACHE2 +++ b/licenses/APACHE2 @@ -201,9 +201,9 @@ See the License for the specific language governing permissions and limitations under the License. - APACHE DRUID (INCUBATING) SUBCOMPONENTS: + APACHE DRUID SUBCOMPONENTS: - Apache Druid (incubating) includes a number of subcomponents with + Apache Druid includes a number of subcomponents with separate copyright notices and license terms. Your use of the source code for these subcomponents is subject to the terms and conditions of the following licenses. 
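The Counters.java hunk above (and the SpecializationService.java hunk further down) keeps a comment explaining that a plain get() before computeIfAbsent() avoids locking inside computeIfAbsent() when the entry already exists. The sketch below restates that idiom in isolation; the class and key names are invented, but the control flow mirrors the code shown in the hunk.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class CounterIdiomSketch
{
  private static final ConcurrentHashMap<String, AtomicLong> COUNTERS = new ConcurrentHashMap<>();

  static long incrementAndGet(String key)
  {
    // Fast path: a plain get() hits no lock when the counter already exists,
    // which is the common case once a key has been seen.
    AtomicLong counter = COUNTERS.get(key);
    if (counter == null) {
      // Slow path: only the first access for a key pays computeIfAbsent()'s cost.
      counter = COUNTERS.computeIfAbsent(key, k -> new AtomicLong());
    }
    return counter.incrementAndGet();
  }

  public static void main(String[] args)
  {
    incrementAndGet("taskCount");
    incrementAndGet("taskCount");
    System.out.println(COUNTERS.get("taskCount")); // prints 2
  }
}
```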
diff --git a/pom.xml b/pom.xml index d3285003ef29..7029e6c03053 100644 --- a/pom.xml +++ b/pom.xml @@ -53,7 +53,7 @@ - Apache Druid (incubating) developers list + Apache Druid developers list dev-subscribe@druid.apache.org dev-unsubscribe@druid.apache.org dev@druid.apache.org @@ -64,9 +64,9 @@ 2011 - scm:git:ssh://git@github.com/apache/incubator-druid.git - scm:git:ssh://git@github.com/apache/incubator-druid.git - https://github.com/apache/incubator-druid.git + scm:git:ssh://git@github.com/apache/druid.git + scm:git:ssh://git@github.com/apache/druid.git + https://github.com/apache/druid.git 0.13.0-incubating-SNAPSHOT @@ -325,7 +325,7 @@ org.apache.curator @@ -1486,7 +1486,7 @@ - Apache Druid (incubating) + Apache Druid org.apache.apache.resources:apache-jar-resource-bundle:1.5-SNAPSHOT @@ -1533,7 +1533,7 @@ -Duser.timezone=UTC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Daws.region=us-east-1 - + -Ddruid.indexing.doubleStorage=double false @@ -1770,7 +1770,7 @@ -Xmx768m -Duser.language=en -Duser.country=US -Dfile.encoding=UTF-8 -Duser.timezone=UTC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Daws.region=us-east-1 - + -Ddruid.indexing.doubleStorage=double diff --git a/processing/pom.xml b/processing/pom.xml index ccb1df525202..9da9ecfabca2 100644 --- a/processing/pom.xml +++ b/processing/pom.xml @@ -252,7 +252,7 @@ -Dfile.encoding=UTF-8 -Duser.timezone=UTC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager - + -Ddruid.indexing.doubleStorage=double diff --git a/processing/src/main/java/org/apache/druid/query/IntervalChunkingQueryRunner.java b/processing/src/main/java/org/apache/druid/query/IntervalChunkingQueryRunner.java index 848d0fc0082a..5bf029ec015a 100644 --- a/processing/src/main/java/org/apache/druid/query/IntervalChunkingQueryRunner.java +++ b/processing/src/main/java/org/apache/druid/query/IntervalChunkingQueryRunner.java @@ -41,7 +41,7 @@ /** * This class is deprecated and will removed in the future. - * See https://github.com/apache/incubator-druid/pull/4004#issuecomment-284171911 for details about deprecation. + * See https://github.com/apache/druid/pull/4004#issuecomment-284171911 for details about deprecation. */ @Deprecated public class IntervalChunkingQueryRunner implements QueryRunner diff --git a/processing/src/main/java/org/apache/druid/query/IntervalChunkingQueryRunnerDecorator.java b/processing/src/main/java/org/apache/druid/query/IntervalChunkingQueryRunnerDecorator.java index 97eb9e68af11..bec8a05d04f4 100644 --- a/processing/src/main/java/org/apache/druid/query/IntervalChunkingQueryRunnerDecorator.java +++ b/processing/src/main/java/org/apache/druid/query/IntervalChunkingQueryRunnerDecorator.java @@ -28,7 +28,7 @@ /** * This class is deprecated and will removed in the future. - * See https://github.com/apache/incubator-druid/pull/4004#issuecomment-284171911 for details about deprecation. + * See https://github.com/apache/druid/pull/4004#issuecomment-284171911 for details about deprecation. 
*/ @Deprecated public class IntervalChunkingQueryRunnerDecorator diff --git a/processing/src/main/java/org/apache/druid/query/QueryToolChest.java b/processing/src/main/java/org/apache/druid/query/QueryToolChest.java index f30a4c528df0..5ae0221f9b45 100644 --- a/processing/src/main/java/org/apache/druid/query/QueryToolChest.java +++ b/processing/src/main/java/org/apache/druid/query/QueryToolChest.java @@ -48,7 +48,7 @@ protected QueryToolChest() final TypeFactory typeFactory = TypeFactory.defaultInstance(); TypeReference resultTypeReference = getResultTypeReference(); // resultTypeReference is null in MaterializedViewQueryQueryToolChest. - // See https://github.com/apache/incubator-druid/issues/6977 + // See https://github.com/apache/druid/issues/6977 if (resultTypeReference != null) { baseResultType = typeFactory.constructType(resultTypeReference); bySegmentResultType = typeFactory.constructParametrizedType( diff --git a/processing/src/main/java/org/apache/druid/query/aggregation/FilteredAggregatorFactory.java b/processing/src/main/java/org/apache/druid/query/aggregation/FilteredAggregatorFactory.java index bc35dc2c4d0a..f76cd3d51516 100644 --- a/processing/src/main/java/org/apache/druid/query/aggregation/FilteredAggregatorFactory.java +++ b/processing/src/main/java/org/apache/druid/query/aggregation/FilteredAggregatorFactory.java @@ -149,7 +149,7 @@ public Object finalizeComputation(@Nullable Object object) return delegate.finalizeComputation(object); } - // See https://github.com/apache/incubator-druid/pull/6219#pullrequestreview-148919845 + // See https://github.com/apache/druid/pull/6219#pullrequestreview-148919845 @JsonProperty @Override public String getName() diff --git a/processing/src/main/java/org/apache/druid/query/aggregation/JavaScriptAggregatorFactory.java b/processing/src/main/java/org/apache/druid/query/aggregation/JavaScriptAggregatorFactory.java index 58017e7ed48a..006034a0e6a7 100644 --- a/processing/src/main/java/org/apache/druid/query/aggregation/JavaScriptAggregatorFactory.java +++ b/processing/src/main/java/org/apache/druid/query/aggregation/JavaScriptAggregatorFactory.java @@ -66,8 +66,8 @@ public class JavaScriptAggregatorFactory extends AggregatorFactory * in {@link #compileScript(String, String, String)} without worrying about final modifiers * on the fields of the created object * - * @see - * https://github.com/apache/incubator-druid/pull/6662#discussion_r237013157 + * @see + * https://github.com/apache/druid/pull/6662#discussion_r237013157 */ private volatile JavaScriptAggregator.@MonotonicNonNull ScriptAggregator compiledScript; diff --git a/processing/src/main/java/org/apache/druid/query/aggregation/post/JavaScriptPostAggregator.java b/processing/src/main/java/org/apache/druid/query/aggregation/post/JavaScriptPostAggregator.java index 1ed1318fd88b..23172167f1d7 100644 --- a/processing/src/main/java/org/apache/druid/query/aggregation/post/JavaScriptPostAggregator.java +++ b/processing/src/main/java/org/apache/druid/query/aggregation/post/JavaScriptPostAggregator.java @@ -94,8 +94,8 @@ public double apply(Object[] args) * in {@link #compile(String)} without worrying about final modifiers * on the fields of the created object * - * @see - * https://github.com/apache/incubator-druid/pull/6662#discussion_r237013157 + * @see + * https://github.com/apache/druid/pull/6662#discussion_r237013157 */ @MonotonicNonNull private volatile Function fn; diff --git a/processing/src/main/java/org/apache/druid/query/extraction/JavaScriptExtractionFn.java 
b/processing/src/main/java/org/apache/druid/query/extraction/JavaScriptExtractionFn.java index 3b95b7507b15..679121b699d8 100644 --- a/processing/src/main/java/org/apache/druid/query/extraction/JavaScriptExtractionFn.java +++ b/processing/src/main/java/org/apache/druid/query/extraction/JavaScriptExtractionFn.java @@ -76,8 +76,8 @@ public String apply(Object input) * in {@link #compile(String)} without worrying about final modifiers * on the fields of the created object * - * @see - * https://github.com/apache/incubator-druid/pull/6662#discussion_r237013157 + * @see + * https://github.com/apache/druid/pull/6662#discussion_r237013157 */ @MonotonicNonNull private volatile Function fn; diff --git a/processing/src/main/java/org/apache/druid/query/filter/JavaScriptDimFilter.java b/processing/src/main/java/org/apache/druid/query/filter/JavaScriptDimFilter.java index ea16c28c63f6..bf30eab460e5 100644 --- a/processing/src/main/java/org/apache/druid/query/filter/JavaScriptDimFilter.java +++ b/processing/src/main/java/org/apache/druid/query/filter/JavaScriptDimFilter.java @@ -58,8 +58,8 @@ public class JavaScriptDimFilter implements DimFilter * in {@link JavaScriptPredicateFactory(String, ExtractionFn)} without worrying about final modifiers * on the fields of the created object * - * @see - * https://github.com/apache/incubator-druid/pull/6662#discussion_r237013157 + * @see + * https://github.com/apache/druid/pull/6662#discussion_r237013157 */ @MonotonicNonNull private volatile JavaScriptPredicateFactory predicateFactory; diff --git a/processing/src/main/java/org/apache/druid/query/groupby/GroupByQuery.java b/processing/src/main/java/org/apache/druid/query/groupby/GroupByQuery.java index 69ab18540c5a..7c3a70d6e97b 100644 --- a/processing/src/main/java/org/apache/druid/query/groupby/GroupByQuery.java +++ b/processing/src/main/java/org/apache/druid/query/groupby/GroupByQuery.java @@ -214,7 +214,7 @@ private GroupByQuery( // Verify no duplicate names between dimensions, aggregators, and postAggregators. // They will all end up in the same namespace in the returned Rows and we can't have them clobbering each other. - // We're not counting __time, even though that name is problematic. See: https://github.com/apache/incubator-druid/pull/3684 + // We're not counting __time, even though that name is problematic. See: https://github.com/apache/druid/pull/3684 verifyOutputNames(this.dimensions, this.aggregatorSpecs, this.postAggregatorSpecs); this.postProcessingFn = postProcessingFn != null ? postProcessingFn : makePostProcessingFn(); diff --git a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/AbstractBufferHashGrouper.java b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/AbstractBufferHashGrouper.java index 0fe22b8f4331..e21cda479e3c 100644 --- a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/AbstractBufferHashGrouper.java +++ b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/AbstractBufferHashGrouper.java @@ -44,7 +44,7 @@ public abstract class AbstractBufferHashGrouper implements Grouper createIntervalChunkingRunner( // 1) It concats query chunks for consecutive intervals, which won't generate correct results. // 2) Merging instead of concating isn't a good idea, since it requires all chunks to run simultaneously, // which may take more resources than the cluster has. 
- // See also https://github.com/apache/incubator-druid/pull/4004 + // See also https://github.com/apache/druid/pull/4004 return runner; } diff --git a/processing/src/main/java/org/apache/druid/query/monomorphicprocessing/SpecializationService.java b/processing/src/main/java/org/apache/druid/query/monomorphicprocessing/SpecializationService.java index 7f640cc80446..c04f4b1dc5df 100644 --- a/processing/src/main/java/org/apache/druid/query/monomorphicprocessing/SpecializationService.java +++ b/processing/src/main/java/org/apache/druid/query/monomorphicprocessing/SpecializationService.java @@ -150,7 +150,7 @@ SpecializationState getSpecializationState(String runtimeShape, ImmutableMap< { SpecializationId specializationId = new SpecializationId(runtimeShape, classRemapping); // get() before computeIfAbsent() is an optimization to avoid locking in computeIfAbsent() if not needed. - // See https://github.com/apache/incubator-druid/pull/6898#discussion_r251384586. + // See https://github.com/apache/druid/pull/6898#discussion_r251384586. SpecializationState alreadyExistingState = specializationStates.get(specializationId); if (alreadyExistingState != null) { return alreadyExistingState; diff --git a/processing/src/main/java/org/apache/druid/query/search/ConciseBitmapDecisionHelper.java b/processing/src/main/java/org/apache/druid/query/search/ConciseBitmapDecisionHelper.java index d793f0d6682b..12c30374d5f0 100644 --- a/processing/src/main/java/org/apache/druid/query/search/ConciseBitmapDecisionHelper.java +++ b/processing/src/main/java/org/apache/druid/query/search/ConciseBitmapDecisionHelper.java @@ -22,7 +22,7 @@ public class ConciseBitmapDecisionHelper extends SearchQueryDecisionHelper { // This value comes from an experiment. - // See the discussion at https://github.com/apache/incubator-druid/pull/3792#issuecomment-268331804. + // See the discussion at https://github.com/apache/druid/pull/3792#issuecomment-268331804. private static final double BITMAP_INTERSECT_COST = 7.425; private static final ConciseBitmapDecisionHelper INSTANCE = new ConciseBitmapDecisionHelper(); diff --git a/processing/src/main/java/org/apache/druid/query/search/RoaringBitmapDecisionHelper.java b/processing/src/main/java/org/apache/druid/query/search/RoaringBitmapDecisionHelper.java index 14e3cdd45ce1..7d6ac5896d02 100644 --- a/processing/src/main/java/org/apache/druid/query/search/RoaringBitmapDecisionHelper.java +++ b/processing/src/main/java/org/apache/druid/query/search/RoaringBitmapDecisionHelper.java @@ -22,7 +22,7 @@ public class RoaringBitmapDecisionHelper extends SearchQueryDecisionHelper { // This value comes from an experiment. - // See the discussion at https://github.com/apache/incubator-druid/pull/3792#issuecomment-268331804. + // See the discussion at https://github.com/apache/druid/pull/3792#issuecomment-268331804. 
private static final double BITMAP_INTERSECT_COST = 4.5; private static final RoaringBitmapDecisionHelper INSTANCE = new RoaringBitmapDecisionHelper(); diff --git a/processing/src/main/java/org/apache/druid/query/search/SearchQueryMetrics.java b/processing/src/main/java/org/apache/druid/query/search/SearchQueryMetrics.java index 294a80b43817..1795f35d23e8 100644 --- a/processing/src/main/java/org/apache/druid/query/search/SearchQueryMetrics.java +++ b/processing/src/main/java/org/apache/druid/query/search/SearchQueryMetrics.java @@ -27,7 +27,7 @@ public interface SearchQueryMetrics extends QueryMetrics * Sets the granularity of {@link SearchQuery#getGranularity()} of the given query as dimension. * * This method is going to be used in "full" metrics impl, - * see https://github.com/apache/incubator-druid/pull/4570#issuecomment-319458229 + * see https://github.com/apache/druid/pull/4570#issuecomment-319458229 */ @SuppressWarnings("unused") void granularity(SearchQuery query); diff --git a/processing/src/main/java/org/apache/druid/segment/BitmapOffset.java b/processing/src/main/java/org/apache/druid/segment/BitmapOffset.java index 92f95121e2e6..8f67c5620bc5 100644 --- a/processing/src/main/java/org/apache/druid/segment/BitmapOffset.java +++ b/processing/src/main/java/org/apache/druid/segment/BitmapOffset.java @@ -232,7 +232,7 @@ public void inspectRuntimeShape(RuntimeShapeInspector inspector) private static IntIterator safeClone(IntIterator iterator) { // Calling clone() on empty iterators from RoaringBitmap library sometimes fails with NPE, - // see https://github.com/apache/incubator-druid/issues/4709, https://github.com/RoaringBitmap/RoaringBitmap/issues/177 + // see https://github.com/apache/druid/issues/4709, https://github.com/RoaringBitmap/RoaringBitmap/issues/177 return iterator.hasNext() ? 
iterator.clone() : EmptyIntIterator.instance(); } } diff --git a/processing/src/main/java/org/apache/druid/segment/DimensionSelector.java b/processing/src/main/java/org/apache/druid/segment/DimensionSelector.java index 8a0f2a75bddc..f50ae1e70b94 100644 --- a/processing/src/main/java/org/apache/druid/segment/DimensionSelector.java +++ b/processing/src/main/java/org/apache/druid/segment/DimensionSelector.java @@ -75,7 +75,7 @@ public interface DimensionSelector extends ColumnValueSelector, Dimensio @Override default float getFloat() { - // This is controversial, see https://github.com/apache/incubator-druid/issues/4888 + // This is controversial, see https://github.com/apache/druid/issues/4888 return 0.0f; } @@ -88,7 +88,7 @@ default float getFloat() @Override default double getDouble() { - // This is controversial, see https://github.com/apache/incubator-druid/issues/4888 + // This is controversial, see https://github.com/apache/druid/issues/4888 return 0.0; } @@ -101,7 +101,7 @@ default double getDouble() @Override default long getLong() { - // This is controversial, see https://github.com/apache/incubator-druid/issues/4888 + // This is controversial, see https://github.com/apache/druid/issues/4888 return 0L; } diff --git a/processing/src/main/java/org/apache/druid/segment/FilteredOffset.java b/processing/src/main/java/org/apache/druid/segment/FilteredOffset.java index a6c50d5f6396..2aed558c24bb 100644 --- a/processing/src/main/java/org/apache/druid/segment/FilteredOffset.java +++ b/processing/src/main/java/org/apache/druid/segment/FilteredOffset.java @@ -118,7 +118,7 @@ public ReadableOffset getBaseReadableOffset() * If clone is made possible for FilteredOffset, some improvements could become possible in {@link * org.apache.druid.query.topn.PooledTopNAlgorithm#computeSpecializedScanAndAggregateImplementations}. * - * See also https://github.com/apache/incubator-druid/issues/5132. + * See also https://github.com/apache/druid/issues/5132. */ @Override public Offset clone() diff --git a/processing/src/main/java/org/apache/druid/segment/column/StringDictionaryEncodedColumn.java b/processing/src/main/java/org/apache/druid/segment/column/StringDictionaryEncodedColumn.java index c08a2db7a2a5..ba77d92f15ba 100644 --- a/processing/src/main/java/org/apache/druid/segment/column/StringDictionaryEncodedColumn.java +++ b/processing/src/main/java/org/apache/druid/segment/column/StringDictionaryEncodedColumn.java @@ -127,7 +127,7 @@ public int getValueCardinality() !extractionFn.preservesOrdering()) However current behavior allows some GroupBy-V1 queries to work that wouldn't work otherwise and doesn't cause any problems due to special handling of extractionFn everywhere. 
- See https://github.com/apache/incubator-druid/pull/8433 + See https://github.com/apache/druid/pull/8433 */ return getCardinality(); } diff --git a/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndex.java b/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndex.java index 6d07cf2c88a7..fd9bcbd0d984 100644 --- a/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndex.java +++ b/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndex.java @@ -925,7 +925,7 @@ private ColumnCapabilitiesImpl makeCapabilitiesFromValueType(ValueType type) /** * Currently called to initialize IncrementalIndex dimension order during index creation * Index dimension ordering could be changed to initialize from DimensionsSpec after resolution of - * https://github.com/apache/incubator-druid/issues/2011 + * https://github.com/apache/druid/issues/2011 */ public void loadDimensionIterable( Iterable oldDimensionOrder, diff --git a/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndexStorageAdapter.java b/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndexStorageAdapter.java index 8dd3b28f5c83..b6a27bee1538 100644 --- a/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndexStorageAdapter.java +++ b/processing/src/main/java/org/apache/druid/segment/incremental/IncrementalIndexStorageAdapter.java @@ -262,7 +262,7 @@ private class IncrementalIndexCursor implements Cursor descending, currEntry ); - // Set maxRowIndex before creating the filterMatcher. See https://github.com/apache/incubator-druid/pull/6340 + // Set maxRowIndex before creating the filterMatcher. See https://github.com/apache/druid/pull/6340 maxRowIndex = index.getLastRowIndex(); filterMatcher = filter == null ? 
BooleanValueMatcher.of(true) : filter.makeMatcher(columnSelectorFactory); numAdvanced = -1; diff --git a/processing/src/test/java/org/apache/druid/query/aggregation/post/FinalizingFieldAccessPostAggregatorTest.java b/processing/src/test/java/org/apache/druid/query/aggregation/post/FinalizingFieldAccessPostAggregatorTest.java index 496028f8a255..1c027be7e2f8 100644 --- a/processing/src/test/java/org/apache/druid/query/aggregation/post/FinalizingFieldAccessPostAggregatorTest.java +++ b/processing/src/test/java/org/apache/druid/query/aggregation/post/FinalizingFieldAccessPostAggregatorTest.java @@ -84,7 +84,7 @@ public void testComputedWithFinalizing() "final_billy", aggName, ImmutableMap.of(aggName, aggFactory) ); - // Check that the class matches exactly; see https://github.com/apache/incubator-druid/issues/6063 + // Check that the class matches exactly; see https://github.com/apache/druid/issues/6063 Assert.assertEquals(FinalizingFieldAccessPostAggregator.class, postAgg.getClass()); Map metricValues = new HashMap<>(); diff --git a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerTest.java b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerTest.java index 87f0de0b15ba..6622fbe00a16 100644 --- a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerTest.java +++ b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryRunnerTest.java @@ -4798,7 +4798,7 @@ public void testSubqueryWithMultipleIntervalsInOuterQueryAndChunkPeriod() @Test public void testSubqueryWithExtractionFnInOuterQuery() { - //https://github.com/apache/incubator-druid/issues/2556 + //https://github.com/apache/druid/issues/2556 GroupByQuery subquery = makeQueryBuilder() .setDataSource(QueryRunnerTestHelper.DATA_SOURCE) @@ -6852,7 +6852,7 @@ public void testGroupByWithSubtotalsSpecGeneral() TestHelper.assertExpectedObjects(expectedResults, results, "subtotal"); } - // https://github.com/apache/incubator-druid/issues/7820 + // https://github.com/apache/druid/issues/7820 @Test public void testGroupByWithSubtotalsSpecWithRenamedDimensionAndFilter() { diff --git a/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/BufferHashGrouperTest.java b/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/BufferHashGrouperTest.java index a2275f7f32f7..047dba25a949 100644 --- a/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/BufferHashGrouperTest.java +++ b/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/BufferHashGrouperTest.java @@ -133,7 +133,7 @@ public void testGrowing() @Test public void testGrowingOverflowingInteger() { - // This test checks the bug reported in https://github.com/apache/incubator-druid/pull/4333 only when + // This test checks the bug reported in https://github.com/apache/druid/pull/4333 only when // NullHandling.replaceWithDefault() is true if (NullHandling.replaceWithDefault()) { final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory(); diff --git a/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryTest.java b/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryTest.java index 715e3c8b89e4..728a83e4c68d 100644 --- a/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryTest.java +++ b/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryTest.java @@ -884,7 +884,7 @@ public void testBySegmentResults() 
FACTORY.mergeRunners( Execs.directExecutor(), //Note: It is essential to have atleast 2 query runners merged to reproduce the regression bug described in - //https://github.com/apache/incubator-druid/pull/1172 + //https://github.com/apache/druid/pull/1172 //the bug surfaces only when ordering is used which happens only when you have 2 things to compare Lists.newArrayList(singleSegmentQueryRunner, singleSegmentQueryRunner) ) diff --git a/processing/src/test/java/org/apache/druid/query/timeseries/TimeseriesQueryQueryToolChestTest.java b/processing/src/test/java/org/apache/druid/query/timeseries/TimeseriesQueryQueryToolChestTest.java index 89fdbb7a8674..8c0211ac76af 100644 --- a/processing/src/test/java/org/apache/druid/query/timeseries/TimeseriesQueryQueryToolChestTest.java +++ b/processing/src/test/java/org/apache/druid/query/timeseries/TimeseriesQueryQueryToolChestTest.java @@ -181,7 +181,7 @@ public void testCacheKey() ) .build(); - // Test for https://github.com/apache/incubator-druid/issues/4093. + // Test for https://github.com/apache/druid/issues/4093. Assert.assertFalse( Arrays.equals( TOOL_CHEST.getCacheStrategy(query1).computeCacheKey(query1), diff --git a/processing/src/test/java/org/apache/druid/query/topn/TopNQueryRunnerTest.java b/processing/src/test/java/org/apache/druid/query/topn/TopNQueryRunnerTest.java index 600329a245c8..36c75cf68ea3 100644 --- a/processing/src/test/java/org/apache/druid/query/topn/TopNQueryRunnerTest.java +++ b/processing/src/test/java/org/apache/druid/query/topn/TopNQueryRunnerTest.java @@ -5713,7 +5713,7 @@ public void testFullOnTopNWithAggsOnNumericDims() @Test public void testFullOnTopNBoundFilterAndLongSumMetric() { - // this tests the stack overflow issue from https://github.com/apache/incubator-druid/issues/4628 + // this tests the stack overflow issue from https://github.com/apache/druid/issues/4628 TopNQuery query = new TopNQueryBuilder() .dataSource(QueryRunnerTestHelper.DATA_SOURCE) .granularity(QueryRunnerTestHelper.ALL_GRAN) @@ -5744,7 +5744,7 @@ public void testFullOnTopNBoundFilterAndLongSumMetric() } /** - * Regression test for https://github.com/apache/incubator-druid/issues/5132 + * Regression test for https://github.com/apache/druid/issues/5132 */ @Test public void testTopNWithNonBitmapFilter() diff --git a/processing/src/test/java/org/apache/druid/segment/data/GenericIndexedWriterTest.java b/processing/src/test/java/org/apache/druid/segment/data/GenericIndexedWriterTest.java index 7be04c30d620..38c67d69bad0 100644 --- a/processing/src/test/java/org/apache/druid/segment/data/GenericIndexedWriterTest.java +++ b/processing/src/test/java/org/apache/druid/segment/data/GenericIndexedWriterTest.java @@ -45,7 +45,7 @@ public static void staticSetUp() @Test public void writeLargeValueIntoLargeColumn() throws IOException { - // Regression test for https://github.com/apache/incubator-druid/issues/9027. + // Regression test for https://github.com/apache/druid/issues/9027. 
final GenericIndexedWriter writer = new GenericIndexedWriter<>( new OnHeapMemorySegmentWriteOutMedium(), diff --git a/processing/src/test/java/org/apache/druid/segment/incremental/IncrementalIndexStorageAdapterTest.java b/processing/src/test/java/org/apache/druid/segment/incremental/IncrementalIndexStorageAdapterTest.java index 3f3f79d0c03a..54d7597bb9c2 100644 --- a/processing/src/test/java/org/apache/druid/segment/incremental/IncrementalIndexStorageAdapterTest.java +++ b/processing/src/test/java/org/apache/druid/segment/incremental/IncrementalIndexStorageAdapterTest.java @@ -496,7 +496,7 @@ public void testCursoringAndIndexUpdationInterleaving() throws Exception @Test public void testCursorDictionaryRaceConditionFix() throws Exception { - // Tests the dictionary ID race condition bug described at https://github.com/apache/incubator-druid/pull/6340 + // Tests the dictionary ID race condition bug described at https://github.com/apache/druid/pull/6340 final IncrementalIndex index = indexCreator.createIndex(); final long timestamp = System.currentTimeMillis(); diff --git a/publications/demo/druid_demo.tex b/publications/demo/druid_demo.tex index a357eeebff0c..d793943838bb 100644 --- a/publications/demo/druid_demo.tex +++ b/publications/demo/druid_demo.tex @@ -108,7 +108,7 @@ \begin{abstract} Druid is an open -source\footnote{\href{https://github.com/apache/incubator-druid}{https://github.com/apache/incubator-druid}} +source\footnote{\href{https://github.com/apache/druid}{https://github.com/apache/druid}} data store built for exploratory analytics on large data sets. Druid supports fast data aggregation, low latency data ingestion, and arbitrary data exploration. The system combines a column-oriented storage layout, a diff --git a/publications/whitepaper/druid.tex b/publications/whitepaper/druid.tex index ea1d90b83711..ba3db9969d43 100644 --- a/publications/whitepaper/druid.tex +++ b/publications/whitepaper/druid.tex @@ -62,7 +62,7 @@ \maketitle \begin{abstract} -Druid is an open source\footnote{\href{http://druid.io/}{http://druid.io/} \href{https://github.com/apache/incubator-druid}{https://github.com/apache/incubator-druid}} +Druid is an open source\footnote{\href{http://druid.io/}{http://druid.io/} \href{https://github.com/apache/druid}{https://github.com/apache/druid}} data store designed for real-time exploratory analytics on large data sets. 
The system combines a column-oriented storage layout, a distributed, shared-nothing architecture, and an advanced indexing structure to allow for diff --git a/server/src/main/java/org/apache/druid/client/CachingClusteredClient.java b/server/src/main/java/org/apache/druid/client/CachingClusteredClient.java index 3df55242bc41..414c0541b648 100644 --- a/server/src/main/java/org/apache/druid/client/CachingClusteredClient.java +++ b/server/src/main/java/org/apache/druid/client/CachingClusteredClient.java @@ -245,7 +245,7 @@ private class SpecificQueryRunnable this.populateCache = CacheUtil.populateCacheOnBrokers(query, strategy, cacheConfig); this.isBySegment = QueryContexts.isBySegment(query); // Note that enabling this leads to putting uncovered intervals information in the response headers - // and might blow up in some cases https://github.com/apache/incubator-druid/issues/2108 + // and might blow up in some cases https://github.com/apache/druid/issues/2108 this.uncoveredIntervalsLimit = QueryContexts.getUncoveredIntervalsLimit(query); this.downstreamQuery = query.withOverriddenContext(makeDownstreamQueryContext()); // For nested queries, we need to look at the intervals of the inner most query. diff --git a/server/src/main/java/org/apache/druid/client/DataSourcesSnapshot.java b/server/src/main/java/org/apache/druid/client/DataSourcesSnapshot.java index dbd3ecb35124..a2cde8eedd6c 100644 --- a/server/src/main/java/org/apache/druid/client/DataSourcesSnapshot.java +++ b/server/src/main/java/org/apache/druid/client/DataSourcesSnapshot.java @@ -147,7 +147,7 @@ public Iterable iterateAllUsedSegmentsInSnapshot() * This method builds timelines from all data sources and finds the overshadowed segments list * * This method should be deduplicated with {@link VersionedIntervalTimeline#findFullyOvershadowed()}: see - * https://github.com/apache/incubator-druid/issues/8070. + * https://github.com/apache/druid/issues/8070. * * @return overshadowed segment Ids list */ diff --git a/server/src/main/java/org/apache/druid/client/ImmutableDruidDataSource.java b/server/src/main/java/org/apache/druid/client/ImmutableDruidDataSource.java index d92f90f29f12..668562f3a81e 100644 --- a/server/src/main/java/org/apache/druid/client/ImmutableDruidDataSource.java +++ b/server/src/main/java/org/apache/druid/client/ImmutableDruidDataSource.java @@ -124,11 +124,11 @@ public String toString() /** * ImmutableDruidDataSource should be considered a container, not a data class. The idea is the same as behind * prohibiting/limiting equals() (and therefore usage as HashSet/HashMap keys) of DataSegment: see - * https://github.com/apache/incubator-druid/issues/6358. When somebody wants to deduplicate ImmutableDruidDataSource + * https://github.com/apache/druid/issues/6358. When somebody wants to deduplicate ImmutableDruidDataSource * objects, they would need to put them into a Map and resolve conflicts by name * manually. * - * See https://github.com/apache/incubator-druid/issues/7858 + * See https://github.com/apache/druid/issues/7858 */ @Override public boolean equals(Object o) @@ -139,11 +139,11 @@ public boolean equals(Object o) /** * ImmutableDruidDataSource should be considered a container, not a data class. The idea is the same as behind * prohibiting/limiting hashCode() (and therefore usage as HashSet/HashMap keys) of DataSegment: see - * https://github.com/apache/incubator-druid/issues/6358. When somebody wants to deduplicate ImmutableDruidDataSource + * https://github.com/apache/druid/issues/6358. 
When somebody wants to deduplicate ImmutableDruidDataSource * objects, they would need to put them into a Map and resolve conflicts by name * manually. * - * See https://github.com/apache/incubator-druid/issues/7858 + * See https://github.com/apache/druid/issues/7858 */ @Override public int hashCode() diff --git a/server/src/main/java/org/apache/druid/discovery/DiscoveryDruidNode.java b/server/src/main/java/org/apache/druid/discovery/DiscoveryDruidNode.java index eaef7169817b..1687a1fc55d1 100644 --- a/server/src/main/java/org/apache/druid/discovery/DiscoveryDruidNode.java +++ b/server/src/main/java/org/apache/druid/discovery/DiscoveryDruidNode.java @@ -66,7 +66,7 @@ public Map getServices() /** * Keeping the legacy name 'nodeType' property name for backward compatibility. When the project is updated to - * Jackson 2.9 it could be changed, see https://github.com/apache/incubator-druid/issues/7152. + * Jackson 2.9 it could be changed, see https://github.com/apache/druid/issues/7152. */ @JsonProperty("nodeType") public NodeRole getNodeRole() diff --git a/server/src/main/java/org/apache/druid/discovery/NodeRole.java b/server/src/main/java/org/apache/druid/discovery/NodeRole.java index 564d19fd517a..08dfc25845a0 100644 --- a/server/src/main/java/org/apache/druid/discovery/NodeRole.java +++ b/server/src/main/java/org/apache/druid/discovery/NodeRole.java @@ -29,7 +29,7 @@ * expose them via JSON APIs. * * These abstractions can probably be merged when Druid updates to Jackson 2.9 that supports JsonAliases, see - * see https://github.com/apache/incubator-druid/issues/7152. + * see https://github.com/apache/druid/issues/7152. */ public enum NodeRole { diff --git a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java index da8a7833db4c..ad6221330508 100644 --- a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java +++ b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java @@ -515,7 +515,7 @@ private SegmentIdWithShardSpec allocatePendingSegmentWithSegmentLineageCheck( // UNIQUE key for the row, ensuring sequences do not fork in two directions. // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines - // have difficulty with large unique keys (see https://github.com/apache/incubator-druid/issues/2319) + // have difficulty with large unique keys (see https://github.com/apache/druid/issues/2319) final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode( Hashing.sha1() .newHasher() @@ -591,7 +591,7 @@ private SegmentIdWithShardSpec allocatePendingSegment( // UNIQUE key for the row, ensuring we don't have more than one segment per sequence per interval. 
// Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines - // have difficulty with large unique keys (see https://github.com/apache/incubator-druid/issues/2319) + // have difficulty with large unique keys (see https://github.com/apache/druid/issues/2319) final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode( Hashing.sha1() .newHasher() diff --git a/server/src/main/java/org/apache/druid/metadata/SQLMetadataRuleManager.java b/server/src/main/java/org/apache/druid/metadata/SQLMetadataRuleManager.java index 1f47e44811c7..fc7801341826 100644 --- a/server/src/main/java/org/apache/druid/metadata/SQLMetadataRuleManager.java +++ b/server/src/main/java/org/apache/druid/metadata/SQLMetadataRuleManager.java @@ -203,7 +203,7 @@ public void run() // won't actually run anymore after that (it could only enter the synchronized section and exit // immediately because the localStartedOrder doesn't match the new currentStartOrder). It's needed // to avoid flakiness in SQLMetadataRuleManagerTest. - // See https://github.com/apache/incubator-druid/issues/6028 + // See https://github.com/apache/druid/issues/6028 synchronized (lock) { if (localStartedOrder == currentStartOrder) { poll(); diff --git a/server/src/main/java/org/apache/druid/metadata/SQLMetadataSegmentManager.java b/server/src/main/java/org/apache/druid/metadata/SQLMetadataSegmentManager.java index 368918043be1..d121cc9b08dd 100644 --- a/server/src/main/java/org/apache/druid/metadata/SQLMetadataSegmentManager.java +++ b/server/src/main/java/org/apache/druid/metadata/SQLMetadataSegmentManager.java @@ -315,7 +315,7 @@ private Runnable createPollTaskForStartOrder(long startOrder, PeriodicDatabasePo // isPollingDatabasePeriodically() to ensure that when stopPollingDatabasePeriodically() exits, poll() won't // actually run anymore after that (it could only enter the synchronized section and exit immediately because the // localStartedOrder doesn't match the new currentStartPollingOrder). It's needed to avoid flakiness in - // SqlSegmentsMetadataTest. See https://github.com/apache/incubator-druid/issues/6028 + // SqlSegmentsMetadataTest. See https://github.com/apache/druid/issues/6028 ReentrantReadWriteLock.ReadLock lock = startStopPollLock.readLock(); lock.lock(); try { diff --git a/server/src/main/java/org/apache/druid/segment/loading/RoundRobinStorageLocationSelectorStrategy.java b/server/src/main/java/org/apache/druid/segment/loading/RoundRobinStorageLocationSelectorStrategy.java index 1dd666005c2d..5e702c936ce8 100644 --- a/server/src/main/java/org/apache/druid/segment/loading/RoundRobinStorageLocationSelectorStrategy.java +++ b/server/src/main/java/org/apache/druid/segment/loading/RoundRobinStorageLocationSelectorStrategy.java @@ -49,7 +49,7 @@ public Iterator getLocations() private final int numStorageLocations = storageLocations.size(); private int remainingIterations = numStorageLocations; // Each call to this methods starts with a different startIndex to avoid the same location being picked up over - // again. See https://github.com/apache/incubator-druid/issues/8614. + // again. See https://github.com/apache/druid/issues/8614. 
private int i = startIndex.getAndUpdate(n -> (n + 1) % numStorageLocations); @Override diff --git a/server/src/main/java/org/apache/druid/segment/loading/StorageLocationSelectorStrategy.java b/server/src/main/java/org/apache/druid/segment/loading/StorageLocationSelectorStrategy.java index 99e7554239ae..f85f28d8a98e 100644 --- a/server/src/main/java/org/apache/druid/segment/loading/StorageLocationSelectorStrategy.java +++ b/server/src/main/java/org/apache/druid/segment/loading/StorageLocationSelectorStrategy.java @@ -31,8 +31,8 @@ * * Only a snapshot of the locations is returned here. The implemntations currently do not handle all kinds of * concurrency issues and accesses to the underlying storage. Please see - * https://github.com/apache/incubator-druid/pull/8038#discussion_r325520829 of PR https://github - * .com/apache/incubator-druid/pull/8038 for more details. + * https://github.com/apache/druid/pull/8038#discussion_r325520829 of PR https://github + * .com/apache/druid/pull/8038 for more details. */ @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type", defaultImpl = LeastBytesUsedStorageLocationSelectorStrategy.class) diff --git a/server/src/main/java/org/apache/druid/segment/realtime/firehose/EventReceiverFirehoseFactory.java b/server/src/main/java/org/apache/druid/segment/realtime/firehose/EventReceiverFirehoseFactory.java index b3f40c74b99a..23c0a58e51d4 100644 --- a/server/src/main/java/org/apache/druid/segment/realtime/firehose/EventReceiverFirehoseFactory.java +++ b/server/src/main/java/org/apache/druid/segment/realtime/firehose/EventReceiverFirehoseFactory.java @@ -114,7 +114,7 @@ public EventReceiverFirehoseFactory( @JsonProperty("serviceName") String serviceName, @JsonProperty("bufferSize") Integer bufferSize, // Keeping the legacy 'maxIdleTime' property name for backward compatibility. When the project is updated to - // Jackson 2.9 it could be changed, see https://github.com/apache/incubator-druid/issues/7152 + // Jackson 2.9 it could be changed, see https://github.com/apache/druid/issues/7152 @JsonProperty("maxIdleTime") @Nullable Long maxIdleTimeMillis, @JacksonInject ChatHandlerProvider chatHandlerProvider, @JacksonInject @Json ObjectMapper jsonMapper, @@ -174,7 +174,7 @@ public int getBufferSize() /** * Keeping the legacy 'maxIdleTime' property name for backward compatibility. When the project is updated to Jackson - * 2.9 it could be changed, see https://github.com/apache/incubator-druid/issues/7152 + * 2.9 it could be changed, see https://github.com/apache/druid/issues/7152 */ @JsonProperty("maxIdleTime") public long getMaxIdleTimeMillis() @@ -302,7 +302,7 @@ private Thread createDelayedCloseExecutor() // we long the error and continue a loop after some pause. log.error( "Either idleCloseTimeNs or requestedShutdownTimeNs must be non-null. 
" - + "Please file a bug at https://github.com/apache/incubator-druid/issues" + + "Please file a bug at https://github.com/apache/druid/issues" ); } if (idleCloseTimeNs != null && idleCloseTimeNs - System.nanoTime() <= 0) { // overflow-aware comparison diff --git a/server/src/main/java/org/apache/druid/server/ClientQuerySegmentWalker.java b/server/src/main/java/org/apache/druid/server/ClientQuerySegmentWalker.java index 2d98d2d187b3..29a173ccf985 100644 --- a/server/src/main/java/org/apache/druid/server/ClientQuerySegmentWalker.java +++ b/server/src/main/java/org/apache/druid/server/ClientQuerySegmentWalker.java @@ -92,7 +92,7 @@ private QueryRunner makeRunner(Query query, QueryRunner baseClientR { QueryToolChest> toolChest = warehouse.getToolChest(query); - // This does not adhere to the fluent workflow. See https://github.com/apache/incubator-druid/issues/5517 + // This does not adhere to the fluent workflow. See https://github.com/apache/druid/issues/5517 return new ResultLevelCachingQueryRunner<>(makeRunner(query, baseClientRunner, toolChest), toolChest, query, diff --git a/server/src/main/java/org/apache/druid/server/QueryResource.java b/server/src/main/java/org/apache/druid/server/QueryResource.java index 6bfb3a9cc3bf..78100aa153a4 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResource.java +++ b/server/src/main/java/org/apache/druid/server/QueryResource.java @@ -276,7 +276,7 @@ public void write(OutputStream outputStream) throws WebApplicationException DirectDruidClient.removeMagicResponseContextFields(responseContext); - //Limit the response-context header, see https://github.com/apache/incubator-druid/issues/2331 + //Limit the response-context header, see https://github.com/apache/druid/issues/2331 //Note that Response.ResponseBuilder.header(String key,Object value).build() calls value.toString() //and encodes the string using ASCII, so 1 char is = 1 byte final ResponseContext.SerializationResult serializationResult = responseContext.serializeWith( diff --git a/server/src/main/java/org/apache/druid/server/coordination/ServerType.java b/server/src/main/java/org/apache/druid/server/coordination/ServerType.java index df25fb3aa08b..42fb65a3fdfb 100644 --- a/server/src/main/java/org/apache/druid/server/coordination/ServerType.java +++ b/server/src/main/java/org/apache/druid/server/coordination/ServerType.java @@ -35,7 +35,7 @@ *

* The toString() method converts the enum name() to lowercase and replaces underscores with hyphens, * which is the format expected for the server type string prior to the patch that introduced ServerType: - * https://github.com/apache/incubator-druid/pull/4148 + * https://github.com/apache/druid/pull/4148 * * This is a historical occasion that this enum is different from {@link NodeRole} because * they are essentially the same abstraction, but merging them could only increase the complexity and drop the code @@ -43,7 +43,7 @@ * and both expose them via JSON APIs. * * These abstractions can probably be merged when Druid updates to Jackson 2.9 that supports JsonAliases, see - * see https://github.com/apache/incubator-druid/issues/7152. + * see https://github.com/apache/druid/issues/7152. */ public enum ServerType { diff --git a/server/src/main/java/org/apache/druid/server/coordinator/CachingCostBalancerStrategyFactory.java b/server/src/main/java/org/apache/druid/server/coordinator/CachingCostBalancerStrategyFactory.java index 93df9ab738c7..4a1989df24df 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/CachingCostBalancerStrategyFactory.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/CachingCostBalancerStrategyFactory.java @@ -61,7 +61,7 @@ public CachingCostBalancerStrategyFactory( this.config = config; // Adding to lifecycle dynamically because couldn't use @ManageLifecycle on the class, - // see https://github.com/apache/incubator-druid/issues/4980 + // see https://github.com/apache/druid/issues/4980 lifecycle.addMaybeStartManagedInstance(this); serverInventoryView.registerSegmentCallback( diff --git a/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java b/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java index ada41d22bb88..a3101a5c3f06 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java @@ -91,7 +91,7 @@ public class CoordinatorDynamicConfig @JsonCreator public CoordinatorDynamicConfig( // Keeping the legacy 'millisToWaitBeforeDeleting' property name for backward compatibility. When the project is - // updated to Jackson 2.9 it could be changed, see https://github.com/apache/incubator-druid/issues/7152 + // updated to Jackson 2.9 it could be changed, see https://github.com/apache/druid/issues/7152 @JsonProperty("millisToWaitBeforeDeleting") long leadingTimeMillisBeforeCanMarkAsUnusedOvershadowedSegments, @JsonProperty("mergeBytesLimit") long mergeBytesLimit, @@ -102,17 +102,17 @@ public CoordinatorDynamicConfig( @JsonProperty("balancerComputeThreads") int balancerComputeThreads, @JsonProperty("emitBalancingStats") boolean emitBalancingStats, // Type is Object here so that we can support both string and list as Coordinator console can not send array of - // strings in the update request. See https://github.com/apache/incubator-druid/issues/3055. + // strings in the update request. See https://github.com/apache/druid/issues/3055. // Keeping the legacy 'killDataSourceWhitelist' property name for backward compatibility. 
When the project is - // updated to Jackson 2.9 it could be changed, see https://github.com/apache/incubator-druid/issues/7152 + // updated to Jackson 2.9 it could be changed, see https://github.com/apache/druid/issues/7152 @JsonProperty("killDataSourceWhitelist") Object specificDataSourcesToKillUnusedSegmentsIn, // Keeping the legacy 'killAllDataSources' property name for backward compatibility. When the project is - // updated to Jackson 2.9 it could be changed, see https://github.com/apache/incubator-druid/issues/7152 + // updated to Jackson 2.9 it could be changed, see https://github.com/apache/druid/issues/7152 @JsonProperty("killAllDataSources") boolean killUnusedSegmentsInAllDataSources, // Type is Object here so that we can support both string and list as Coordinator console can not send array of // strings in the update request, as well as for specificDataSourcesToKillUnusedSegmentsIn. // Keeping the legacy 'killPendingSegmentsSkipList' property name for backward compatibility. When the project is - // updated to Jackson 2.9 it could be changed, see https://github.com/apache/incubator-druid/issues/7152 + // updated to Jackson 2.9 it could be changed, see https://github.com/apache/druid/issues/7152 @JsonProperty("killPendingSegmentsSkipList") Object dataSourcesToNotKillStalePendingSegmentsIn, @JsonProperty("maxSegmentsInNodeLoadingQueue") int maxSegmentsInNodeLoadingQueue, @JsonProperty("decommissioningNodes") Object decommissioningNodes, diff --git a/server/src/main/java/org/apache/druid/server/coordinator/CostBalancerStrategy.java b/server/src/main/java/org/apache/druid/server/coordinator/CostBalancerStrategy.java index 6f2373b3f8e9..4fd4164f3001 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/CostBalancerStrategy.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/CostBalancerStrategy.java @@ -52,7 +52,7 @@ public class CostBalancerStrategy implements BalancerStrategy /** * This defines the unnormalized cost function between two segments. * - * See https://github.com/apache/incubator-druid/pull/2972 for more details about the cost function. + * See https://github.com/apache/druid/pull/2972 for more details about the cost function. * * intervalCost: segments close together are more likely to be queried together * diff --git a/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java b/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java index d8d280031222..ae29950bd518 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java @@ -326,7 +326,7 @@ public Map getLoadStatus() final DruidDataSource loadedView = druidServer.getDataSource(dataSource.getName()); if (loadedView != null) { // This does not use segments.removeAll(loadedView.getSegments()) for performance reasons. - // Please see https://github.com/apache/incubator-druid/pull/5632 and LoadStatusBenchmark for more info. + // Please see https://github.com/apache/druid/pull/5632 and LoadStatusBenchmark for more info. 
for (DataSegment serverSegment : loadedView.getSegments()) { segments.remove(serverSegment); } diff --git a/server/src/main/java/org/apache/druid/server/coordinator/cost/SegmentsCostCache.java b/server/src/main/java/org/apache/druid/server/coordinator/cost/SegmentsCostCache.java index 43ded1ee4952..561bc3322da7 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/cost/SegmentsCostCache.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/cost/SegmentsCostCache.java @@ -43,7 +43,7 @@ /** * SegmentsCostCache provides faster way to calculate cost function proposed in {@link CostBalancerStrategy}. - * See https://github.com/apache/incubator-druid/pull/2972 for more details about the cost function. + * See https://github.com/apache/druid/pull/2972 for more details about the cost function. * * Joint cost for two segments (you can make formulas below readable by copy-pasting to * https://www.codecogs.com/latex/eqneditor.php): diff --git a/server/src/main/java/org/apache/druid/server/coordinator/rules/Rule.java b/server/src/main/java/org/apache/druid/server/coordinator/rules/Rule.java index 14f791e6adb4..dc12e4dc9bf3 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/rules/Rule.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/rules/Rule.java @@ -60,7 +60,7 @@ public interface Rule * For example, {@link org.apache.druid.server.coordinator.ReplicationThrottler} needs to belong only to "RuleParams", * but not "DruidCoordinatorHelperParams". The opposite for the collection of used segments. * - * See https://github.com/apache/incubator-druid/issues/7228 + * See https://github.com/apache/druid/issues/7228 */ CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntimeParams params, DataSegment segment); } diff --git a/server/src/main/java/org/apache/druid/server/emitter/HttpEmitterSSLClientConfig.java b/server/src/main/java/org/apache/druid/server/emitter/HttpEmitterSSLClientConfig.java index bbf7a14140cc..62b910a4b517 100644 --- a/server/src/main/java/org/apache/druid/server/emitter/HttpEmitterSSLClientConfig.java +++ b/server/src/main/java/org/apache/druid/server/emitter/HttpEmitterSSLClientConfig.java @@ -31,7 +31,7 @@ * {@link org.apache.druid.guice.annotations.ExtensionPoint}, which would also have to be moved. * * It would be easier to resolve these issues and merge the TLS-related config with HttpEmitterConfig once - * https://github.com/apache/incubator-druid/issues/4312 is resolved, so the TLS config is kept separate for now. + * https://github.com/apache/druid/issues/4312 is resolved, so the TLS config is kept separate for now. 
*/ public class HttpEmitterSSLClientConfig { diff --git a/server/src/main/java/org/apache/druid/server/http/MetadataResource.java b/server/src/main/java/org/apache/druid/server/http/MetadataResource.java index 6eca39248c1c..fec9556a7b60 100644 --- a/server/src/main/java/org/apache/druid/server/http/MetadataResource.java +++ b/server/src/main/java/org/apache/druid/server/http/MetadataResource.java @@ -248,7 +248,7 @@ public Response getUsedSegmentsInDataSource( /** * This is a {@link POST} method to pass the list of intervals in the body, - * see https://github.com/apache/incubator-druid/pull/2109#issuecomment-182191258 + * see https://github.com/apache/druid/pull/2109#issuecomment-182191258 */ @POST @Path("/datasources/{dataSourceName}/segments") diff --git a/server/src/main/java/org/apache/druid/server/initialization/jetty/JettyServerModule.java b/server/src/main/java/org/apache/druid/server/initialization/jetty/JettyServerModule.java index 4e0714230dfc..bbb80e49aaf7 100644 --- a/server/src/main/java/org/apache/druid/server/initialization/jetty/JettyServerModule.java +++ b/server/src/main/java/org/apache/druid/server/initialization/jetty/JettyServerModule.java @@ -223,7 +223,7 @@ static Server makeAndInitializeServer( final Server server = new Server(threadPool); // Without this bean set, the default ScheduledExecutorScheduler runs as non-daemon, causing lifecycle hooks to fail - // to fire on main exit. Related bug: https://github.com/apache/incubator-druid/pull/1627 + // to fire on main exit. Related bug: https://github.com/apache/druid/pull/1627 server.addBean(new ScheduledExecutorScheduler("JettyScheduler", true), true); final List serverConnectors = new ArrayList<>(); diff --git a/server/src/main/java/org/apache/druid/server/metrics/MonitorsConfig.java b/server/src/main/java/org/apache/druid/server/metrics/MonitorsConfig.java index 9d9241b7d401..25e124c19837 100644 --- a/server/src/main/java/org/apache/druid/server/metrics/MonitorsConfig.java +++ b/server/src/main/java/org/apache/druid/server/metrics/MonitorsConfig.java @@ -45,7 +45,7 @@ public class MonitorsConfig * Prior to 0.12.0, Druid used Monitor classes from the `com.metamx.metrics` package. * In 0.12.0, these Monitor classes were moved to the Druid repo under `org.apache.druid.java.util.metrics`. * In 0.13.0, they were moved again to `org.apache.druid.java.util.metrics`. - * See https://github.com/apache/incubator-druid/pull/5289 and https://github.com/apache/incubator-druid/pull/6266 + * See https://github.com/apache/druid/pull/5289 and https://github.com/apache/druid/pull/6266 * for details. 
* * We automatically adjust old package references to `org.apache.druid.java.util.metrics` for backwards diff --git a/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java b/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java index 4e7a3da294c2..361cb434a7c6 100644 --- a/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java +++ b/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java @@ -43,7 +43,7 @@ public class AuthenticationResult * Name of authenticator whom created the results * * If you found your self asking why the authenticatedBy field can be null please read this - * https://github.com/apache/incubator-druid/pull/5706#discussion_r185940889 + * https://github.com/apache/druid/pull/5706#discussion_r185940889 */ @Nullable private final String authenticatedBy; diff --git a/server/src/test/java/org/apache/druid/curator/CuratorModuleTest.java b/server/src/test/java/org/apache/druid/curator/CuratorModuleTest.java index 6fd5705919d4..18b6b6fc5785 100644 --- a/server/src/test/java/org/apache/druid/curator/CuratorModuleTest.java +++ b/server/src/test/java/org/apache/druid/curator/CuratorModuleTest.java @@ -159,7 +159,7 @@ public void exitsJvmWhenMaxRetriesExceeded() throws Exception ); } - @Ignore("Verifies changes in https://github.com/apache/incubator-druid/pull/8458, but overkill for regular testing") + @Ignore("Verifies changes in https://github.com/apache/druid/pull/8458, but overkill for regular testing") @Test public void ignoresDeprecatedCuratorConfigProperties() { diff --git a/server/src/test/java/org/apache/druid/curator/CuratorTestBase.java b/server/src/test/java/org/apache/druid/curator/CuratorTestBase.java index 5eb813eb76a0..7099ff31d80a 100644 --- a/server/src/test/java/org/apache/druid/curator/CuratorTestBase.java +++ b/server/src/test/java/org/apache/druid/curator/CuratorTestBase.java @@ -81,7 +81,7 @@ protected void setupZNodeForServer(DruidServer server, ZkPathsConfig zkPathsConf * For some reason, Travis build sometimes fails here because of * org.apache.zookeeper.KeeperException$NodeExistsException: KeeperErrorCode = NodeExists, though it should never * happen because zookeeper should be in a clean state for each run of tests. - * Address issue: https://github.com/apache/incubator-druid/issues/1512 + * Address issue: https://github.com/apache/druid/issues/1512 */ try { curator.setData() diff --git a/services/src/main/java/org/apache/druid/cli/CreateTables.java b/services/src/main/java/org/apache/druid/cli/CreateTables.java index ce48a8bf66f3..34ca5abe1efa 100644 --- a/services/src/main/java/org/apache/druid/cli/CreateTables.java +++ b/services/src/main/java/org/apache/druid/cli/CreateTables.java @@ -69,7 +69,7 @@ protected List getModules() return ImmutableList.of( // It's unknown why those modules are required in CreateTables, and if all of those modules are required or not. // Maybe some of those modules could be removed. 
- // See https://github.com/apache/incubator-druid/pull/4429#discussion_r123602930 + // See https://github.com/apache/druid/pull/4429#discussion_r123602930 new DruidProcessingModule(), new QueryableModule(), new QueryRunnerFactoryModule(), diff --git a/services/src/main/java/org/apache/druid/cli/ExportMetadata.java b/services/src/main/java/org/apache/druid/cli/ExportMetadata.java index c3202959beaf..fd981637896e 100644 --- a/services/src/main/java/org/apache/druid/cli/ExportMetadata.java +++ b/services/src/main/java/org/apache/druid/cli/ExportMetadata.java @@ -143,7 +143,7 @@ protected List getModules() // This area is copied from CreateTables. // It's unknown why those modules are required in CreateTables, and if all of those modules are required or not. // Maybe some of those modules could be removed. - // See https://github.com/apache/incubator-druid/pull/4429#discussion_r123602930 + // See https://github.com/apache/druid/pull/4429#discussion_r123602930 new DruidProcessingModule(), new QueryableModule(), new QueryRunnerFactoryModule(), diff --git a/services/src/main/java/org/apache/druid/cli/ResetCluster.java b/services/src/main/java/org/apache/druid/cli/ResetCluster.java index b6dc6ba2d5db..e0cc14c39780 100644 --- a/services/src/main/java/org/apache/druid/cli/ResetCluster.java +++ b/services/src/main/java/org/apache/druid/cli/ResetCluster.java @@ -80,7 +80,7 @@ protected List getModules() return ImmutableList.of( // It's unknown if those modules are required in ResetCluster. // Maybe some of those modules could be removed. - // See https://github.com/apache/incubator-druid/pull/4429#discussion_r123603498 + // See https://github.com/apache/druid/pull/4429#discussion_r123603498 new DruidProcessingModule(), new QueryableModule(), new QueryRunnerFactoryModule(), diff --git a/services/src/main/java/org/apache/druid/cli/ValidateSegments.java b/services/src/main/java/org/apache/druid/cli/ValidateSegments.java index 1487d2af1467..d5625a6ffc4c 100644 --- a/services/src/main/java/org/apache/druid/cli/ValidateSegments.java +++ b/services/src/main/java/org/apache/druid/cli/ValidateSegments.java @@ -81,7 +81,7 @@ protected List getModules() return ImmutableList.of( // It's unknown if those modules are required in ValidateSegments. // Maybe some of those modules could be removed. - // See https://github.com/apache/incubator-druid/pull/4429#discussion_r123603498 + // See https://github.com/apache/druid/pull/4429#discussion_r123603498 new DruidProcessingModule(), new QueryableModule(), new QueryRunnerFactoryModule(), diff --git a/services/src/main/java/org/apache/druid/cli/validate/DruidJsonValidator.java b/services/src/main/java/org/apache/druid/cli/validate/DruidJsonValidator.java index 1d0eb1758957..46922fc616d8 100644 --- a/services/src/main/java/org/apache/druid/cli/validate/DruidJsonValidator.java +++ b/services/src/main/java/org/apache/druid/cli/validate/DruidJsonValidator.java @@ -99,7 +99,7 @@ protected List getModules() return ImmutableList.of( // It's unknown if those modules are required in DruidJsonValidator. // Maybe some of those modules could be removed. 
- // See https://github.com/apache/incubator-druid/pull/4429#discussion_r123603498 + // See https://github.com/apache/druid/pull/4429#discussion_r123603498 new DruidProcessingModule(), new QueryableModule(), new QueryRunnerFactoryModule(), diff --git a/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java b/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java index c96cbb4c9526..4c9ac0391906 100644 --- a/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java @@ -68,8 +68,8 @@ public class DruidStatement implements Closeable * we would not need to use this executor. *

* See discussion at: - * https://github.com/apache/incubator-druid/pull/4288 - * https://github.com/apache/incubator-druid/pull/4415 + * https://github.com/apache/druid/pull/4288 + * https://github.com/apache/druid/pull/4415 */ private final ExecutorService yielderOpenCloseExecutor; private State state = State.NEW; diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 4116d9519d9b..895b452e47c7 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -1125,7 +1125,7 @@ public void testSelectProjectionFromSelectSingleColumnWithInnerLimitDescending() @Test public void testSelectProjectionFromSelectSingleColumnDescending() throws Exception { - // Regression test for https://github.com/apache/incubator-druid/issues/7768. + // Regression test for https://github.com/apache/druid/issues/7768. // After upgrading to Calcite 1.21, Calcite no longer respects the ORDER BY __time DESC // in the inner query. This is valid, as the SQL standard considers the subquery results to be an unordered @@ -1818,7 +1818,7 @@ public void testColumnComparison() throws Exception @Test public void testHavingOnRatio() throws Exception { - // Test for https://github.com/apache/incubator-druid/issues/4264 + // Test for https://github.com/apache/druid/issues/4264 testQuery( "SELECT\n" @@ -2555,7 +2555,7 @@ public void testGroupByNothingWithLiterallyFalseFilter() throws Exception @Test public void testGroupByNothingWithImpossibleTimeFilter() throws Exception { - // Regression test for https://github.com/apache/incubator-druid/issues/7671 + // Regression test for https://github.com/apache/druid/issues/7671 testQuery( "SELECT COUNT(*) FROM druid.foo\n" @@ -3622,7 +3622,7 @@ public void testInFilter() throws Exception @Test public void testInFilterWith23Elements() throws Exception { - // Regression test for https://github.com/apache/incubator-druid/issues/4203. + // Regression test for https://github.com/apache/druid/issues/4203. final List elements = new ArrayList<>(); elements.add("abc"); @@ -7617,7 +7617,7 @@ public void testUsingSubqueryAsPartOfOrFilter() @Test public void testTimeExtractWithTooFewArguments() throws Exception { - // Regression test for https://github.com/apache/incubator-druid/pull/7710. + // Regression test for https://github.com/apache/druid/pull/7710. expectedException.expect(ValidationException.class); expectedException.expectCause(CoreMatchers.instanceOf(CalciteContextException.class)); expectedException.expectCause( @@ -7696,7 +7696,7 @@ public void testUsingSubqueryAsFilterWithInnerSort() throws Exception { String nullValue = NullHandling.replaceWithDefault() ? 
"" : null; - // Regression test for https://github.com/apache/incubator-druid/issues/4208 + // Regression test for https://github.com/apache/druid/issues/4208 testQuery( "SELECT dim1, dim2 FROM druid.foo\n" + " WHERE dim2 IN (\n" diff --git a/web-console/src/utils/ingestion-spec.tsx b/web-console/src/utils/ingestion-spec.tsx index d063714b2c5e..d26a5435b30c 100644 --- a/web-console/src/utils/ingestion-spec.tsx +++ b/web-console/src/utils/ingestion-spec.tsx @@ -393,7 +393,7 @@ export interface TimestampSpec { } export function getTimestampSpecColumn(timestampSpec: TimestampSpec) { - // https://github.com/apache/incubator-druid/blob/master/core/src/main/java/org/apache/druid/data/input/impl/TimestampSpec.java#L44 + // https://github.com/apache/druid/blob/master/core/src/main/java/org/apache/druid/data/input/impl/TimestampSpec.java#L44 return timestampSpec.column || 'timestamp'; } diff --git a/web-console/src/views/load-data-view/load-data-view.tsx b/web-console/src/views/load-data-view/load-data-view.tsx index 809e71abd542..84bdcc1ebf21 100644 --- a/web-console/src/views/load-data-view/load-data-view.tsx +++ b/web-console/src/views/load-data-view/load-data-view.tsx @@ -2994,7 +2994,7 @@ export class LoadDataView extends React.PureComponent  ·  - + diff --git a/website/pom.xml b/website/pom.xml index 9f0ba97afc93..a826da3029a9 100644 --- a/website/pom.xml +++ b/website/pom.xml @@ -33,7 +33,7 @@ - ../../incubator-druid-website-src + ../../druid-website-src