From 6e9fe71ba9fff00c5653e0c3e949fd28a2920cf6 Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Wed, 12 Aug 2020 15:47:36 -0700 Subject: [PATCH] Remove implied profanity from error messages. i.e. WTF, WTH. --- .../org/apache/druid/guice/ConfigProvider.java | 3 ++- .../util/common/io/smoosh/FileSmoosher.java | 4 ++-- .../druid/java/util/common/logger/Logger.java | 10 ---------- .../util/http/client/pool/ResourcePool.java | 2 +- .../SequenceInputStreamResponseHandler.java | 2 +- .../partition/OvershadowableManager.java | 18 +++++++++++------- .../apache/druid/common/utils/IdUtilsTest.java | 2 +- .../ambari/metrics/AmbariMetricsEmitter.java | 2 +- .../emitter/graphite/GraphiteEmitter.java | 12 ++++++------ .../emitter/opentsdb/OpentsdbEmitter.java | 2 +- ...atorPollingBasicAuthorizerCacheManager.java | 2 +- .../bloom/BloomFilterMergeAggregator.java | 2 +- .../BloomFilterMergeAggregatorFactory.java | 2 +- .../kafka/supervisor/KafkaSupervisorTest.java | 4 ++-- .../supervisor/KinesisSupervisorTest.java | 4 ++-- .../druid/server/lookup/PollingLookup.java | 2 +- .../indexer/DetermineHashedPartitionsJob.java | 7 ++++--- .../druid/indexer/DeterminePartitionsJob.java | 4 ++-- .../druid/indexer/IndexGeneratorJob.java | 2 +- .../AppenderatorDriverRealtimeIndexTask.java | 6 +++++- .../indexing/common/task/ArchiveTask.java | 2 +- .../druid/indexing/common/task/MoveTask.java | 2 +- .../common/task/RealtimeIndexTask.java | 2 +- .../indexing/common/task/RestoreTask.java | 2 +- .../parallel/PartialSegmentMergeTask.java | 2 +- .../overlord/BaseRestorableTaskRunner.java | 2 +- .../indexing/overlord/ForkingTaskRunner.java | 4 ++-- .../indexing/overlord/RemoteTaskRunner.java | 6 +++--- .../overlord/SingleTaskBackgroundRunner.java | 4 ++-- .../druid/indexing/overlord/TaskLockbox.java | 2 +- .../indexing/overlord/ThreadingTaskRunner.java | 2 +- .../overlord/hrtr/HttpRemoteTaskRunner.java | 12 ++++++------ .../SeekableStreamIndexTaskRunner.java | 10 +++++----- 
.../seekablestream/SequenceMetadata.java | 8 ++++++-- .../supervisor/SeekableStreamSupervisor.java | 10 +++++----- .../indexing/worker/WorkerTaskManager.java | 6 +++--- ...gestSegmentFirehoseFactoryTimelineTest.java | 2 +- .../apache/druid/guice/PropertiesModule.java | 2 +- .../query/ChainedExecutionQueryRunner.java | 2 +- .../epinephelinae/ByteBufferHashTable.java | 6 +++--- .../LimitedBufferHashGrouper.java | 4 ++-- .../druid/query/scan/ScanQueryEngine.java | 2 +- .../query/scan/ScanQueryRunnerFactory.java | 2 +- .../AggregateTopNMetricFirstAlgorithm.java | 2 +- .../incremental/OffheapIncrementalIndex.java | 2 +- ...utCachingExpressionColumnValueSelector.java | 2 +- ...utCachingExpressionColumnValueSelector.java | 2 +- .../druid/query/SchemaEvolutionTest.java | 2 +- .../LimitedBufferHashGrouperTest.java | 2 +- .../discovery/ServerDiscoverySelector.java | 2 +- .../IndexerSQLMetadataStorageCoordinator.java | 2 +- .../SegmentLoaderLocalCacheManager.java | 4 ++-- .../druid/segment/realtime/FireHydrant.java | 2 +- .../appenderator/AppenderatorImpl.java | 4 ++-- .../appenderator/BaseAppenderatorDriver.java | 6 +++--- .../appenderator/StreamAppenderatorDriver.java | 6 +++--- .../segment/realtime/plumber/Plumbers.java | 2 +- .../realtime/plumber/RealtimePlumber.java | 6 ++++-- .../coordination/ChangeRequestHttpSyncer.java | 2 +- .../server/coordinator/HttpLoadQueuePeon.java | 2 +- .../coordinator/duty/CompactSegments.java | 4 ++-- .../lookup/cache/LookupCoordinatorManager.java | 2 +- .../appenderator/AppenderatorTest.java | 2 +- .../java/org/apache/druid/cli/DumpSegment.java | 2 +- .../expression/BinaryOperatorConversion.java | 4 ++-- .../sql/calcite/expression/Expressions.java | 10 +++++----- .../expression/OperatorConversions.java | 2 +- .../builtin/CeilOperatorConversion.java | 4 ++-- .../builtin/FloorOperatorConversion.java | 4 ++-- .../sql/calcite/filtration/BoundValue.java | 2 +- .../filtration/CombineAndSimplifyBounds.java | 3 ++- 
.../filtration/ConvertSelectorsToIns.java | 3 ++- .../sql/calcite/filtration/Filtration.java | 3 ++- .../calcite/planner/DruidConvertletTable.java | 2 +- .../sql/calcite/rel/PartialDruidQuery.java | 2 +- .../ProjectAggregatePruneUnusedCallRule.java | 2 +- 76 files changed, 149 insertions(+), 140 deletions(-) diff --git a/core/src/main/java/org/apache/druid/guice/ConfigProvider.java b/core/src/main/java/org/apache/druid/guice/ConfigProvider.java index a6ca07b42996..538f2a61375e 100644 --- a/core/src/main/java/org/apache/druid/guice/ConfigProvider.java +++ b/core/src/main/java/org/apache/druid/guice/ConfigProvider.java @@ -29,6 +29,7 @@ import java.util.Map; /** + * */ public class ConfigProvider implements Provider { @@ -79,7 +80,7 @@ public T get() { try { // ConfigMagic handles a null replacements - Preconditions.checkNotNull(factory, "WTF!? Code misconfigured, inject() didn't get called."); + Preconditions.checkNotNull(factory, "Code misconfigured, inject() didn't get called."); return factory.buildWithReplacements(clazz, replacements); } catch (IllegalArgumentException e) { diff --git a/core/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java b/core/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java index f8e5d8374450..6d4de9378e7d 100644 --- a/core/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java +++ b/core/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java @@ -205,7 +205,7 @@ private int verifySize(long bytesWrittenInChunk) bytesWritten += bytesWrittenInChunk; if (bytesWritten != currOut.getCurrOffset() - startOffset) { - throw new ISE("WTF? Perhaps there is some concurrent modification going on?"); + throw new ISE("Perhaps there is some concurrent modification going on?"); } if (bytesWritten > size) { throw new ISE("Wrote[%,d] bytes for something of size[%,d]. 
Liar!!!", bytesWritten, size); @@ -228,7 +228,7 @@ public void close() throws IOException writerCurrentlyInUse = false; if (bytesWritten != currOut.getCurrOffset() - startOffset) { - throw new ISE("WTF? Perhaps there is some concurrent modification going on?"); + throw new ISE("Perhaps there is some concurrent modification going on?"); } if (bytesWritten != size) { throw new IOE("Expected [%,d] bytes, only saw [%,d], potential corruption?", size, bytesWritten); diff --git a/core/src/main/java/org/apache/druid/java/util/common/logger/Logger.java b/core/src/main/java/org/apache/druid/java/util/common/logger/Logger.java index bd1cf9d93cdb..8306dcad9e60 100644 --- a/core/src/main/java/org/apache/druid/java/util/common/logger/Logger.java +++ b/core/src/main/java/org/apache/druid/java/util/common/logger/Logger.java @@ -162,16 +162,6 @@ public void assertionError(String message, Object... formatArgs) log.error("ASSERTION_ERROR: " + message, formatArgs); } - public void wtf(String message, Object... formatArgs) - { - error(message, formatArgs); - } - - public void wtf(Throwable t, String message, Object... formatArgs) - { - error(t, message, formatArgs); - } - public void debugSegments(@Nullable final Collection segments, @Nullable String preamble) { if (log.isDebugEnabled()) { diff --git a/core/src/main/java/org/apache/druid/java/util/http/client/pool/ResourcePool.java b/core/src/main/java/org/apache/druid/java/util/http/client/pool/ResourcePool.java index 40570f060e6d..81297968c8d9 100644 --- a/core/src/main/java/org/apache/druid/java/util/http/client/pool/ResourcePool.java +++ b/core/src/main/java/org/apache/druid/java/util/http/client/pool/ResourcePool.java @@ -224,7 +224,7 @@ V get() deficit--; poolVal = null; } else { - throw new IllegalStateException("WTF?! No objects left, and no object deficit. 
This is probably a bug."); + throw new IllegalStateException("Unexpected state: No objects left, and no object deficit"); } } diff --git a/core/src/main/java/org/apache/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java b/core/src/main/java/org/apache/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java index c3247d69832b..e395a7172396 100644 --- a/core/src/main/java/org/apache/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java +++ b/core/src/main/java/org/apache/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java @@ -151,7 +151,7 @@ public ClientResponse done(ClientResponse clientRespon } catch (IOException e) { // This should never happen - log.wtf(e, "The empty stream threw an IOException"); + log.error(e, "The empty stream threw an IOException"); throw new RuntimeException(e); } finally { diff --git a/core/src/main/java/org/apache/druid/timeline/partition/OvershadowableManager.java b/core/src/main/java/org/apache/druid/timeline/partition/OvershadowableManager.java index a4529f31cefb..8d010cf43b67 100644 --- a/core/src/main/java/org/apache/druid/timeline/partition/OvershadowableManager.java +++ b/core/src/main/java/org/apache/druid/timeline/partition/OvershadowableManager.java @@ -287,6 +287,7 @@ private List> findOvershadowedBy( * @param minorVersion the minor version to check overshadow relation. The found groups will have lower minor versions * than this. * @param fromState the state to search for overshadowed groups. + * * @return a list of found atomicUpdateGroups. It could be empty if no groups are found. */ @VisibleForTesting @@ -333,6 +334,7 @@ private List> findOvershadows(AtomicUpdateGroup aug, Sta * @param minorVersion the minor version to check overshadow relation. The found groups will have higher minor * versions than this. * @param fromState the state to search for overshadowed groups. + * * @return a list of found atomicUpdateGroups. 
It could be empty if no groups are found. */ @VisibleForTesting @@ -438,9 +440,9 @@ private void determineVisibleGroupAfterAdd(AtomicUpdateGroup aug, State state * The given standby group can be visible in the below two cases: * * - The standby group is full. Since every standby group has a higher version than the current visible group, - * it should become visible immediately when it's full. + * it should become visible immediately when it's full. * - The standby group is not full but not empty and the current visible is not full. If there's no fully available - * group, the group of the highest version should be the visible. + * group, the group of the highest version should be the visible. */ private void moveNewStandbyToVisibleIfNecessary(AtomicUpdateGroup standbyGroup, State stateOfGroup) { @@ -530,7 +532,7 @@ private void checkVisibleIsFullyAvailableAndTryToMoveOvershadowedToVisible( findOvershadows(group, State.STANDBY) ); if (overshadowingStandbys.isEmpty()) { - throw new ISE("WTH? atomicUpdateGroup[%s] is in overshadowed state, but no one overshadows it?", group); + throw new ISE("Unexpected state: atomicUpdateGroup[%s] is overshadowed, but nothing overshadows it", group); } groupsOvershadowingAug = overshadowingStandbys; isOvershadowingGroupsFull = false; @@ -585,6 +587,7 @@ private void checkVisibleIsFullyAvailableAndTryToMoveOvershadowedToVisible( * @param groups atomicUpdateGroups sorted by their rootPartitionRange * @param startRootPartitionId the start partitionId of the root partition range to check the coverage * @param endRootPartitionId the end partitionId of the root partition range to check the coverage + * * @return true if the given groups fully cover the given partition range. */ private boolean doGroupsFullyCoverPartitionRange( @@ -675,7 +678,7 @@ boolean addChunk(PartitionChunk chunk) // If this chunk is already in the atomicUpdateGroup, it should be in knownPartitionChunks // and this code must not be executed. throw new ISE( - "WTH? 
chunk[%s] is in the atomicUpdateGroup[%s] but not in knownPartitionChunks[%s]?", + "Unexpected state: chunk[%s] is in the atomicUpdateGroup[%s] but not in knownPartitionChunks[%s]", chunk, atomicUpdateGroup, knownPartitionChunks @@ -875,7 +878,7 @@ private void removeFrom(AtomicUpdateGroup aug, State state) if (!removed.equals(aug)) { throw new ISE( - "WTH? actually removed atomicUpdateGroup[%s] is different from the one which is supposed to be[%s]", + "Unexpected state: Removed atomicUpdateGroup[%s] is different from expected atomicUpdateGroup[%s]", removed, aug ); @@ -896,7 +899,7 @@ PartitionChunk removeChunk(PartitionChunk partitionChunk) if (!knownChunk.equals(partitionChunk)) { throw new ISE( - "WTH? Same partitionId[%d], but known partition[%s] is different from the input partition[%s]", + "Unexpected state: Same partitionId[%d], but known partition[%s] is different from the input partition[%s]", partitionChunk.getChunkNumber(), knownChunk, partitionChunk @@ -932,7 +935,8 @@ public boolean isComplete() (SingleEntryShort2ObjectSortedMap>) map; //noinspection ConstantConditions return singleMap.val.isFull(); - }); + } + ); } @Nullable diff --git a/core/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java b/core/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java index 2adbd495c30c..c8aaded1df7c 100644 --- a/core/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java +++ b/core/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java @@ -98,7 +98,7 @@ public void testInvalidSpacesRegexLineTabulation() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "wtf\u000Bis line tabulation"); + IdUtils.validateId(THINGO, "what\u000Bis line tabulation"); } @Test diff --git a/extensions-contrib/ambari-metrics-emitter/src/main/java/org/apache/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java 
b/extensions-contrib/ambari-metrics-emitter/src/main/java/org/apache/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java index a0241801d0dc..6b4bbd5e5be0 100644 --- a/extensions-contrib/ambari-metrics-emitter/src/main/java/org/apache/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java +++ b/extensions-contrib/ambari-metrics-emitter/src/main/java/org/apache/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java @@ -107,7 +107,7 @@ public void start() public void emit(Event event) { if (!started.get()) { - throw new ISE("WTF emit was called while service is not started yet"); + throw new ISE("Emit called unexpectedly before service start"); } if (event instanceof ServiceMetricEvent) { final TimelineMetric timelineEvent = timelineMetricConverter.druidEventToTimelineMetric((ServiceMetricEvent) event); diff --git a/extensions-contrib/graphite-emitter/src/main/java/org/apache/druid/emitter/graphite/GraphiteEmitter.java b/extensions-contrib/graphite-emitter/src/main/java/org/apache/druid/emitter/graphite/GraphiteEmitter.java index 13ffb484b2d5..b3739ab9d15f 100644 --- a/extensions-contrib/graphite-emitter/src/main/java/org/apache/druid/emitter/graphite/GraphiteEmitter.java +++ b/extensions-contrib/graphite-emitter/src/main/java/org/apache/druid/emitter/graphite/GraphiteEmitter.java @@ -99,7 +99,7 @@ public void start() public void emit(Event event) { if (!started.get()) { - throw new ISE("WTF emit was called while service is not started yet"); + throw new ISE("Emit called unexpectedly before service start"); } if (event instanceof ServiceMetricEvent) { final GraphiteEvent graphiteEvent = graphiteEventConverter.druidEventToGraphite((ServiceMetricEvent) event); @@ -152,14 +152,14 @@ public ConsumerRunnable() { if (graphiteEmitterConfig.getProtocol().equals(GraphiteEmitterConfig.PLAINTEXT_PROTOCOL)) { graphite = new Graphite( - graphiteEmitterConfig.getHostname(), - graphiteEmitterConfig.getPort() + graphiteEmitterConfig.getHostname(), + graphiteEmitterConfig.getPort() 
); } else { graphite = new PickledGraphite( - graphiteEmitterConfig.getHostname(), - graphiteEmitterConfig.getPort(), - graphiteEmitterConfig.getBatchSize() + graphiteEmitterConfig.getHostname(), + graphiteEmitterConfig.getPort(), + graphiteEmitterConfig.getBatchSize() ); } log.info("Using %s protocol.", graphiteEmitterConfig.getProtocol()); diff --git a/extensions-contrib/opentsdb-emitter/src/main/java/org/apache/druid/emitter/opentsdb/OpentsdbEmitter.java b/extensions-contrib/opentsdb-emitter/src/main/java/org/apache/druid/emitter/opentsdb/OpentsdbEmitter.java index 41c2413cbb6b..67b95eb30fd4 100644 --- a/extensions-contrib/opentsdb-emitter/src/main/java/org/apache/druid/emitter/opentsdb/OpentsdbEmitter.java +++ b/extensions-contrib/opentsdb-emitter/src/main/java/org/apache/druid/emitter/opentsdb/OpentsdbEmitter.java @@ -66,7 +66,7 @@ public void start() public void emit(Event event) { if (!started.get()) { - throw new ISE("WTF emit was called while service is not started yet"); + throw new ISE("Emit called unexpectedly before service start"); } if (event instanceof ServiceMetricEvent) { OpentsdbEvent opentsdbEvent = converter.convert((ServiceMetricEvent) event); diff --git a/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java b/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java index 58f4984df57c..5ccd04b7ef12 100644 --- a/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java +++ b/extensions-core/druid-basic-security/src/main/java/org/apache/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java @@ -206,7 +206,7 @@ public void handleAuthorizerUserUpdate(String authorizerPrefix, byte[] serialize } } catch 
(Exception e) { - LOG.makeAlert(e, "WTF? Could not deserialize user/role map received from coordinator.").emit(); + LOG.makeAlert(e, "Could not deserialize user/role map received from coordinator").emit(); } } diff --git a/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregator.java b/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregator.java index 7dd8abaea0b5..1210e1c8ef08 100644 --- a/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregator.java +++ b/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregator.java @@ -39,7 +39,7 @@ public void bufferAdd(ByteBuffer buf) ByteBuffer other = selector.getObject(); if (other == null) { // nulls should be empty bloom filters by this point, so encountering a nil column in merge agg is unexpected - throw new ISE("WTF?! 
Unexpected null value in BloomFilterMergeAggregator"); + throw new ISE("Unexpected null value in BloomFilterMergeAggregator"); } BloomKFilter.mergeBloomFilterByteBuffers(buf, buf.position(), other, other.position()); } diff --git a/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregatorFactory.java b/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregatorFactory.java index ed5ce2904654..7d74432a87c6 100644 --- a/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregatorFactory.java +++ b/extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/aggregation/bloom/BloomFilterMergeAggregatorFactory.java @@ -76,7 +76,7 @@ private BloomFilterMergeAggregator makeMergeAggregator(ColumnSelectorFactory met final BaseNullableColumnValueSelector selector = metricFactory.makeColumnValueSelector(fieldName); // null columns should be empty bloom filters by this point, so encountering a nil column in merge agg is unexpected if (selector instanceof NilColumnValueSelector) { - throw new ISE("WTF?! 
Unexpected NilColumnValueSelector"); + throw new ISE("Unexpected NilColumnValueSelector"); } return new BloomFilterMergeAggregator((ColumnValueSelector) selector, getMaxNumEntries(), true); } diff --git a/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisorTest.java b/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisorTest.java index 9d8635dd39a9..ac0e1470ba79 100644 --- a/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisorTest.java +++ b/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisorTest.java @@ -2595,10 +2595,10 @@ public void testCheckpointForUnknownTaskGroup() } Assert.assertTrue( - serviceEmitter.getStackTrace().startsWith("org.apache.druid.java.util.common.ISE: WTH?! cannot find") + serviceEmitter.getStackTrace().startsWith("org.apache.druid.java.util.common.ISE: Cannot find") ); Assert.assertEquals( - "WTH?! 
cannot find taskGroup [0] among all activelyReadingTaskGroups [{}]", + "Cannot find taskGroup [0] among all activelyReadingTaskGroups [{}]", serviceEmitter.getExceptionMessage() ); Assert.assertEquals(ISE.class, serviceEmitter.getExceptionClass()); diff --git a/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/supervisor/KinesisSupervisorTest.java b/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/supervisor/KinesisSupervisorTest.java index 6bd309c8b7d3..b50ef3e6a9fa 100644 --- a/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/supervisor/KinesisSupervisorTest.java +++ b/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/supervisor/KinesisSupervisorTest.java @@ -3195,9 +3195,9 @@ public void testCheckpointForUnknownTaskGroup() } Assert.assertTrue(serviceEmitter.getStackTrace() - .startsWith("org.apache.druid.java.util.common.ISE: WTH?! cannot find")); + .startsWith("org.apache.druid.java.util.common.ISE: Cannot find")); Assert.assertEquals( - "WTH?! 
cannot find taskGroup [0] among all activelyReadingTaskGroups [{}]", + "Cannot find taskGroup [0] among all activelyReadingTaskGroups [{}]", serviceEmitter.getExceptionMessage() ); Assert.assertEquals(ISE.class, serviceEmitter.getExceptionClass()); diff --git a/extensions-core/lookups-cached-single/src/main/java/org/apache/druid/server/lookup/PollingLookup.java b/extensions-core/lookups-cached-single/src/main/java/org/apache/druid/server/lookup/PollingLookup.java index 10ccc9b97dc8..0b1b14c9d094 100644 --- a/extensions-core/lookups-cached-single/src/main/java/org/apache/druid/server/lookup/PollingLookup.java +++ b/extensions-core/lookups-cached-single/src/main/java/org/apache/druid/server/lookup/PollingLookup.java @@ -119,7 +119,7 @@ public String apply(@Nullable String key) } final CacheRefKeeper cacheRefKeeper = refOfCacheKeeper.get(); if (cacheRefKeeper == null) { - throw new ISE("Cache reference is null WTF"); + throw new ISE("Cache reference is null"); } final PollingCache cache = cacheRefKeeper.getAndIncrementRef(); try { diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java index 62a13d3f9fc4..b39061919024 100644 --- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java +++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java @@ -239,7 +239,8 @@ public Map getStats() Map metrics = TaskMetricsUtils.makeIngestionRowMetrics( jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).getValue(), - jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER).getValue(), + jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER) + .getValue(), 
jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_UNPARSEABLE_COUNTER).getValue(), jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_THROWN_AWAY_COUNTER).getValue() ); @@ -318,7 +319,7 @@ protected void innerMap( .bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch())); if (!maybeInterval.isPresent()) { - throw new ISE("WTF?! No bucket found for timestamp: %s", inputRow.getTimestampFromEpoch()); + throw new ISE("No bucket found for timestamp: %s", inputRow.getTimestampFromEpoch()); } interval = maybeInterval.get(); } @@ -387,7 +388,7 @@ protected void reduce( Optional intervalOptional = config.getGranularitySpec().bucketInterval(DateTimes.utc(key.get())); if (!intervalOptional.isPresent()) { - throw new ISE("WTF?! No bucket found for timestamp: %s", key.get()); + throw new ISE("No bucket found for timestamp: %s", key.get()); } interval = intervalOptional.get(); } diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java index 0b246be384b6..1e810c69aefd 100644 --- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java +++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java @@ -464,7 +464,7 @@ void emitDimValueCounts( final Optional maybeInterval = config.getGranularitySpec().bucketInterval(timestamp); if (!maybeInterval.isPresent()) { - throw new ISE("WTF?! No bucket found for timestamp: %s", timestamp); + throw new ISE("No bucket found for timestamp: %s", timestamp); } final Interval interval = maybeInterval.get(); @@ -627,7 +627,7 @@ protected void innerReduce(Context context, SortableBytes keyBytes, Iterable bucket = getConfig().getBucket(inputRow); if (!bucket.isPresent()) { - throw new ISE("WTF?! 
No bucket found for row: %s", inputRow); + throw new ISE("No bucket found for row: %s", inputRow); } final long truncatedTimestamp = granularitySpec.getQueryGranularity() diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AppenderatorDriverRealtimeIndexTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AppenderatorDriverRealtimeIndexTask.java index d20a8f44dd8b..df23146f7cbb 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AppenderatorDriverRealtimeIndexTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/AppenderatorDriverRealtimeIndexTask.java @@ -71,6 +71,7 @@ import org.apache.druid.query.NoopQueryRunner; import org.apache.druid.query.Query; import org.apache.druid.query.QueryRunner; +import org.apache.druid.segment.SegmentUtils; import org.apache.druid.segment.indexing.DataSchema; import org.apache.druid.segment.indexing.RealtimeIOConfig; import org.apache.druid.segment.realtime.FireDepartment; @@ -338,7 +339,10 @@ public TaskStatus run(final TaskToolbox toolbox) final TransactionalSegmentPublisher publisher = (mustBeNullOrEmptySegments, segments, commitMetadata) -> { if (mustBeNullOrEmptySegments != null && !mustBeNullOrEmptySegments.isEmpty()) { - throw new ISE("WTH? 
stream ingestion tasks are overwriting segments[%s]", mustBeNullOrEmptySegments); + throw new ISE( + "Stream ingestion task unexpectedly attempted to overwrite segments: %s", + SegmentUtils.commaSeparatedIdentifiers(mustBeNullOrEmptySegments) + ); } final SegmentTransactionalInsertAction action = SegmentTransactionalInsertAction.appendAction( segments, diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/ArchiveTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/ArchiveTask.java index 9f6509968b5e..4e048ad4d2bf 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/ArchiveTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/ArchiveTask.java @@ -73,7 +73,7 @@ public TaskStatus run(TaskToolbox toolbox) throws Exception for (final DataSegment unusedSegment : unusedSegments) { if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) { throw new ISE( - "WTF?! Unused segment[%s] has version[%s] > task version[%s]", + "Unused segment[%s] has version[%s] > task version[%s]", unusedSegment.getId(), unusedSegment.getVersion(), myLock.getVersion() diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java index 4a0b3ac7a386..152eb33043b6 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/MoveTask.java @@ -81,7 +81,7 @@ public TaskStatus run(TaskToolbox toolbox) throws Exception for (final DataSegment unusedSegment : unusedSegments) { if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) { throw new ISE( - "WTF?! 
Unused segment[%s] has version[%s] > task version[%s]", + "Unused segment[%s] has version[%s] > task version[%s]", unusedSegment.getId(), unusedSegment.getVersion(), myLock.getVersion() diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RealtimeIndexTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RealtimeIndexTask.java index 79aa3967bc0e..b4c75206058d 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RealtimeIndexTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RealtimeIndexTask.java @@ -212,7 +212,7 @@ public TaskStatus run(final TaskToolbox toolbox) throws Exception runThread = Thread.currentThread(); if (this.plumber != null) { - throw new IllegalStateException("WTF?!? run with non-null plumber??!"); + throw new IllegalStateException("Plumber must be null"); } setupTimeoutAlert(); diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RestoreTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RestoreTask.java index 2978859211e7..622c9ff8b018 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RestoreTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/RestoreTask.java @@ -74,7 +74,7 @@ public TaskStatus run(TaskToolbox toolbox) throws Exception for (final DataSegment unusedSegment : unusedSegments) { if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) { throw new ISE( - "WTF?! 
Unused segment[%s] has version[%s] > task version[%s]", + "Unused segment[%s] has version[%s] > task version[%s]", unusedSegment.getId(), unusedSegment.getVersion(), myLock.getVersion() diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/PartialSegmentMergeTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/PartialSegmentMergeTask.java index b105d5e8da09..bb15f376ca4d 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/PartialSegmentMergeTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/PartialSegmentMergeTask.java @@ -157,7 +157,7 @@ public TaskStatus runTask(TaskToolbox toolbox) throws Exception final String mustBeNull = intervalToVersion.put(lock.getInterval(), lock.getVersion()); if (mustBeNull != null) { throw new ISE( - "WTH? Two versions([%s], [%s]) for the same interval[%s]?", + "Unexpected state: Two versions([%s], [%s]) for the same interval[%s]", lock.getVersion(), mustBeNull, lock.getInterval() diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/BaseRestorableTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/BaseRestorableTaskRunner.java index 30dcec286bb2..9fa8a28957ac 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/BaseRestorableTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/BaseRestorableTaskRunner.java @@ -95,7 +95,7 @@ public List>> restore() final Task task = jsonMapper.readValue(taskFile, Task.class); if (!task.getId().equals(taskId)) { - throw new ISE("WTF?! 
Task[%s] restore file had wrong id[%s].", taskId, task.getId()); + throw new ISE("Task[%s] restore file had wrong id[%s]", taskId, task.getId()); } if (taskConfig.isRestoreTasksOnRestart() && task.canRestore()) { diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java index c8524c7cd637..f29af347c67b 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ForkingTaskRunner.java @@ -174,7 +174,7 @@ public TaskStatus call() final ForkingTaskRunnerWorkItem taskWorkItem = tasks.get(task.getId()); if (taskWorkItem == null) { - LOGGER.makeAlert("WTF?! TaskInfo disappeared!").addData("task", task.getId()).emit(); + LOGGER.makeAlert("TaskInfo disappeared!").addData("task", task.getId()).emit(); throw new ISE("TaskInfo disappeared for task[%s]!", task.getId()); } @@ -183,7 +183,7 @@ public TaskStatus call() } if (taskWorkItem.processHolder != null) { - LOGGER.makeAlert("WTF?! 
TaskInfo already has a processHolder") + LOGGER.makeAlert("TaskInfo already has a processHolder") .addData("task", task.getId()) .emit(); throw new ISE("TaskInfo already has processHolder for task[%s]!", task.getId()); diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java index dbaadf98e9d9..352f75fc6868 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/RemoteTaskRunner.java @@ -770,7 +770,7 @@ private void cleanup(final String taskId) final RemoteTaskRunnerWorkItem removed = completeTasks.remove(taskId); final Worker worker; if (removed == null || (worker = removed.getWorker()) == null) { - log.makeAlert("WTF?! Asked to cleanup nonexistent task") + log.makeAlert("Asked to cleanup nonexistent task") .addData("taskId", taskId) .emit(); } else { @@ -901,7 +901,7 @@ private boolean announceTask( RemoteTaskRunnerWorkItem workItem = pendingTasks.remove(task.getId()); if (workItem == null) { - log.makeAlert("WTF?! Got a null work item from pending tasks?! How can this be?!") + log.makeAlert("Ignoring null work item from pending task queue") .addData("taskId", task.getId()) .emit(); return false; @@ -1119,7 +1119,7 @@ private void updateWorker(final Worker worker) zkWorker.setWorker(worker); } else { log.warn( - "WTF, worker[%s] updated its announcement but we didn't have a ZkWorker for it. Ignoring.", + "Worker[%s] updated its announcement but we didn't have a ZkWorker for it. 
Ignoring.", worker.getHost() ); } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java index 7e7fafac3e8b..13bd4900b013 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java @@ -171,7 +171,7 @@ public void stop() executorService.shutdown(); } catch (SecurityException ex) { - log.wtf(ex, "I can't control my own threads!"); + log.error(ex, "I can't control my own threads!"); } } @@ -233,7 +233,7 @@ public void stop() executorService.shutdownNow(); } catch (SecurityException ex) { - log.wtf(ex, "I can't control my own threads!"); + log.error(ex, "I can't control my own threads!"); } } } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java index 6a56169a3686..3dce5917fe9c 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java @@ -146,7 +146,7 @@ public int compare(Pair left, Pair right) final TaskLock savedTaskLock = Preconditions.checkNotNull(taskAndLock.rhs, "savedTaskLock"); if (savedTaskLock.getInterval().toDurationMillis() <= 0) { // "Impossible", but you never know what crazy stuff can be restored from storage. - log.warn("WTF?! 
Got lock[%s] with empty interval for task: %s", savedTaskLock, task.getId()); + log.warn("Ignoring lock[%s] with empty interval for task: %s", savedTaskLock, task.getId()); continue; } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java index 541786dbdf98..82cf9bccfef5 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java @@ -176,7 +176,7 @@ public TaskStatus call() taskWorkItem = tasks.get(task.getId()); if (taskWorkItem == null) { - LOGGER.makeAlert("WTF?! TaskInfo disappeared!").addData("task", task.getId()).emit(); + LOGGER.makeAlert("TaskInfo disappeared").addData("task", task.getId()).emit(); throw new ISE("TaskInfo disappeared for task[%s]!", task.getId()); } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java index da83b966606f..d8a7cb6ffcfc 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/hrtr/HttpRemoteTaskRunner.java @@ -487,7 +487,7 @@ public void nodeViewInitialized() long workerDiscoveryStartTime = System.currentTimeMillis(); while (!workerViewInitialized.await(30, TimeUnit.SECONDS)) { if (System.currentTimeMillis() - workerDiscoveryStartTime > TimeUnit.MINUTES.toMillis(5)) { - throw new ISE("WTF! Couldn't discover workers."); + throw new ISE("Couldn't discover workers."); } else { log.info("Waiting for worker discovery..."); } @@ -1169,7 +1169,7 @@ private void pendingTasksExecutionLoop() } if (immutableWorker == null) { - throw new ISE("WTH! 
NULL immutableWorker"); + throw new ISE("Unexpected state: null immutableWorker"); } try { @@ -1405,7 +1405,7 @@ void taskAddedOrUpdated(final TaskAnnouncement announcement, final WorkerHolder break; default: log.makeAlert( - "WTF! Found unrecognized state[%s] of task[%s] in taskStorage. Notification[%s] from worker[%s] is ignored.", + "Found unrecognized state[%s] of task[%s] in taskStorage. Notification[%s] from worker[%s] is ignored.", knownStatusInStorage.get().getStatusCode(), taskId, announcement, @@ -1468,7 +1468,7 @@ void taskAddedOrUpdated(final TaskAnnouncement announcement, final WorkerHolder break; default: log.makeAlert( - "WTF! Found unrecognized state[%s] of task[%s]. Notification[%s] from worker[%s] is ignored.", + "Found unrecognized state[%s] of task[%s]. Notification[%s] from worker[%s] is ignored.", taskItem.getState(), taskId, announcement, @@ -1513,7 +1513,7 @@ void taskAddedOrUpdated(final TaskAnnouncement announcement, final WorkerHolder break; default: log.makeAlert( - "WTF! Found unrecognized state[%s] of task[%s]. Notification[%s] from worker[%s] is ignored.", + "Found unrecognized state[%s] of task[%s]. Notification[%s] from worker[%s] is ignored.", taskItem.getState(), taskId, announcement, @@ -1523,7 +1523,7 @@ void taskAddedOrUpdated(final TaskAnnouncement announcement, final WorkerHolder break; default: log.makeAlert( - "WTF! 
Worker[%s] reported unrecognized state[%s] for task[%s].", + "Worker[%s] reported unrecognized state[%s] for task[%s].", worker.getHost(), announcement.getTaskStatus().getStatusCode(), taskId diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java index 8f538310dcd4..25963a1a1717 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java @@ -478,7 +478,7 @@ private TaskStatus runInternal(TaskToolbox toolbox) throws Exception // Sanity checks. if (!restoredNextPartitions.getStream().equals(ioConfig.getStartSequenceNumbers().getStream())) { throw new ISE( - "WTF?! Restored stream[%s] but expected stream[%s]", + "Restored stream[%s] but expected stream[%s]", restoredNextPartitions.getStream(), ioConfig.getStartSequenceNumbers().getStream() ); @@ -486,7 +486,7 @@ private TaskStatus runInternal(TaskToolbox toolbox) throws Exception if (!currOffsets.keySet().equals(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet())) { throw new ISE( - "WTF?! Restored partitions[%s] but expected partitions[%s]", + "Restored partitions[%s] but expected partitions[%s]", currOffsets.keySet(), ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet() ); @@ -641,7 +641,7 @@ public void run() if (sequenceToUse == null) { throw new ISE( - "WTH?! cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. Current sequences: %s", + "Cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. 
Current sequences: %s", record.getPartitionId(), record.getSequenceNumber(), sequences @@ -1626,7 +1626,7 @@ public Response setEndOffsets( pauseLock.lockInterruptibly(); // Perform all sequence related checks before checking for isPaused() // and after acquiring pauseLock to correctly guard against duplicate requests - Preconditions.checkState(sequenceNumbers.size() > 0, "WTH?! No Sequences found to set end sequences"); + Preconditions.checkState(sequenceNumbers.size() > 0, "No sequences found to set end sequences"); final SequenceMetadata latestSequence = getLastSequenceMetadata(); final Set exclusiveStartPartitions; @@ -1651,7 +1651,7 @@ public Response setEndOffsets( } else if (latestSequence.isCheckpointed()) { return Response.status(Response.Status.BAD_REQUEST) .entity(StringUtils.format( - "WTH?! Sequence [%s] has already endOffsets set, cannot set to [%s]", + "Sequence [%s] has already endOffsets set, cannot set to [%s]", latestSequence, sequenceNumbers )).build(); diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java index 4b0265fb36fe..af926203d6c2 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java @@ -32,6 +32,7 @@ import org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.emitter.EmittingLogger; +import org.apache.druid.segment.SegmentUtils; import org.apache.druid.segment.realtime.appenderator.TransactionalSegmentPublisher; import org.apache.druid.timeline.DataSegment; @@ -341,7 +342,10 @@ public SegmentPublishResult publishAnnotatedSegments( ) throws IOException { if (mustBeNullOrEmptySegments != null && !mustBeNullOrEmptySegments.isEmpty()) { - throw new 
ISE("WTH? stream ingestion tasks are overwriting segments[%s]", mustBeNullOrEmptySegments); + throw new ISE( + "Stream ingestion task unexpectedly attempted to overwrite segments: %s", + SegmentUtils.commaSeparatedIdentifiers(mustBeNullOrEmptySegments) + ); } final Map commitMetaMap = (Map) Preconditions.checkNotNull(commitMetadata, "commitMetadata"); final SeekableStreamEndSequenceNumbers finalPartitions = @@ -353,7 +357,7 @@ public SegmentPublishResult publishAnnotatedSegments( // Sanity check, we should only be publishing things that match our desired end state. if (!getEndOffsets().equals(finalPartitions.getPartitionSequenceNumberMap())) { throw new ISE( - "WTF?! Driver for sequence [%s], attempted to publish invalid metadata[%s].", + "Driver for sequence[%s] attempted to publish invalid metadata[%s].", SequenceMetadata.this.toString(), commitMetadata ); diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java index cc4b986bd1a7..58d65ab383df 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java @@ -423,7 +423,7 @@ boolean isValidTaskGroup(int taskGroupId, @Nullable TaskGroup taskGroup) log.warn("Ignoring checkpoint request because taskGroup[%d] is inactive", taskGroupId); return false; } else { - throw new ISE("WTH?! 
cannot find taskGroup [%s] among all activelyReadingTaskGroups [%s]", taskGroupId, + throw new ISE("Cannot find taskGroup [%s] among all activelyReadingTaskGroups [%s]", taskGroupId, activelyReadingTaskGroups ); } @@ -1494,7 +1494,7 @@ public Boolean apply(SeekableStreamIndexTaskRunner.Status status) final TaskData prevTaskData = taskGroup.tasks.putIfAbsent(taskId, new TaskData()); if (prevTaskData != null) { throw new ISE( - "WTH? a taskGroup[%s] already exists for new task[%s]", + "taskGroup[%s] already exists for new task[%s]", prevTaskData, taskId ); @@ -2526,7 +2526,7 @@ public Map apply(List pauseResult = pauseFutures.get(i).get(); throw new ISE( - "WTH? The pause request for task [%s] is supposed to fail, but returned [%s]", + "Pause request for task [%s] should have failed, but returned [%s]", taskId, pauseResult ); @@ -2682,7 +2682,7 @@ private void checkPendingCompletionTasks() final String taskId = entry.getKey(); final TaskData taskData = entry.getValue(); - Preconditions.checkNotNull(taskData.status, "WTH? task[%s] has a null status", taskId); + Preconditions.checkNotNull(taskData.status, "task[%s] has null status", taskId); if (taskData.status.isFailure()) { stateManager.recordCompletedTaskState(TaskState.FAILED); @@ -2782,7 +2782,7 @@ private void checkCurrentTaskState() throws ExecutionException, InterruptedExcep continue; } - Preconditions.checkNotNull(taskData.status, "WTH? 
task[%s] has a null status", taskId); + Preconditions.checkNotNull(taskData.status, "Task[%s] has null status", taskId); // remove failed tasks if (taskData.status.isFailure()) { diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/worker/WorkerTaskManager.java b/indexing-service/src/main/java/org/apache/druid/indexing/worker/WorkerTaskManager.java index 6855d08b5b5d..3f497768eaad 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/worker/WorkerTaskManager.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/worker/WorkerTaskManager.java @@ -345,7 +345,7 @@ private void initAssignedTasks() if (taskId.equals(task.getId())) { assignedTasks.put(taskId, task); } else { - throw new ISE("WTF! Corrupted assigned task on disk[%s].", taskFile.getAbsoluteFile()); + throw new ISE("Corrupted assigned task on disk[%s].", taskFile.getAbsoluteFile()); } } catch (IOException ex) { @@ -471,7 +471,7 @@ private void initCompletedTasks() if (taskId.equals(taskAnnouncement.getTaskId())) { completedTasks.put(taskId, taskAnnouncement); } else { - throw new ISE("WTF! Corrupted completed task on disk[%s].", taskFile.getAbsoluteFile()); + throw new ISE("Corrupted completed task on disk[%s].", taskFile.getAbsoluteFile()); } } catch (IOException ex) { @@ -699,7 +699,7 @@ public void handle() if (!status.isComplete()) { log.warn( - "WTF?! 
Got status notice for task [%s] that isn't complete (status = [%s])...", + "Got status notice for task [%s] that isn't complete (status = [%s])...", task.getId(), status.getStatusCode() ); diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/firehose/IngestSegmentFirehoseFactoryTimelineTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/firehose/IngestSegmentFirehoseFactoryTimelineTest.java index b8a222085171..06ebc56e9ec7 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/firehose/IngestSegmentFirehoseFactoryTimelineTest.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/firehose/IngestSegmentFirehoseFactoryTimelineTest.java @@ -331,7 +331,7 @@ public Collection fetchUsedSegmentsInDataSourceForIntervals( if (intervals.equals(ImmutableList.of(testCase.interval))) { return ImmutableSet.copyOf(testCase.segments); } else { - throw new IllegalArgumentException("WTF"); + throw new IllegalArgumentException("BAD"); } } diff --git a/processing/src/main/java/org/apache/druid/guice/PropertiesModule.java b/processing/src/main/java/org/apache/druid/guice/PropertiesModule.java index 4dd7b7045885..a4006509f26d 100644 --- a/processing/src/main/java/org/apache/druid/guice/PropertiesModule.java +++ b/processing/src/main/java/org/apache/druid/guice/PropertiesModule.java @@ -76,7 +76,7 @@ public void configure(Binder binder) } } catch (FileNotFoundException e) { - log.wtf(e, "This can only happen if the .exists() call lied."); + log.error(e, "This can only happen if the .exists() call lied."); } finally { CloseQuietly.close(stream); diff --git a/processing/src/main/java/org/apache/druid/query/ChainedExecutionQueryRunner.java b/processing/src/main/java/org/apache/druid/query/ChainedExecutionQueryRunner.java index f400bacbdd1e..e30e9d1d8e5b 100644 --- a/processing/src/main/java/org/apache/druid/query/ChainedExecutionQueryRunner.java +++ 
b/processing/src/main/java/org/apache/druid/query/ChainedExecutionQueryRunner.java @@ -124,7 +124,7 @@ public Iterable call() List retVal = result.toList(); if (retVal == null) { - throw new ISE("Got a null list of results! WTF?!"); + throw new ISE("Got a null list of results"); } return retVal; diff --git a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/ByteBufferHashTable.java b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/ByteBufferHashTable.java index b9d0c7cc8132..62c65f7cecb7 100644 --- a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/ByteBufferHashTable.java +++ b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/ByteBufferHashTable.java @@ -169,7 +169,7 @@ public void adjustTableWhenFull() } if (newBuckets < maxBuckets) { - throw new ISE("WTF?! newBuckets[%,d] < maxBuckets[%,d]", newBuckets, maxBuckets); + throw new ISE("newBuckets[%,d] < maxBuckets[%,d]", newBuckets, maxBuckets); } ByteBuffer newTableBuffer = buffer.duplicate(); @@ -206,7 +206,7 @@ public void adjustTableWhenFull() final int newBucket = findBucket(true, newBuckets, newTableBuffer, keyBuffer, keyHash); if (newBucket < 0) { - throw new ISE("WTF?! Couldn't find a bucket while resizing?!"); + throw new ISE("Couldn't find a bucket while resizing"); } final int newBucketOffset = newBucket * bucketSizeWithHash; @@ -230,7 +230,7 @@ public void adjustTableWhenFull() growthCount++; if (size != newSize) { - throw new ISE("WTF?! 
size[%,d] != newSize[%,d] after resizing?!", size, newSize); + throw new ISE("size[%,d] != newSize[%,d] after resizing", size, newSize); } } diff --git a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouper.java b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouper.java index b15845bd7080..2c7c320e8bc7 100644 --- a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouper.java +++ b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouper.java @@ -98,7 +98,7 @@ public void init() // We check this already in SpillingGrouper to ensure that LimitedBufferHashGrouper is only used when there is // sufficient buffer capacity. If this error occurs, something went very wrong. if (!validateBufferCapacity(totalBuffer.capacity())) { - throw new IAE("WTF? Using LimitedBufferHashGrouper with insufficient buffer capacity."); + throw new IAE("LimitedBufferHashGrouper initialized with insufficient buffer capacity"); } //only store offsets up to `limit` + 1 instead of up to # of buckets, we only keep the top results @@ -485,7 +485,7 @@ public void adjustTableWhenFull() final int newBucket = findBucket(true, maxBuckets, newTableBuffer, keyBuffer, keyHash); if (newBucket < 0) { - throw new ISE("WTF?! 
Couldn't find a bucket while resizing?!"); + throw new ISE("Couldn't find a bucket while resizing"); } final int newBucketOffset = newBucket * bucketSizeWithHash; diff --git a/processing/src/main/java/org/apache/druid/query/scan/ScanQueryEngine.java b/processing/src/main/java/org/apache/druid/query/scan/ScanQueryEngine.java index 9c72b30f5be8..b09f731eea11 100644 --- a/processing/src/main/java/org/apache/druid/query/scan/ScanQueryEngine.java +++ b/processing/src/main/java/org/apache/druid/query/scan/ScanQueryEngine.java @@ -65,7 +65,7 @@ public Sequence process( ) { // "legacy" should be non-null due to toolChest.mergeResults - final boolean legacy = Preconditions.checkNotNull(query.isLegacy(), "WTF?! Expected non-null legacy"); + final boolean legacy = Preconditions.checkNotNull(query.isLegacy(), "Expected non-null 'legacy' parameter"); final Object numScannedRows = responseContext.get(ResponseContext.Key.NUM_SCANNED_ROWS); if (numScannedRows != null) { diff --git a/processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java b/processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java index 52066ab1f25e..44fa5b23906c 100644 --- a/processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java +++ b/processing/src/main/java/org/apache/druid/query/scan/ScanQueryRunnerFactory.java @@ -265,7 +265,7 @@ public ScanResultValue accumulate(ScanResultValue accumulated, ScanResultValue i } } if (finalInterval == null) { - throw new ISE("WTH??? 
Row came from an unscanned interval?"); + throw new ISE("Row came from an unscanned interval"); } } } diff --git a/processing/src/main/java/org/apache/druid/query/topn/AggregateTopNMetricFirstAlgorithm.java b/processing/src/main/java/org/apache/druid/query/topn/AggregateTopNMetricFirstAlgorithm.java index ab5eef290851..2180b73253ce 100644 --- a/processing/src/main/java/org/apache/druid/query/topn/AggregateTopNMetricFirstAlgorithm.java +++ b/processing/src/main/java/org/apache/druid/query/topn/AggregateTopNMetricFirstAlgorithm.java @@ -80,7 +80,7 @@ public void run( AggregatorUtil.condensedAggregators(query.getAggregatorSpecs(), query.getPostAggregatorSpecs(), metric); if (condensedAggPostAggPair.lhs.isEmpty() && condensedAggPostAggPair.rhs.isEmpty()) { - throw new ISE("WTF! Can't find the metric to do topN over?"); + throw new ISE("Can't find the topN metric"); } // Run topN for only a single metric TopNQuery singleMetricQuery = new TopNQueryBuilder(query) diff --git a/processing/src/main/java/org/apache/druid/segment/incremental/OffheapIncrementalIndex.java b/processing/src/main/java/org/apache/druid/segment/incremental/OffheapIncrementalIndex.java index ac48901d09ec..490f62534f47 100644 --- a/processing/src/main/java/org/apache/druid/segment/incremental/OffheapIncrementalIndex.java +++ b/processing/src/main/java/org/apache/druid/segment/incremental/OffheapIncrementalIndex.java @@ -217,7 +217,7 @@ protected AddToFactsResult addToFacts( if (IncrementalIndexRow.EMPTY_ROW_INDEX == prev) { getNumEntries().incrementAndGet(); } else { - throw new ISE("WTF! 
we are in sychronized block."); + throw new ISE("Unexpected state: Concurrent fact addition."); } } } diff --git a/processing/src/main/java/org/apache/druid/segment/virtual/SingleLongInputCachingExpressionColumnValueSelector.java b/processing/src/main/java/org/apache/druid/segment/virtual/SingleLongInputCachingExpressionColumnValueSelector.java index df9241b9eead..76bbb628fecf 100644 --- a/processing/src/main/java/org/apache/druid/segment/virtual/SingleLongInputCachingExpressionColumnValueSelector.java +++ b/processing/src/main/java/org/apache/druid/segment/virtual/SingleLongInputCachingExpressionColumnValueSelector.java @@ -60,7 +60,7 @@ public SingleLongInputCachingExpressionColumnValueSelector( { // Verify expression has just one binding. if (expression.analyzeInputs().getRequiredBindings().size() != 1) { - throw new ISE("WTF?! Expected expression with just one binding"); + throw new ISE("Expected expression with just one binding"); } this.selector = Preconditions.checkNotNull(selector, "selector"); diff --git a/processing/src/main/java/org/apache/druid/segment/virtual/SingleStringInputCachingExpressionColumnValueSelector.java b/processing/src/main/java/org/apache/druid/segment/virtual/SingleStringInputCachingExpressionColumnValueSelector.java index a3bf08c8f6f5..cbd98cfb095b 100644 --- a/processing/src/main/java/org/apache/druid/segment/virtual/SingleStringInputCachingExpressionColumnValueSelector.java +++ b/processing/src/main/java/org/apache/druid/segment/virtual/SingleStringInputCachingExpressionColumnValueSelector.java @@ -56,7 +56,7 @@ public SingleStringInputCachingExpressionColumnValueSelector( { // Verify expression has just one binding. if (expression.analyzeInputs().getRequiredBindings().size() != 1) { - throw new ISE("WTF?! 
Expected expression with just one binding"); + throw new ISE("Expected expression with just one binding"); } this.selector = Preconditions.checkNotNull(selector, "selector"); diff --git a/processing/src/test/java/org/apache/druid/query/SchemaEvolutionTest.java b/processing/src/test/java/org/apache/druid/query/SchemaEvolutionTest.java index 729923cfd569..35e642fbf932 100644 --- a/processing/src/test/java/org/apache/druid/query/SchemaEvolutionTest.java +++ b/processing/src/test/java/org/apache/druid/query/SchemaEvolutionTest.java @@ -210,7 +210,7 @@ public void setUp() throws IOException if (index4.getAvailableDimensions().size() != 0) { // Just double-checking that the exclusions are working properly - throw new ISE("WTF?! Expected no dimensions in index4"); + throw new ISE("Expected no dimensions in index4"); } } diff --git a/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouperTest.java b/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouperTest.java index 70e1abcde4a3..592baebd88b2 100644 --- a/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouperTest.java +++ b/processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/LimitedBufferHashGrouperTest.java @@ -132,7 +132,7 @@ public void testLimitAndBufferSwapping() public void testBufferTooSmall() { expectedException.expect(IAE.class); - expectedException.expectMessage("WTF? 
Using LimitedBufferHashGrouper with insufficient buffer capacity."); + expectedException.expectMessage("LimitedBufferHashGrouper initialized with insufficient buffer capacity"); final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory(); makeGrouper(columnSelectorFactory, 10, 2, 100); } diff --git a/server/src/main/java/org/apache/druid/curator/discovery/ServerDiscoverySelector.java b/server/src/main/java/org/apache/druid/curator/discovery/ServerDiscoverySelector.java index d51402d6ab94..9d29d8015eaa 100644 --- a/server/src/main/java/org/apache/druid/curator/discovery/ServerDiscoverySelector.java +++ b/server/src/main/java/org/apache/druid/curator/discovery/ServerDiscoverySelector.java @@ -60,7 +60,7 @@ public Server apply(final ServiceInstance instance) { Preconditions.checkState( instance.getPort() >= 0 || (instance.getSslPort() != null && instance.getSslPort() >= 0), - "WTH?! Both port and sslPort not set" + "Both port and sslPort not set" ); final int port; final String scheme; diff --git a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java index cc4d03351b28..bf2bcb4e560e 100644 --- a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java +++ b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java @@ -322,7 +322,7 @@ public Set announceHistoricalSegments(final Set segmen // Metadata transaction cannot fail because we are not trying to do one. if (!result.isSuccess()) { - throw new ISE("WTF?! 
announceHistoricalSegments failed with null metadata, should not happen."); + throw new ISE("announceHistoricalSegments failed with null metadata, should not happen."); } return result.getSegments(); diff --git a/server/src/main/java/org/apache/druid/segment/loading/SegmentLoaderLocalCacheManager.java b/server/src/main/java/org/apache/druid/segment/loading/SegmentLoaderLocalCacheManager.java index b2ac7e8f35b0..80e8dc6015f2 100644 --- a/server/src/main/java/org/apache/druid/segment/loading/SegmentLoaderLocalCacheManager.java +++ b/server/src/main/java/org/apache/druid/segment/loading/SegmentLoaderLocalCacheManager.java @@ -334,9 +334,9 @@ private void unlock(DataSegment dataSegment, ReferenceCountingLock lock) dataSegment, (segment, existingLock) -> { if (existingLock == null) { - throw new ISE("WTH? the given lock has already been removed"); + throw new ISE("Lock has already been removed"); } else if (existingLock != lock) { - throw new ISE("WTH? Different lock instance"); + throw new ISE("Different lock instance"); } else { if (existingLock.numReferences == 1) { return null; diff --git a/server/src/main/java/org/apache/druid/segment/realtime/FireHydrant.java b/server/src/main/java/org/apache/druid/segment/realtime/FireHydrant.java index bf5ba26c5299..137280e4926c 100644 --- a/server/src/main/java/org/apache/druid/segment/realtime/FireHydrant.java +++ b/server/src/main/java/org/apache/druid/segment/realtime/FireHydrant.java @@ -96,7 +96,7 @@ public void swapSegment(@Nullable Segment newSegment) !newSegment.getId().equals(currentSegment.getId())) { // Sanity check: identifier should not change throw new ISE( - "WTF?! 
Cannot swap identifier[%s] -> [%s]!", + "Cannot swap identifier[%s] -> [%s]", currentSegment.getId(), newSegment.getId() ); diff --git a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/AppenderatorImpl.java b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/AppenderatorImpl.java index 24fcb67efa29..c1e8142b82cc 100644 --- a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/AppenderatorImpl.java +++ b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/AppenderatorImpl.java @@ -716,12 +716,12 @@ private DataSegment mergeAndPush( // Sanity checks for (FireHydrant hydrant : sink) { if (sink.isWritable()) { - throw new ISE("WTF?! Expected sink to be no longer writable before mergeAndPush. Segment[%s].", identifier); + throw new ISE("Expected sink to be no longer writable before mergeAndPush for segment[%s].", identifier); } synchronized (hydrant) { if (!hydrant.hasSwapped()) { - throw new ISE("WTF?! Expected sink to be fully persisted before mergeAndPush. Segment[%s].", identifier); + throw new ISE("Expected sink to be fully persisted before mergeAndPush for segment[%s].", identifier); } } } diff --git a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java index c1226c72aa81..ab8452caffb7 100644 --- a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java +++ b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java @@ -131,7 +131,7 @@ void setAppendingSegment(SegmentWithState appendingSegment) // There should be only one appending segment at any time Preconditions.checkState( this.appendingSegment == null, - "WTF?! Current appendingSegment[%s] is not null. " + "Current appendingSegment[%s] is not null. 
" + "Its state must be changed before setting a new appendingSegment[%s]", this.appendingSegment, appendingSegment @@ -345,7 +345,7 @@ private SegmentIdWithShardSpec getSegment( for (SegmentIdWithShardSpec identifier : appenderator.getSegments()) { if (identifier.equals(newSegment)) { throw new ISE( - "WTF?! Allocated segment[%s] which conflicts with existing segment[%s].", + "Allocated segment[%s] which conflicts with existing segment[%s].", newSegment, identifier ); @@ -418,7 +418,7 @@ protected AppenderatorDriverAddResult append( ); } catch (SegmentNotWritableException e) { - throw new ISE(e, "WTF?! Segment[%s] not writable when it should have been.", identifier); + throw new ISE(e, "Segment[%s] not writable when it should have been.", identifier); } } else { return AppenderatorDriverAddResult.fail(); diff --git a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java index 0555f2fd37ea..ba70a0aefd02 100644 --- a/server/src/main/java/org/apache/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java +++ b/server/src/main/java/org/apache/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java @@ -197,7 +197,7 @@ public void moveSegmentOut(final String sequenceName, final List committerSupplier) throws IndexSizeExceededException + public IncrementalIndexAddResult add(InputRow row, Supplier committerSupplier) + throws IndexSizeExceededException { long messageTimestamp = row.getTimestampFromEpoch(); final Sink sink = getSink(messageTimestamp); @@ -394,7 +396,7 @@ public void doRun() if (!isPushedMarker.exists()) { removeSegment(sink, mergedTarget); if (mergedTarget.exists()) { - log.wtf("Merged target[%s] exists?!", mergedTarget); + log.warn("Merged target[%s] still exists after attempt to delete it; skipping push.", mergedTarget); return; } } else { diff --git 
a/server/src/main/java/org/apache/druid/server/coordination/ChangeRequestHttpSyncer.java b/server/src/main/java/org/apache/druid/server/coordination/ChangeRequestHttpSyncer.java index fc466d114915..7f33d1e31814 100644 --- a/server/src/main/java/org/apache/druid/server/coordination/ChangeRequestHttpSyncer.java +++ b/server/src/main/java/org/apache/druid/server/coordination/ChangeRequestHttpSyncer.java @@ -391,7 +391,7 @@ private void addNextSyncToWorkQueue() } else { log.makeAlert( th, - "WTF! Couldn't schedule next sync. [%s] is not being synced any more, restarting Druid process on that " + "Couldn't schedule next sync. [%s] is not being synced any more, restarting Druid process on that " + "server might fix the issue.", logIdentity ).emit(); diff --git a/server/src/main/java/org/apache/druid/server/coordinator/HttpLoadQueuePeon.java b/server/src/main/java/org/apache/druid/server/coordinator/HttpLoadQueuePeon.java index 0d297697ee29..a05c5aac36f8 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/HttpLoadQueuePeon.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/HttpLoadQueuePeon.java @@ -227,7 +227,7 @@ public void onSuccess(InputStream result) break; default: scheduleNextRunImmediately = false; - log.error("WTF! 
Server[%s] returned unknown state in status[%s].", serverId, e.getStatus()); } } } diff --git a/server/src/main/java/org/apache/druid/server/coordinator/duty/CompactSegments.java b/server/src/main/java/org/apache/druid/server/coordinator/duty/CompactSegments.java index 7b8c9b58f457..d4eb5b2742bb 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/duty/CompactSegments.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/duty/CompactSegments.java @@ -98,7 +98,7 @@ public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) for (TaskStatusPlus status : compactionTasks) { final TaskPayloadResponse response = indexingServiceClient.getTaskPayload(status.getId()); if (response == null) { - throw new ISE("WTH? got a null paylord from overlord for task[%s]", status.getId()); + throw new ISE("Got a null payload from overlord for task[%s]", status.getId()); } if (COMPACTION_TASK_TYPE.equals(response.getPayload().getType())) { final ClientCompactionTaskQuery compactionTaskQuery = (ClientCompactionTaskQuery) response.getPayload(); @@ -107,7 +107,7 @@ public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) final int numSubTasks = findNumMaxConcurrentSubTasks(compactionTaskQuery.getTuningConfig()); numEstimatedNonCompleteCompactionTasks += numSubTasks + 1; // count the compaction task itself } else { - throw new ISE("WTH? 
task[%s] is not a compactionTask?", status.getId()); + throw new ISE("Task[%s] is not a compactionTask", status.getId()); } } diff --git a/server/src/main/java/org/apache/druid/server/lookup/cache/LookupCoordinatorManager.java b/server/src/main/java/org/apache/druid/server/lookup/cache/LookupCoordinatorManager.java index cd3ac2c68bdd..7526ecbcc10f 100644 --- a/server/src/main/java/org/apache/druid/server/lookup/cache/LookupCoordinatorManager.java +++ b/server/src/main/java/org/apache/druid/server/lookup/cache/LookupCoordinatorManager.java @@ -395,7 +395,7 @@ public void start() lookupCoordinatorManagerConfig.getHostTimeout().getMillis() * 10, TimeUnit.MILLISECONDS )) { - throw new ISE("WTF! LookupCoordinatorManager executor from last start() hasn't finished. Failed to Start."); + throw new ISE("LookupCoordinatorManager executor from last start() hasn't finished. Failed to start."); } executorService = MoreExecutors.listeningDecorator( diff --git a/server/src/test/java/org/apache/druid/segment/realtime/appenderator/AppenderatorTest.java b/server/src/test/java/org/apache/druid/segment/realtime/appenderator/AppenderatorTest.java index 76fe86542aa0..607a3c9fdd14 100644 --- a/server/src/test/java/org/apache/druid/segment/realtime/appenderator/AppenderatorTest.java +++ b/server/src/test/java/org/apache/druid/segment/realtime/appenderator/AppenderatorTest.java @@ -844,7 +844,7 @@ private static List sorted(final List xs) } else if (a instanceof DataSegment && b instanceof DataSegment) { return ((DataSegment) a).getId().compareTo(((DataSegment) b).getId()); } else { - throw new IllegalStateException("WTF??"); + throw new IllegalStateException("Unexpected object type"); } } ); diff --git a/services/src/main/java/org/apache/druid/cli/DumpSegment.java b/services/src/main/java/org/apache/druid/cli/DumpSegment.java index 9ba28191bc0c..eab178a1fff5 100644 --- a/services/src/main/java/org/apache/druid/cli/DumpSegment.java +++ b/services/src/main/java/org/apache/druid/cli/DumpSegment.java @@ -189,7 
+189,7 @@ public void run() runBitmaps(injector, index); break; default: - throw new ISE("WTF?! dumpType[%s] has no handler?", dumpType); + throw new ISE("dumpType[%s] has no handler", dumpType); } } catch (Exception e) { diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/BinaryOperatorConversion.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/BinaryOperatorConversion.java index 8199e3b4a873..9960a2c1dfda 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/BinaryOperatorConversion.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/BinaryOperatorConversion.java @@ -60,7 +60,7 @@ public DruidExpression toDruidExpression( rexNode, operands -> { if (operands.size() < 2) { - throw new ISE("WTF?! Got binary operator[%s] with %s args?", operator.getName(), operands.size()); + throw new ISE("Got binary operator[%s] with %s args", operator.getName(), operands.size()); } return DruidExpression.fromExpression( @@ -92,7 +92,7 @@ public DruidExpression toDruidExpressionWithPostAggOperands( rexNode, operands -> { if (operands.size() < 2) { - throw new ISE("WTF?! Got binary operator[%s] with %s args?", operator.getName(), operands.size()); + throw new ISE("Got binary operator[%s] with %s args", operator.getName(), operands.size()); } return DruidExpression.fromExpression( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java index b0c7c59421e7..51e54a4c875d 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java @@ -221,7 +221,7 @@ private static DruidExpression inputRefToDruidExpression( final RexInputRef ref = (RexInputRef) rexNode; final String columnName = rowSignature.getColumnName(ref.getIndex()); if (columnName == null) { - throw new ISE("WTF?! 
Expression referred to nonexistent index[%d]", ref.getIndex()); + throw new ISE("Expression referred to nonexistent index[%d]", ref.getIndex()); } return DruidExpression.fromColumn(columnName); @@ -490,7 +490,7 @@ private static DimFilter toSimpleLeafFilter( || kind == SqlKind.LESS_THAN || kind == SqlKind.LESS_THAN_OR_EQUAL) { final List operands = ((RexCall) rexNode).getOperands(); - Preconditions.checkState(operands.size() == 2, "WTF?! Expected 2 operands, got[%,d]", operands.size()); + Preconditions.checkState(operands.size() == 2, "Expected 2 operands, got[%,d]", operands.size()); boolean flip = false; RexNode lhs = operands.get(0); RexNode rhs = operands.get(1); @@ -525,7 +525,7 @@ private static DimFilter toSimpleLeafFilter( flippedKind = SqlKind.GREATER_THAN_OR_EQUAL; break; default: - throw new ISE("WTF?! Kind[%s] not expected here", kind); + throw new ISE("Kind[%s] not expected here", kind); } } else { flippedKind = kind; @@ -632,7 +632,7 @@ private static DimFilter toSimpleLeafFilter( filter = Bounds.lessThanOrEqualTo(boundRefKey, val); break; default: - throw new IllegalStateException("WTF?! Shouldn't have got here..."); + throw new IllegalStateException("Shouldn't have got here"); } return filter; @@ -770,7 +770,7 @@ private static DimFilter getBoundTimeDimFilter( case LESS_THAN_OR_EQUAL: return Bounds.lessThan(boundRefKey, String.valueOf(interval.getEndMillis())); default: - throw new IllegalStateException("WTF?! 
Shouldn't have got here..."); + throw new IllegalStateException("Shouldn't have got here"); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/OperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/OperatorConversions.java index 23c48df601b1..1f716e7578c0 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/OperatorConversions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/OperatorConversions.java @@ -195,7 +195,7 @@ public static PostAggregator toPostAggregator( final RexInputRef ref = (RexInputRef) rexNode; final String columnName = rowSignature.getColumnName(ref.getIndex()); if (columnName == null) { - throw new ISE("WTF?! PostAgg referred to nonexistent index[%d]", ref.getIndex()); + throw new ISE("PostAggregator referred to nonexistent index[%d]", ref.getIndex()); } return new FieldAccessPostAggregator( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/CeilOperatorConversion.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/CeilOperatorConversion.java index 77baafb796f7..588a2fe80a06 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/CeilOperatorConversion.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/CeilOperatorConversion.java @@ -59,8 +59,8 @@ public DruidExpression toDruidExpression( TimeFloorOperatorConversion.toTimestampFloorOrCeilArgs(plannerContext, rowSignature, call.getOperands()) ); } else { - // WTF? CEIL with the wrong number of arguments? 
+ // CEIL with an unexpected number of arguments; can't convert. return null; } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/FloorOperatorConversion.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/FloorOperatorConversion.java index d27c1e7b2a18..be8d891fc03b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/FloorOperatorConversion.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/FloorOperatorConversion.java @@ -59,8 +59,8 @@ public DruidExpression toDruidExpression( TimeFloorOperatorConversion.toTimestampFloorOrCeilArgs(plannerContext, rowSignature, call.getOperands()) ); } else { - // WTF? FLOOR with the wrong number of arguments? + // FLOOR with an unexpected number of arguments; can't convert. return null; } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/BoundValue.java b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/BoundValue.java index 76034515992f..e638ff45f287 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/BoundValue.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/BoundValue.java @@ -76,7 +76,7 @@ public int hashCode() public int compareTo(BoundValue o) { if (!comparator.equals(o.comparator)) { - throw new ISE("WTF?! 
Comparator mismatch?!"); + throw new ISE("Comparator mismatch"); } return comparator.compare(value, o.value); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/CombineAndSimplifyBounds.java b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/CombineAndSimplifyBounds.java index ce294943b674..971ba1620a20 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/CombineAndSimplifyBounds.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/CombineAndSimplifyBounds.java @@ -153,7 +153,8 @@ private static DimFilter doSimplify(final List children, boolean disj // We found a simplification. Remove the old filters and add new ones. for (final BoundDimFilter bound : filterList) { if (!newChildren.remove(bound)) { - throw new ISE("WTF?! Tried to remove bound but couldn't?"); + // Don't expect this to happen, but include it as a sanity check. + throw new ISE("Tried to remove bound, but couldn't"); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/ConvertSelectorsToIns.java b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/ConvertSelectorsToIns.java index 54b2625988b1..631fd937766f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/ConvertSelectorsToIns.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/ConvertSelectorsToIns.java @@ -85,7 +85,8 @@ public DimFilter process(DimFilter filter) for (final SelectorDimFilter selector : filterList) { values.add(selector.getValue()); if (!children.remove(selector)) { - throw new ISE("WTF?! Tried to remove selector but couldn't?"); + // Don't expect this to happen, but include it as a sanity check. 
+ throw new ISE("Tried to remove selector but couldn't"); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/Filtration.java b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/Filtration.java index 8f2f7604fbca..df03ff9662be 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/filtration/Filtration.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/filtration/Filtration.java @@ -141,7 +141,8 @@ public Filtration optimizeFilterOnly(final RowSignature rowSignature) ); if (!transformed.getIntervals().equals(ImmutableList.of(eternity()))) { - throw new ISE("WTF?! optimizeFilterOnly was about to return filtration with intervals?!"); + // Should not happen, but include as a sanity check to be sure. + throw new ISE("optimizeFilterOnly was about to return filtration with intervals"); } return transformed; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java index c8cfb045f6d1..4533c952f142 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java @@ -149,7 +149,7 @@ public RexNode convertCall(final SqlRexContext cx, final SqlCall call) ) ); } else { - throw new ISE("WTF?! Should not have got here, operator was: %s", operator); + throw new ISE("Should not have got here, operator was: %s", operator); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/PartialDruidQuery.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/PartialDruidQuery.java index 73abd6977d69..7a721a18133c 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/PartialDruidQuery.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/PartialDruidQuery.java @@ -393,7 +393,7 @@ public RelNode leafRel() case SCAN: return scan; default: - throw new ISE("WTF?! 
Unknown stage: %s", currentStage); + throw new ISE("Unknown stage: %s", currentStage); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/ProjectAggregatePruneUnusedCallRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/ProjectAggregatePruneUnusedCallRule.java index 4c79e80b39bf..7b79d8c5201d 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/ProjectAggregatePruneUnusedCallRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/ProjectAggregatePruneUnusedCallRule.java @@ -61,7 +61,7 @@ public void onMatch(final RelOptRuleCall call) final int fieldCount = aggregate.getGroupCount() + aggregate.getAggCallList().size(); if (fieldCount != aggregate.getRowType().getFieldCount()) { throw new ISE( - "WTF, expected[%s] to have[%s] fields but it had[%s]", + "Expected[%s] to have[%s] fields but it had[%s]", aggregate, fieldCount, aggregate.getRowType().getFieldCount()