diff --git a/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3DataSegmentKiller.java b/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3DataSegmentKiller.java
index fbe703a888e2..79435869e7b1 100644
--- a/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3DataSegmentKiller.java
+++ b/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3DataSegmentKiller.java
@@ -149,7 +149,7 @@ private boolean deleteKeysForBucket(
try {
deleteObjectsRequest.setKeys(chunkOfKeys);
log.info(
- "Removing from bucket: [%s] the following index files: [%s] from s3!",
+ "Deleting the following segment files from S3 bucket[%s]: [%s]",
s3Bucket,
keysToDeleteStrings
);
diff --git a/indexing-service/pom.xml b/indexing-service/pom.xml
index 5c722530acf8..da30e431e8fe 100644
--- a/indexing-service/pom.xml
+++ b/indexing-service/pom.xml
@@ -267,6 +267,11 @@
maven-resolver-api
1.3.1
+        <dependency>
+            <groupId>org.jdbi</groupId>
+            <artifactId>jdbi</artifactId>
+            <scope>test</scope>
+        </dependency>
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/actions/SegmentNukeAction.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/actions/SegmentNukeAction.java
index 84445d3968a5..6c73f664e3fd 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/actions/SegmentNukeAction.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/actions/SegmentNukeAction.java
@@ -27,16 +27,23 @@
import org.apache.druid.indexing.common.task.Task;
import org.apache.druid.indexing.overlord.CriticalAction;
import org.apache.druid.java.util.common.ISE;
+import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.java.util.emitter.service.ServiceMetricEvent;
import org.apache.druid.query.DruidMetrics;
import org.apache.druid.segment.SegmentUtils;
import org.apache.druid.timeline.DataSegment;
+import org.joda.time.Interval;
import java.util.Set;
import java.util.stream.Collectors;
+/**
+ * Permanently deletes unused segments from the metadata store.
+ */
public class SegmentNukeAction implements TaskAction
{
+ private static final Logger log = new Logger(SegmentNukeAction.class);
+
private final Set<DataSegment> segments;
@JsonCreator
@@ -65,22 +72,25 @@ public Void perform(Task task, TaskActionToolbox toolbox)
TaskLocks.checkLockCoversSegments(task, toolbox.getTaskLockbox(), segments);
try {
- toolbox.getTaskLockbox().doInCriticalSection(
+ final Set<Interval> intervals = segments.stream().map(DataSegment::getInterval).collect(Collectors.toSet());
+ int numDeletedSegments = toolbox.getTaskLockbox().doInCriticalSection(
task,
- segments.stream().map(DataSegment::getInterval).collect(Collectors.toSet()),
- CriticalAction.builder()
- .onValidLocks(
- () -> {
- toolbox.getIndexerMetadataStorageCoordinator().deleteSegments(segments);
- return null;
- }
- )
- .onInvalidLocks(
- () -> {
- throw new ISE("Some locks for task[%s] are already revoked", task.getId());
- }
- )
- .build()
+ intervals,
+ CriticalAction.builder().onValidLocks(
+ () -> toolbox.getIndexerMetadataStorageCoordinator().deleteSegments(segments)
+ ).onInvalidLocks(
+ () -> {
+ throw new ISE("Some locks for task[%s] are already revoked", task.getId());
+ }
+ ).build()
+ );
+
+ log.info(
+ "Deleted [%d] segments from metadata store out of requested[%d],"
+ + " across [%d] intervals[%s], for task[%s] of datasource[%s].",
+ numDeletedSegments, segments.size(),
+ intervals.size(), intervals,
+ task.getId(), task.getDataSource()
);
}
catch (Exception e) {
diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/KillUnusedSegmentsTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/KillUnusedSegmentsTask.java
index 06082a988d98..fe58c264ca8b 100644
--- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/KillUnusedSegmentsTask.java
+++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/KillUnusedSegmentsTask.java
@@ -212,7 +212,7 @@ public TaskStatus runTask(TaskToolbox toolbox) throws Exception
int nextBatchSize = computeNextBatchSize(numSegmentsKilled);
@Nullable Integer numTotalBatches = getNumTotalBatches();
List<DataSegment> unusedSegments;
- LOG.info(
+ logInfo(
"Starting kill for datasource[%s] in interval[%s] and versions[%s] with batchSize[%d], up to limit[%d]"
+ " segments before maxUsedStatusLastUpdatedTime[%s] will be deleted%s",
getDataSource(), getInterval(), getVersions(), batchSize, limit, maxUsedStatusLastUpdatedTime,
@@ -236,9 +236,7 @@ public TaskStatus runTask(TaskToolbox toolbox) throws Exception
break;
}
- unusedSegments = toolbox.getTaskActionClient().submit(
- new RetrieveUnusedSegmentsAction(getDataSource(), getInterval(), getVersions(), nextBatchSize, maxUsedStatusLastUpdatedTime)
- );
+ unusedSegments = fetchNextBatchOfUnusedSegments(toolbox, nextBatchSize);
// Fetch locks each time as a revokal could have occurred in between batches
final NavigableMap<DateTime, List<TaskLock>> taskLockMap
@@ -283,6 +281,7 @@ public TaskStatus runTask(TaskToolbox toolbox) throws Exception
// Nuke Segments
taskActionClient.submit(new SegmentNukeAction(new HashSet<>(unusedSegments)));
+ emitMetric(toolbox.getEmitter(), TaskMetrics.SEGMENTS_DELETED_FROM_METADATA_STORE, unusedSegments.size());
// Determine segments to be killed
final List<DataSegment> segmentsToBeKilled
@@ -290,22 +289,27 @@ public TaskStatus runTask(TaskToolbox toolbox) throws Exception
final Set segmentsNotKilled = new HashSet<>(unusedSegments);
segmentsToBeKilled.forEach(segmentsNotKilled::remove);
- LOG.infoSegments(
- segmentsNotKilled,
- "Skipping segment kill from deep storage as their load specs are referenced by other segments."
- );
+
+ if (!segmentsNotKilled.isEmpty()) {
+ LOG.warn(
+ "Skipping kill of [%d] segments from deep storage as their load specs are used by other segments.",
+ segmentsNotKilled.size()
+ );
+ }
toolbox.getDataSegmentKiller().kill(segmentsToBeKilled);
+ emitMetric(toolbox.getEmitter(), TaskMetrics.SEGMENTS_DELETED_FROM_DEEPSTORE, segmentsToBeKilled.size());
+
numBatchesProcessed++;
numSegmentsKilled += segmentsToBeKilled.size();
- LOG.info("Processed [%d] batches for kill task[%s].", numBatchesProcessed, getId());
+ logInfo("Processed [%d] batches for kill task[%s].", numBatchesProcessed, getId());
nextBatchSize = computeNextBatchSize(numSegmentsKilled);
} while (!unusedSegments.isEmpty() && (null == numTotalBatches || numBatchesProcessed < numTotalBatches));
final String taskId = getId();
- LOG.info(
+ logInfo(
"Finished kill task[%s] for dataSource[%s] and interval[%s]."
+ " Deleted total [%d] unused segments in [%d] batches.",
taskId, getDataSource(), getInterval(), numSegmentsKilled, numBatchesProcessed
@@ -322,9 +326,8 @@ taskId, getDataSource(), getInterval(), numSegmentsKilled, numBatchesProcessed
}
@JsonIgnore
- @VisibleForTesting
@Nullable
- Integer getNumTotalBatches()
+ protected Integer getNumTotalBatches()
{
return null != limit ? (int) Math.ceil((double) limit / batchSize) : null;
}
@@ -336,6 +339,31 @@ int computeNextBatchSize(int numSegmentsKilled)
return null != limit ? Math.min(limit - numSegmentsKilled, batchSize) : batchSize;
}
+ /**
+ * Fetches the next batch of unused segments that are eligible for kill.
+ */
+ protected List<DataSegment> fetchNextBatchOfUnusedSegments(TaskToolbox toolbox, int nextBatchSize) throws IOException
+ {
+ return toolbox.getTaskActionClient().submit(
+ new RetrieveUnusedSegmentsAction(
+ getDataSource(),
+ getInterval(),
+ getVersions(),
+ nextBatchSize,
+ maxUsedStatusLastUpdatedTime
+ )
+ );
+ }
+
+ /**
+ * Logs the given info message. Exposed here to allow embedded kill tasks to
+ * suppress info logs.
+ */
+ protected void logInfo(String message, Object... args)
+ {
+ LOG.info(message, args);
+ }
+
private NavigableMap<DateTime, List<TaskLock>> getNonRevokedTaskLockMap(TaskActionClient client) throws IOException
{
final NavigableMap<DateTime, List<TaskLock>> taskLockMap = new TreeMap<>();
@@ -385,6 +413,10 @@ private List getKillableSegments(
response.getUpgradedToSegmentIds().forEach((parent, children) -> {
if (!CollectionUtils.isNullOrEmpty(children)) {
// Do not kill segment if its parent or any of its siblings still exist in metadata store
+ LOG.info(
+ "Skipping kill of segments[%s] as its load spec is also used by segment IDs[%s].",
+ parentIdToUnusedSegments.get(parent), children
+ );
parentIdToUnusedSegments.remove(parent);
}
});
@@ -402,10 +434,25 @@ private List getKillableSegments(
return parentIdToUnusedSegments.values()
.stream()
.flatMap(Set::stream)
- .filter(segment -> !usedSegmentLoadSpecs.contains(segment.getLoadSpec()))
+ .filter(segment -> !isSegmentLoadSpecPresentIn(segment, usedSegmentLoadSpecs))
.collect(Collectors.toList());
}
+ /**
+ * @return true if the load spec of the segment is present in the given set of
+ * used load specs.
+ */
+ private boolean isSegmentLoadSpecPresentIn(
+ DataSegment segment,
+ Set<Map<String, Object>> usedSegmentLoadSpecs
- *
- * See {@link org.apache.druid.indexing.common.task.KillUnusedSegmentsTask}.
- *
+ *
+ * @see org.apache.druid.indexing.common.task.KillUnusedSegmentsTask for details
+ * of the actual kill task and {@code UnusedSegmentKiller} to run embedded kill
+ * tasks on the Overlord.
*/
public class KillUnusedSegments implements CoordinatorDuty
{
diff --git a/server/src/test/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinatorReadOnlyTest.java b/server/src/test/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinatorReadOnlyTest.java
index 48656aa93e96..72ee7a9a1691 100644
--- a/server/src/test/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinatorReadOnlyTest.java
+++ b/server/src/test/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinatorReadOnlyTest.java
@@ -101,7 +101,7 @@ public void setup()
cachePollExecutor = new BlockingExecutorService("test-cache-poll-exec");
segmentMetadataCache = new HeapMemorySegmentMetadataCache(
mapper,
- () -> new SegmentsMetadataManagerConfig(null, cacheMode),
+ () -> new SegmentsMetadataManagerConfig(null, cacheMode, null),
derbyConnectorRule.metadataTablesConfigSupplier(),
new NoopSegmentSchemaCache(),
derbyConnector,
diff --git a/server/src/test/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinatorTest.java b/server/src/test/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinatorTest.java
index 9a1b7ce63edf..db5c23b8d30e 100644
--- a/server/src/test/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinatorTest.java
+++ b/server/src/test/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinatorTest.java
@@ -151,7 +151,7 @@ public void setUp()
segmentMetadataCache = new HeapMemorySegmentMetadataCache(
mapper,
- () -> new SegmentsMetadataManagerConfig(null, cacheMode),
+ () -> new SegmentsMetadataManagerConfig(null, cacheMode, null),
derbyConnectorRule.metadataTablesConfigSupplier(),
new NoopSegmentSchemaCache(),
derbyConnector,
@@ -1831,6 +1831,99 @@ public void testSimpleUnusedListWithLimit()
Assert.assertTrue(SEGMENTS.containsAll(retreivedUnusedSegments));
}
+ @Test
+ public void testRetrieveUnusedSegmentsWithExactInterval()
+ {
+ final String dataSource = defaultSegment.getDataSource();
+ coordinator.commitSegments(Set.of(defaultSegment, defaultSegment2, defaultSegment3), null);
+
+ final DateTime now = DateTimes.nowUtc();
+ markAllSegmentsUnused(Set.of(defaultSegment, defaultSegment2, defaultSegment3), now.minusHours(1));
+
+ // Verify that query for overlapping interval does not return the segments
+ Assert.assertTrue(
+ coordinator.retrieveUnusedSegmentsWithExactInterval(
+ dataSource,
+ Intervals.ETERNITY,
+ now,
+ 10
+ ).isEmpty()
+ );
+
+ // Verify that query for exact interval returns the segments
+ Assert.assertEquals(
+ List.of(defaultSegment3),
+ coordinator.retrieveUnusedSegmentsWithExactInterval(
+ dataSource,
+ defaultSegment3.getInterval(),
+ now,
+ 10
+ )
+ );
+
+ Assert.assertEquals(defaultSegment.getInterval(), defaultSegment2.getInterval());
+ Assert.assertEquals(
+ Set.of(defaultSegment, defaultSegment2),
+ Set.copyOf(
+ coordinator.retrieveUnusedSegmentsWithExactInterval(
+ dataSource,
+ defaultSegment.getInterval(),
+ now,
+ 10
+ )
+ )
+ );
+
+ // Verify that query with limit 1 returns only 1 result
+ Assert.assertEquals(
+ 1,
+ coordinator.retrieveUnusedSegmentsWithExactInterval(
+ dataSource,
+ defaultSegment.getInterval(),
+ now,
+ 1
+ ).size()
+ );
+ }
+
+ @Test
+ public void testRetrieveUnusedSegmentIntervals()
+ {
+ final String dataSource = defaultSegment.getDataSource();
+ coordinator.commitSegments(Set.of(defaultSegment, defaultSegment3), null);
+
+ Assert.assertTrue(coordinator.retrieveUnusedSegmentIntervals(dataSource, 100).isEmpty());
+
+ markAllSegmentsUnused(Set.of(defaultSegment), DateTimes.nowUtc().minusHours(1));
+ Assert.assertEquals(
+ List.of(defaultSegment.getInterval()),
+ coordinator.retrieveUnusedSegmentIntervals(dataSource, 100)
+ );
+
+ markAllSegmentsUnused(Set.of(defaultSegment3), DateTimes.nowUtc().minusHours(1));
+ Assert.assertEquals(
+ Set.of(defaultSegment.getInterval(), defaultSegment3.getInterval()),
+ Set.copyOf(coordinator.retrieveUnusedSegmentIntervals(dataSource, 100))
+ );
+
+ // Verify retrieve with limit 1 returns only 1 interval
+ Assert.assertEquals(
+ 1,
+ coordinator.retrieveUnusedSegmentIntervals(dataSource, 1).size()
+ );
+ }
+
+ @Test
+ public void testRetrieveAllDatasourceNames()
+ {
+ coordinator.commitSegments(Set.of(defaultSegment), null);
+ coordinator.commitSegments(Set.of(hugeTimeRangeSegment1), null);
+ Assert.assertEquals(
+ Set.of("fooDataSource", "hugeTimeRangeDataSource"),
+ coordinator.retrieveAllDatasourceNames()
+ );
+ }
+
@Test
public void testUsedOverlapLow()
{
@@ -3742,7 +3835,11 @@ public void testRetrieveUnusedSegmentsForExactIntervalAndVersion()
SegmentId highestUnusedId = transactionFactory.inReadWriteDatasourceTransaction(
TestDataSource.WIKI,
- transaction -> transaction.findHighestUnusedSegmentId(Intervals.of("2024/2025"), "v1")
+ transaction -> transaction.noCacheSql().retrieveHighestUnusedSegmentId(
+ TestDataSource.WIKI,
+ Intervals.of("2024/2025"),
+ "v1"
+ )
);
Assert.assertEquals(
unusedSegmentForExactIntervalAndVersion.getId(),
diff --git a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerProviderTest.java b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerProviderTest.java
index fb310430ac38..44812c3acbf2 100644
--- a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerProviderTest.java
+++ b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerProviderTest.java
@@ -46,7 +46,7 @@ public class SqlSegmentsMetadataManagerProviderTest
public void testLifecycleStartCreatesSegmentTables() throws Exception
{
final TestDerbyConnector connector = derbyConnectorRule.getConnector();
- final SegmentsMetadataManagerConfig config = new SegmentsMetadataManagerConfig(null, null);
+ final SegmentsMetadataManagerConfig config = new SegmentsMetadataManagerConfig(null, null, null);
final Lifecycle lifecycle = new Lifecycle();
final SegmentSchemaCache segmentSchemaCache = new SegmentSchemaCache();
SqlSegmentsMetadataManagerProvider provider = new SqlSegmentsMetadataManagerProvider(
diff --git a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerSchemaPollTest.java b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerSchemaPollTest.java
index b1d8335a2276..8e7e6a7fc5b0 100644
--- a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerSchemaPollTest.java
+++ b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerSchemaPollTest.java
@@ -109,7 +109,7 @@ public void testPollSegmentAndSchema()
CentralizedDatasourceSchemaConfig centralizedDatasourceSchemaConfig
= CentralizedDatasourceSchemaConfig.enabled(true);
- config = new SegmentsMetadataManagerConfig(Period.seconds(3), null);
+ config = new SegmentsMetadataManagerConfig(Period.seconds(3), null, null);
sqlSegmentsMetadataManager = new SqlSegmentsMetadataManager(
jsonMapper,
Suppliers.ofInstance(config),
@@ -193,7 +193,7 @@ public void testPollOnlyNewSchemaVersion()
CentralizedDatasourceSchemaConfig centralizedDatasourceSchemaConfig
= CentralizedDatasourceSchemaConfig.enabled(true);
- config = new SegmentsMetadataManagerConfig(Period.seconds(3), null);
+ config = new SegmentsMetadataManagerConfig(Period.seconds(3), null, null);
sqlSegmentsMetadataManager = new SqlSegmentsMetadataManager(
jsonMapper,
Suppliers.ofInstance(config),
diff --git a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerTest.java b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerTest.java
index 987d93dcd276..9e1ba861e1e6 100644
--- a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerTest.java
+++ b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerTest.java
@@ -384,7 +384,8 @@ public void testIterateAllUsedNonOvershadowedSegmentsForDatasourceInterval()
final Interval theInterval = Intervals.of("2012-03-15T00:00:00.000/2012-03-20T00:00:00.000");
// Re-create SqlSegmentsMetadataManager with a higher poll duration
- final SegmentsMetadataManagerConfig config = new SegmentsMetadataManagerConfig(Period.seconds(1), null);
+ final SegmentsMetadataManagerConfig config =
+ new SegmentsMetadataManagerConfig(Period.seconds(1), null, null);
sqlSegmentsMetadataManager = new SqlSegmentsMetadataManager(
jsonMapper,
Suppliers.ofInstance(config),
diff --git a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerTestBase.java b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerTestBase.java
index c35488c62ebf..bedc41f11e81 100644
--- a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerTestBase.java
+++ b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataManagerTestBase.java
@@ -72,7 +72,7 @@ public class SqlSegmentsMetadataManagerTestBase
protected void setUp(TestDerbyConnector.DerbyConnectorRule derbyConnectorRule) throws Exception
{
- config = new SegmentsMetadataManagerConfig(Period.seconds(3), null);
+ config = new SegmentsMetadataManagerConfig(Period.seconds(3), null, null);
connector = derbyConnectorRule.getConnector();
storageConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
diff --git a/server/src/test/java/org/apache/druid/metadata/segment/SqlSegmentsMetadataManagerV2Test.java b/server/src/test/java/org/apache/druid/metadata/segment/SqlSegmentsMetadataManagerV2Test.java
index e5554b931eef..08098decdab1 100644
--- a/server/src/test/java/org/apache/druid/metadata/segment/SqlSegmentsMetadataManagerV2Test.java
+++ b/server/src/test/java/org/apache/druid/metadata/segment/SqlSegmentsMetadataManagerV2Test.java
@@ -88,7 +88,7 @@ private void initManager(
segmentMetadataCacheExec = new BlockingExecutorService("test");
SegmentMetadataCache segmentMetadataCache = new HeapMemorySegmentMetadataCache(
jsonMapper,
- Suppliers.ofInstance(new SegmentsMetadataManagerConfig(Period.seconds(1), cacheMode)),
+ Suppliers.ofInstance(new SegmentsMetadataManagerConfig(Period.seconds(1), cacheMode, null)),
Suppliers.ofInstance(storageConfig),
useSchemaCache ? new SegmentSchemaCache() : new NoopSegmentSchemaCache(),
connector,
diff --git a/server/src/test/java/org/apache/druid/metadata/segment/cache/HeapMemorySegmentMetadataCacheTest.java b/server/src/test/java/org/apache/druid/metadata/segment/cache/HeapMemorySegmentMetadataCacheTest.java
index 2a5940b04951..fab64152ec8a 100644
--- a/server/src/test/java/org/apache/druid/metadata/segment/cache/HeapMemorySegmentMetadataCacheTest.java
+++ b/server/src/test/java/org/apache/druid/metadata/segment/cache/HeapMemorySegmentMetadataCacheTest.java
@@ -117,7 +117,7 @@ private void setupTargetWithCaching(SegmentMetadataCache.UsageMode cacheMode, bo
throw new ISE("Test target has already been initialized with caching[%s]", cache.isEnabled());
}
final SegmentsMetadataManagerConfig metadataManagerConfig
- = new SegmentsMetadataManagerConfig(null, cacheMode);
+ = new SegmentsMetadataManagerConfig(null, cacheMode, null);
schemaCache = useSchemaCache ? new SegmentSchemaCache() : new NoopSegmentSchemaCache();
cache = new HeapMemorySegmentMetadataCache(
TestHelper.JSON_MAPPER,
diff --git a/server/src/test/java/org/apache/druid/segment/metadata/CoordinatorSegmentMetadataCacheTest.java b/server/src/test/java/org/apache/druid/segment/metadata/CoordinatorSegmentMetadataCacheTest.java
index d1b4cdfb7a61..0dea1be5d1a3 100644
--- a/server/src/test/java/org/apache/druid/segment/metadata/CoordinatorSegmentMetadataCacheTest.java
+++ b/server/src/test/java/org/apache/druid/segment/metadata/CoordinatorSegmentMetadataCacheTest.java
@@ -132,7 +132,7 @@ public void setUp() throws Exception
Mockito.when(segmentsMetadataManager.getRecentDataSourcesSnapshot())
.thenReturn(DataSourcesSnapshot.fromUsedSegments(List.of()));
SegmentsMetadataManagerConfig metadataManagerConfig =
- new SegmentsMetadataManagerConfig(Period.millis(10), null);
+ new SegmentsMetadataManagerConfig(Period.millis(10), null, null);
segmentsMetadataManagerConfigSupplier = Suppliers.ofInstance(metadataManagerConfig);
}
diff --git a/server/src/test/java/org/apache/druid/server/coordinator/simulate/TestDruidLeaderSelector.java b/server/src/test/java/org/apache/druid/server/coordinator/simulate/TestDruidLeaderSelector.java
index d84cbcff6efe..d526ccfaf6f9 100644
--- a/server/src/test/java/org/apache/druid/server/coordinator/simulate/TestDruidLeaderSelector.java
+++ b/server/src/test/java/org/apache/druid/server/coordinator/simulate/TestDruidLeaderSelector.java
@@ -23,16 +23,21 @@
import javax.annotation.Nullable;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
public class TestDruidLeaderSelector implements DruidLeaderSelector
{
+ private final AtomicInteger localTerm = new AtomicInteger(0);
private final AtomicBoolean isLeader = new AtomicBoolean(false);
private volatile Listener listener;
public void becomeLeader()
{
- if (isLeader.compareAndSet(false, true) && listener != null) {
- listener.becomeLeader();
+ if (isLeader.compareAndSet(false, true)) {
+ if (listener != null) {
+ listener.becomeLeader();
+ }
+ localTerm.incrementAndGet();
}
}
@@ -59,7 +64,7 @@ public boolean isLeader()
@Override
public int localTerm()
{
- return 0;
+ return localTerm.get();
}
@Override
diff --git a/services/src/main/java/org/apache/druid/cli/CliOverlord.java b/services/src/main/java/org/apache/druid/cli/CliOverlord.java
index 95fa654674d9..ef4901c68c35 100644
--- a/services/src/main/java/org/apache/druid/cli/CliOverlord.java
+++ b/services/src/main/java/org/apache/druid/cli/CliOverlord.java
@@ -93,6 +93,7 @@
import org.apache.druid.indexing.overlord.duty.OverlordDuty;
import org.apache.druid.indexing.overlord.duty.TaskLogAutoCleaner;
import org.apache.druid.indexing.overlord.duty.TaskLogAutoCleanerConfig;
+import org.apache.druid.indexing.overlord.duty.UnusedSegmentsKiller;
import org.apache.druid.indexing.overlord.hrtr.HttpRemoteTaskRunnerFactory;
import org.apache.druid.indexing.overlord.hrtr.HttpRemoteTaskRunnerResource;
import org.apache.druid.indexing.overlord.http.OverlordCompactionResource;
@@ -249,6 +250,8 @@ public void configure(Binder binder)
binder.bind(ShuffleClient.class).toProvider(Providers.of(null));
binder.bind(ChatHandlerProvider.class).toProvider(Providers.of(new NoopChatHandlerProvider()));
+ CliPeon.bindDataSegmentKiller(binder);
+
PolyBind.createChoice(
binder,
"druid.indexer.task.rowIngestionMeters.type",
@@ -445,9 +448,9 @@ private void configureAutoscale(Binder binder)
private void configureOverlordHelpers(Binder binder)
{
JsonConfigProvider.bind(binder, "druid.indexer.logs.kill", TaskLogAutoCleanerConfig.class);
- Multibinder.newSetBinder(binder, OverlordDuty.class)
- .addBinding()
- .to(TaskLogAutoCleaner.class);
+ final Multibinder<OverlordDuty> dutyBinder = Multibinder.newSetBinder(binder, OverlordDuty.class);
+ dutyBinder.addBinding().to(TaskLogAutoCleaner.class);
+ dutyBinder.addBinding().to(UnusedSegmentsKiller.class).in(LazySingleton.class);
}
},
new IndexingServiceInputSourceModule(),
diff --git a/services/src/main/java/org/apache/druid/cli/CliPeon.java b/services/src/main/java/org/apache/druid/cli/CliPeon.java
index e9cbab108d39..9cb939f9b899 100644
--- a/services/src/main/java/org/apache/druid/cli/CliPeon.java
+++ b/services/src/main/java/org/apache/druid/cli/CliPeon.java
@@ -468,14 +468,19 @@ static void bindChatHandler(Binder binder)
static void bindPeonDataSegmentHandlers(Binder binder)
{
// Build it to make it bind even if nothing binds to it.
- Binders.dataSegmentKillerBinder(binder);
- binder.bind(DataSegmentKiller.class).to(OmniDataSegmentKiller.class).in(LazySingleton.class);
+ bindDataSegmentKiller(binder);
Binders.dataSegmentMoverBinder(binder);
binder.bind(DataSegmentMover.class).to(OmniDataSegmentMover.class).in(LazySingleton.class);
Binders.dataSegmentArchiverBinder(binder);
binder.bind(DataSegmentArchiver.class).to(OmniDataSegmentArchiver.class).in(LazySingleton.class);
}
+ static void bindDataSegmentKiller(Binder binder)
+ {
+ Binders.dataSegmentKillerBinder(binder);
+ binder.bind(DataSegmentKiller.class).to(OmniDataSegmentKiller.class).in(LazySingleton.class);
+ }
+
private static void configureTaskActionClient(Binder binder)
{
binder.bind(TaskActionClientFactory.class)