Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.druid.java.util;
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why did you choose to put this class in this package?

I didn't find an equivalent class in the java.util package.

I think for now it might be better to move this to a private class in KillCompactionConfig which appears to be the only place where this is used

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it would be useful for people using the RetryUtils (which is also in org.apache.druid.java.util). RetryUtils.retry retry condition requires an exception to be thrown and applying a predicate to the thrown exception. For cases where the task method does not throw an exception, the method can throw this RetryableException so that the RetryUtils can then retry

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah, this is what I suspected, but the package naming threw me off. Can you please add javadocs explaining this use case

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done


import com.google.common.base.Predicate;
import org.apache.druid.java.util.common.RetryUtils;

/**
 * This Exception class can be used with {@link RetryUtils}.
 * The method {@link RetryUtils#retry(RetryUtils.Task, Predicate, int)} retry condition (Predicate argument)
 * requires an exception to be thrown, and applies the predicate to the thrown exception.
 * For cases where the task method does not throw an exception but still needs retrying,
 * the task can throw this RetryableException so that RetryUtils can then retry the task.
 */
public class RetryableException extends Exception
{
  /**
   * @param t the underlying cause that makes the task eligible for retry
   */
  public RetryableException(Throwable t)
  {
    super(t);
  }

  /**
   * Convenience overload allowing a descriptive detail message alongside the cause.
   *
   * @param message detail message describing why the task should be retried
   * @param t       the underlying cause that makes the task eligible for retry
   */
  public RetryableException(String message, Throwable t)
  {
    super(message, t);
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.druid.java.util.RetryableException;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.hamcrest.CoreMatchers;
import org.junit.Assert;
Expand Down Expand Up @@ -190,4 +191,22 @@ public void testInterruptRetryLoop() throws ExecutionException, InterruptedExcep
}
}

@Test
public void testExceptionPredicateForRetryableException() throws Exception
{
  final AtomicInteger count = new AtomicInteger();
  final String result = RetryUtils.retry(
      () -> {
        // Fail once with a RetryableException, succeed on the second attempt.
        if (count.incrementAndGet() >= 2) {
          return "hey";
        } else {
          throw new RetryableException(new RuntimeException("uhh"));
        }
      },
      e -> e instanceof RetryableException,
      3
  );
  // JUnit's assertEquals contract is (expected, actual); the original call had the
  // arguments swapped, which yields a misleading message on failure.
  Assert.assertEquals("hey", result);
  Assert.assertEquals("count", 2, count.get());
}
}
2 changes: 2 additions & 0 deletions docs/configuration/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -753,6 +753,8 @@ These Coordinator static configurations can be defined in the `coordinator/runti
|`druid.coordinator.kill.audit.on`| Boolean value for whether to enable automatic deletion of audit logs. If set to true, Coordinator will periodically remove audit logs from the audit table entries in metadata storage.| No | False|
|`druid.coordinator.kill.audit.period`| How often to do automatic deletion of audit logs in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) duration format. Value must be equal to or greater than `druid.coordinator.period.metadataStoreManagementPeriod`. Only applies if `druid.coordinator.kill.audit.on` is set to "True".| No| `P1D`|
|`druid.coordinator.kill.audit.durationToRetain`| Duration of audit logs to be retained from created time in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) duration format. Only applies if `druid.coordinator.kill.audit.on` is set to "True".| Yes if `druid.coordinator.kill.audit.on` is set to "True".| None|
|`druid.coordinator.kill.compaction.on`| Boolean value for whether to enable automatic deletion of compaction configurations. If set to true, Coordinator will periodically remove compaction configurations of inactive datasources (datasources with no used or unused segments) from the config table in metadata storage. | No | False|
|`druid.coordinator.kill.compaction.period`| How often to do automatic deletion of compaction configurations in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) duration format. Value must be equal to or greater than `druid.coordinator.period.metadataStoreManagementPeriod`. Only applies if `druid.coordinator.kill.compaction.on` is set to "True".| No| `P1D`|
|`druid.coordinator.kill.rule.on`| Boolean value for whether to enable automatic deletion of rules. If set to true, Coordinator will periodically remove rules of inactive datasource (datasource with no used and unused segments) from the rule table in metadata storage.| No | False|
|`druid.coordinator.kill.rule.period`| How often to do automatic deletion of rules in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) duration format. Value must be equal to or greater than `druid.coordinator.period.metadataStoreManagementPeriod`. Only applies if `druid.coordinator.kill.rule.on` is set to "True".| No| `P1D`|
|`druid.coordinator.kill.rule.durationToRetain`| Duration of rules to be retained from created time in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) duration format. Only applies if `druid.coordinator.kill.rule.on` is set to "True".| Yes if `druid.coordinator.kill.rule.on` is set to "True".| None|
Expand Down
1 change: 1 addition & 0 deletions docs/operations/metrics.md
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,7 @@ These metrics are for the Druid Coordinator and are reset each time the Coordina
|`coordinator/global/time`|Approximate runtime of a full coordination cycle in milliseconds. The `dutyGroup` dimension indicates what type of coordination this run was. i.e. Historical Management vs Indexing|`dutyGroup`|Varies.|
|`metadata/kill/supervisor/count`|Total number of terminated supervisors that were automatically deleted from metadata store per each Coordinator kill supervisor duty run. This metric can help adjust `druid.coordinator.kill.supervisor.durationToRetain` configuration based on whether more or less terminated supervisors need to be deleted per cycle. Note that this metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to true.| |Varies.|
|`metadata/kill/audit/count`|Total number of audit logs that were automatically deleted from metadata store per each Coordinator kill audit duty run. This metric can help adjust `druid.coordinator.kill.audit.durationToRetain` configuration based on whether more or less audit logs need to be deleted per cycle. Note that this metric is only emitted when `druid.coordinator.kill.audit.on` is set to true.| |Varies.|
|`metadata/kill/compaction/count`|Total number of compaction configurations that were automatically deleted from metadata store per each Coordinator kill compaction configuration duty run. Note that this metric is only emitted when `druid.coordinator.kill.compaction.on` is set to true.| |Varies.|
|`metadata/kill/rule/count`|Total number of rules that were automatically deleted from metadata store per each Coordinator kill rule duty run. This metric can help adjust `druid.coordinator.kill.rule.durationToRetain` configuration based on whether more or less rules need to be deleted per cycle. Note that this metric is only emitted when `druid.coordinator.kill.rule.on` is set to true.| |Varies.|
|`metadata/kill/datasource/count`|Total number of datasource metadata that were automatically deleted from metadata store per each Coordinator kill datasource duty run (Note: datasource metadata only exists for datasource created from supervisor). This metric can help adjust `druid.coordinator.kill.datasource.durationToRetain` configuration based on whether more or less datasource metadata need to be deleted per cycle. Note that this metric is only emitted when `druid.coordinator.kill.datasource.on` is set to true.| |Varies.|

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,10 @@ public abstract class DruidCoordinatorConfig
@Default("PT-1s")
public abstract Duration getCoordinatorAuditKillDurationToRetain();

/**
 * Period, bound to {@code druid.coordinator.kill.compaction.period}, controlling how often the
 * automatic deletion of compaction configurations runs. Defaults to one day ({@code P1D}).
 * NOTE(review): consumers validate this to be >= the metadata store management period — confirm
 * against the duty that reads it.
 */
@Config("druid.coordinator.kill.compaction.period")
@Default("P1D")
public abstract Duration getCoordinatorCompactionKillPeriod();

@Config("druid.coordinator.kill.rule.period")
@Default("P1D")
public abstract Duration getCoordinatorRuleKillPeriod();
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,162 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.druid.server.coordinator.duty;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.inject.Inject;
import org.apache.druid.audit.AuditInfo;
import org.apache.druid.common.config.ConfigManager;
import org.apache.druid.common.config.JacksonConfigManager;
import org.apache.druid.java.util.RetryableException;
import org.apache.druid.java.util.common.RetryUtils;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
import org.apache.druid.java.util.emitter.service.ServiceMetricEvent;
import org.apache.druid.metadata.SqlSegmentsMetadataManager;
import org.apache.druid.server.coordinator.CoordinatorCompactionConfig;
import org.apache.druid.server.coordinator.DataSourceCompactionConfig;
import org.apache.druid.server.coordinator.DruidCoordinatorConfig;
import org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams;

import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
* CoordinatorDuty for automatic deletion of compaction configurations from the config table in metadata storage.
* Note that this will delete compaction configurations for inactive datasources
* (datasources with no used or unused segments) immediately.
*/
public class KillCompactionConfig implements CoordinatorDuty
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

javadocs please

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done

{
private static final Logger log = new Logger(KillCompactionConfig.class);
private static final int UPDATE_NUM_RETRY = 5;

static final String COUNT_METRIC = "metadata/kill/compaction/count";

private final long period;
private long lastKillTime = 0;

private final JacksonConfigManager jacksonConfigManager;
private final SqlSegmentsMetadataManager sqlSegmentsMetadataManager;

@Inject
public KillCompactionConfig(
DruidCoordinatorConfig config,
SqlSegmentsMetadataManager sqlSegmentsMetadataManager,
JacksonConfigManager jacksonConfigManager
)
{
this.sqlSegmentsMetadataManager = sqlSegmentsMetadataManager;
this.jacksonConfigManager = jacksonConfigManager;
this.period = config.getCoordinatorCompactionKillPeriod().getMillis();
Preconditions.checkArgument(
this.period >= config.getCoordinatorMetadataStoreManagementPeriod().getMillis(),
"Coordinator compaction configuration kill period must be >= druid.coordinator.period.metadataStoreManagementPeriod"
);
log.debug(
"Compaction Configuration Kill Task scheduling enabled with period [%s]",
this.period
);
}

/**
 * Runs the duty: if at least {@code period} millis have elapsed since the last run, removes
 * compaction configs belonging to datasources that are no longer active, retrying up to
 * {@link #UPDATE_NUM_RETRY} times on retryable config-manager failures.
 */
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params)
{
long currentTimeMillis = System.currentTimeMillis();
// Throttle: only run once per configured period; lastKillTime is updated before the attempt,
// so a failed attempt still waits a full period before retrying the duty itself.
if ((lastKillTime + period) < currentTimeMillis) {
lastKillTime = currentTimeMillis;
try {
RetryUtils.retry(
() -> {
CoordinatorCompactionConfig current = CoordinatorCompactionConfig.current(jacksonConfigManager);
// If current compaction config is empty then there is nothing to do
if (CoordinatorCompactionConfig.empty().equals(current)) {
log.info(
"Finished running KillCompactionConfig duty. Nothing to do as compaction config is already empty.");
emitMetric(params.getEmitter(), 0);
return ConfigManager.SetResult.ok();
}

// Get all active datasources
// Note that we get all active datasources after getting compaction config to prevent race condition if new
// datasource and config are added.
Set<String> activeDatasources = sqlSegmentsMetadataManager.retrieveAllDataSourceNames();
// Keep only configs whose datasource is still active; everything else is dropped.
final Map<String, DataSourceCompactionConfig> updated = current
.getCompactionConfigs()
.stream()
.filter(dataSourceCompactionConfig -> activeDatasources.contains(dataSourceCompactionConfig.getDataSource()))
.collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));

// Calculate number of compaction configs to remove for logging
int compactionConfigRemoved = current.getCompactionConfigs().size() - updated.size();

// Compare-and-swap update: `current` is the expected old value, the filtered config the new one.
ConfigManager.SetResult result = jacksonConfigManager.set(
CoordinatorCompactionConfig.CONFIG_KEY,
// Do database insert without swap if the current config is empty as this means the config may be null in the database
// NOTE(review): this comment does not obviously match the code — `current` is always passed
// as the old value, with no special-casing for empty config. Verify intent against
// JacksonConfigManager.set semantics.
current,
CoordinatorCompactionConfig.from(current, ImmutableList.copyOf(updated.values())),
new AuditInfo(
"KillCompactionConfig",
"CoordinatorDuty for automatic deletion of compaction config",
""
)
);
if (result.isOk()) {
log.info(
"Finished running KillCompactionConfig duty. Removed %,d compaction configs",
compactionConfigRemoved
);
emitMetric(params.getEmitter(), compactionConfigRemoved);
} else if (result.isRetryable()) {
// Failed but is retryable: throw RetryableException so RetryUtils' predicate below retries.
log.debug("Retrying KillCompactionConfig duty");
throw new RetryableException(result.getException());
} else {
// Failed and not retryable
log.error(result.getException(), "Failed to kill compaction configurations");
emitMetric(params.getEmitter(), 0);
}
return result;
},
e -> e instanceof RetryableException,
UPDATE_NUM_RETRY
);
}
catch (Exception e) {
// Reached when all retries are exhausted or a non-retryable exception escapes the task.
log.error(e, "Failed to kill compaction configurations");
emitMetric(params.getEmitter(), 0);
}
}
return params;
}

/**
 * Emits {@link #COUNT_METRIC} with the number of compaction configs removed in this run.
 */
private void emitMetric(ServiceEmitter emitter, int compactionConfigRemoved)
{
  emitter.emit(new ServiceMetricEvent.Builder().build(COUNT_METRIC, compactionConfigRemoved));
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,7 @@ public void setUp() throws Exception
null,
null,
null,
null,
10,
new Duration("PT0s")
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,7 @@ public void setUp() throws Exception
null,
null,
null,
null,
10,
new Duration("PT0s")
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ public class HttpLoadQueuePeonTest
null,
null,
null,
null,
10,
Duration.ZERO
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@ public void testMultipleLoadDropSegments() throws Exception
null,
null,
null,
null,
10,
Duration.millis(0)
)
Expand Down Expand Up @@ -308,6 +309,7 @@ public void testFailAssignForNonTimeoutFailures() throws Exception
null,
null,
null,
null,
10,
new Duration("PT1s")
)
Expand Down Expand Up @@ -369,6 +371,7 @@ public void testFailAssignForLoadDropTimeout() throws Exception
null,
null,
null,
null,
10,
new Duration("PT1s")
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ public LoadQueuePeonTester()
null,
null,
null,
null,
10,
new Duration("PT1s")
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ public class TestDruidCoordinatorConfig extends DruidCoordinatorConfig
private final Duration coordinatorSupervisorKillDurationToRetain;
private final Duration coordinatorAuditKillPeriod;
private final Duration coordinatorAuditKillDurationToRetain;
private final Duration coordinatorCompactionKillPeriod;
private final Duration coordinatorRuleKillPeriod;
private final Duration coordinatorRuleKillDurationToRetain;
private final Duration coordinatorDatasourceKillPeriod;
Expand All @@ -54,6 +55,7 @@ public TestDruidCoordinatorConfig(
Duration coordinatorSupervisorKillDurationToRetain,
Duration coordinatorAuditKillPeriod,
Duration coordinatorAuditKillDurationToRetain,
Duration coordinatorCompactionKillPeriod,
Duration coordinatorRuleKillPeriod,
Duration coordinatorRuleKillDurationToRetain,
Duration coordinatorDatasourceKillPeriod,
Expand All @@ -73,6 +75,7 @@ public TestDruidCoordinatorConfig(
this.coordinatorSupervisorKillDurationToRetain = coordinatorSupervisorKillDurationToRetain;
this.coordinatorAuditKillPeriod = coordinatorAuditKillPeriod;
this.coordinatorAuditKillDurationToRetain = coordinatorAuditKillDurationToRetain;
this.coordinatorCompactionKillPeriod = coordinatorCompactionKillPeriod;
this.coordinatorRuleKillPeriod = coordinatorRuleKillPeriod;
this.coordinatorRuleKillDurationToRetain = coordinatorRuleKillDurationToRetain;
this.coordinatorDatasourceKillPeriod = coordinatorDatasourceKillPeriod;
Expand Down Expand Up @@ -141,6 +144,12 @@ public Duration getCoordinatorAuditKillDurationToRetain()
return coordinatorAuditKillDurationToRetain;
}

// Test-scaffold override: returns whatever value was supplied via the constructor (may be null).
@Override
public Duration getCoordinatorCompactionKillPeriod()
{
return coordinatorCompactionKillPeriod;
}

@Override
public Duration getCoordinatorRuleKillPeriod()
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ public void testRunSkipIfLastRunLessThanPeriod()
null,
null,
null,
null,
10,
null
);
Expand Down Expand Up @@ -98,6 +99,7 @@ public void testRunNotSkipIfLastRunMoreThanPeriod()
null,
null,
null,
null,
10,
null
);
Expand Down Expand Up @@ -126,6 +128,7 @@ public void testConstructorFailIfInvalidPeriod()
null,
null,
null,
null,
10,
null
);
Expand Down Expand Up @@ -153,6 +156,7 @@ public void testConstructorFailIfInvalidRetainDuration()
null,
null,
null,
null,
10,
null
);
Expand Down
Loading