Skip to content
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,15 @@

import org.apache.druid.java.util.emitter.core.Event;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
import org.apache.druid.java.util.emitter.service.ServiceMetricEvent;

import java.util.ArrayList;
import java.util.List;

public class StubServiceEmitter extends ServiceEmitter
{
private List<Event> events = new ArrayList<>();
private final List<Event> events = new ArrayList<>();
private final List<ServiceMetricEvent> metricEvents = new ArrayList<>();

public StubServiceEmitter(String service, String host)
{
Expand All @@ -37,14 +39,28 @@ public StubServiceEmitter(String service, String host)
@Override
public void emit(Event event)
{
  // Record every event; metric events are additionally tracked separately
  // so tests can assert on emitted metrics without filtering themselves.
  events.add(event);
  if (event instanceof ServiceMetricEvent) {
    metricEvents.add((ServiceMetricEvent) event);
  }
}

/**
 * Gets all the events emitted since the previous {@link #flush()}.
 * <p>
 * Returns the live internal list (not a copy): it reflects subsequent
 * {@link #emit} calls and is emptied by {@link #flush()}.
 */
public List<Event> getEvents()
{
return events;
}

/**
 * Gets all the metric events emitted since the previous {@link #flush()}.
 * <p>
 * Returns the live internal list (not a copy): it reflects subsequent
 * {@link #emit} calls and is emptied by {@link #flush()}.
 */
public List<ServiceMetricEvent> getMetricEvents()
{
return metricEvents;
}

@Override
public void start()
{
Expand All @@ -53,6 +69,8 @@ public void start()
@Override
public void flush()
{
  // Discard everything recorded so far so the next test phase starts clean.
  metricEvents.clear();
  events.clear();
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -973,6 +973,14 @@ public List<? extends CoordinatorDuty> getDuties()
{
return duties;
}

@Override
public String toString()
{
  // Identifies this runnable by its alias in logs and thread dumps.
  final StringBuilder sb = new StringBuilder("DutiesRunnable{");
  sb.append("dutiesRunnableAlias='").append(dutiesRunnableAlias).append('\'');
  return sb.append('}').toString();
}
Comment on lines +978 to +983
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I know this comment isn't about your code, but your addition of the toString here made me wonder why DruidCoordinator's toString reads as "DutiesRunnable". The class is probably large enough (and already depended upon, see @VisibleForTesting annotation peppering this code) that maybe it's just time to promote it to its own class.

Copy link
Copy Markdown
Contributor Author

@kfaraz kfaraz Sep 20, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh, yeah, the VisibleForTesting is ubiquitous 😅

It would be good to pull out DutiesRunnable. Right now, it seems to be directly using pretty much all the fields that DruidCoordinator contains. That's probably why it is still hanging around here and why it is not a static inner class either.

The preferable way to do this would be for DruidCoordinator to expose a bunch of methods that update the state of segmentManager and other fields that DutiesRunnable needs to access. And the DutiesRunnable constructor just gets the DruidCoordinator instance. DruidCoordinator already exposes other such utility methods such as moveSegment() or markSegmentAsUnused() which are used by the actual duties themselves.

Let me know if this approach makes sense. We can get it done in a follow-up PR.

}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@ private void balanceTier(
)
{

log.info("Balancing segments in tier [%s]", tier);
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there any more information that can be added to this? Having just the fact that the balancing occurred is useful, but if we can like add sizes or anything else that might be nice to have when trying to understand what happened, that can make it even more useful.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I intend to clean up the logs and add some more useful metrics around balancing/loading as a follow up to these changes.

if (params.getUsedSegments().size() == 0) {
log.info("Metadata segments are not available. Cannot balance.");
// suppress emit zero stats
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.NoneShardSpec;
import org.easymock.EasyMock;
import org.hamcrest.Matchers;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.junit.After;
Expand Down Expand Up @@ -277,9 +276,9 @@ public void testMoveDecommissioningMaxPercentOfMaxSegmentsToMove()
params = new BalanceSegmentsTester(coordinator).run(params);
EasyMock.verify(strategy);
Assert.assertEquals(3L, params.getCoordinatorStats().getTieredStat("movedCount", "normal"));
Assert.assertThat(
peon3.getSegmentsToLoad(),
Matchers.is(Matchers.equalTo(ImmutableSet.of(segment1, segment3, segment4)))
Assert.assertEquals(
ImmutableSet.of(segment1, segment3, segment4),
peon3.getSegmentsToLoad()
);
}

Expand All @@ -289,7 +288,7 @@ public void testZeroDecommissioningMaxPercentOfMaxSegmentsToMove()
DruidCoordinatorRuntimeParams params = setupParamsForDecommissioningMaxPercentOfMaxSegmentsToMove(0);
params = new BalanceSegmentsTester(coordinator).run(params);
Assert.assertEquals(1L, params.getCoordinatorStats().getTieredStat("movedCount", "normal"));
Assert.assertThat(peon3.getSegmentsToLoad(), Matchers.is(Matchers.equalTo(ImmutableSet.of(segment1))));
Assert.assertEquals(ImmutableSet.of(segment1), peon3.getSegmentsToLoad());
}

@Test
Expand All @@ -298,7 +297,7 @@ public void testMaxDecommissioningMaxPercentOfMaxSegmentsToMove()
DruidCoordinatorRuntimeParams params = setupParamsForDecommissioningMaxPercentOfMaxSegmentsToMove(10);
params = new BalanceSegmentsTester(coordinator).run(params);
Assert.assertEquals(1L, params.getCoordinatorStats().getTieredStat("movedCount", "normal"));
Assert.assertThat(peon3.getSegmentsToLoad(), Matchers.is(Matchers.equalTo(ImmutableSet.of(segment2))));
Assert.assertEquals(ImmutableSet.of(segment2), peon3.getSegmentsToLoad());
}

/**
Expand Down Expand Up @@ -347,9 +346,9 @@ public void testMoveDecommissioningMaxPercentOfMaxSegmentsToMoveWithNoDecommissi
params = new BalanceSegmentsTester(coordinator).run(params);
EasyMock.verify(strategy);
Assert.assertEquals(3L, params.getCoordinatorStats().getTieredStat("movedCount", "normal"));
Assert.assertThat(
peon3.getSegmentsToLoad(),
Matchers.is(Matchers.equalTo(ImmutableSet.of(segment2, segment3, segment4)))
Assert.assertEquals(
ImmutableSet.of(segment2, segment3, segment4),
peon3.getSegmentsToLoad()
);
}

Expand Down Expand Up @@ -603,10 +602,7 @@ public void testThatDynamicConfigIsHonoredWhenPickingSegmentToMove()
params = new BalanceSegmentsTester(coordinator).run(params);
EasyMock.verify(strategy);
Assert.assertEquals(1L, params.getCoordinatorStats().getTieredStat("movedCount", "normal"));
Assert.assertThat(
peon3.getSegmentsToLoad(),
Matchers.is(Matchers.equalTo(ImmutableSet.of(segment3)))
);
Assert.assertEquals(ImmutableSet.of(segment3), peon3.getSegmentsToLoad());
}

@Test
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.druid.server.coordinator;

import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.segment.IndexIO;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.NumberedShardSpec;
import org.joda.time.DateTime;
import org.joda.time.Interval;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
* Test utility to create {@link DataSegment}s for a given datasource.
*/
/**
 * Test utility to create {@link DataSegment}s for a given datasource.
 * <p>
 * Typical usage:
 * <pre>{@code
 * List<DataSegment> segments =
 *     CreateDataSegments.ofDatasource("wiki")
 *                       .forIntervals(24, Granularities.HOUR)
 *                       .startingAt("2012-01-01")
 *                       .withNumPartitions(1)
 *                       .eachOfSizeInMb(1);
 * }</pre>
 */
public class CreateDataSegments
{
  private final String datasource;

  // Builder state; populated by the fluent setters below.
  private DateTime startTime;
  private Granularity granularity;
  private int numPartitions;
  private int numIntervals;

  /**
   * Creates a builder of segments for the given datasource.
   */
  public static CreateDataSegments ofDatasource(String datasource)
  {
    return new CreateDataSegments(datasource);
  }

  private CreateDataSegments(String datasource)
  {
    this.datasource = datasource;
  }

  /**
   * Specifies the number of consecutive intervals to generate, each spanning
   * one unit of the given granularity.
   */
  public CreateDataSegments forIntervals(int numIntervals, Granularity intervalSize)
  {
    this.numIntervals = numIntervals;
    this.granularity = intervalSize;
    return this;
  }

  /**
   * Specifies the start of the first interval, e.g. {@code "2012-01-01"}.
   */
  public CreateDataSegments startingAt(String startOfFirstInterval)
  {
    this.startTime = DateTimes.of(startOfFirstInterval);
    return this;
  }

  /**
   * Specifies the number of partitions (i.e. segments) created per interval.
   */
  public CreateDataSegments withNumPartitions(int numPartitions)
  {
    this.numPartitions = numPartitions;
    return this;
  }

  /**
   * Creates and returns an unmodifiable list of {@code numIntervals * numPartitions}
   * segments, each of the given size.
   * <p>
   * NOTE(review): {@code sizeMb} is passed as-is into the segment's size field
   * with no unit conversion — confirm callers treat the value consistently.
   * <p>
   * {@link #forIntervals} and {@link #startingAt} must have been called first,
   * otherwise this throws a {@link NullPointerException}.
   */
  public List<DataSegment> eachOfSizeInMb(long sizeMb)
  {
    final List<DataSegment> segments = new ArrayList<>();

    // Counter unique across the whole generated list (it is intentionally not
    // reset per interval) so that each segment's toString() is distinct,
    // which makes test failures easier to read.
    int uniqueId = 0;
    DateTime nextStart = startTime;
    for (int numInterval = 0; numInterval < numIntervals; ++numInterval) {
      Interval nextInterval = new Interval(nextStart, granularity.increment(nextStart));
      for (int numPartition = 0; numPartition < numPartitions; ++numPartition) {
        segments.add(
            new NumberedDataSegment(
                datasource,
                nextInterval,
                new NumberedShardSpec(numPartition, numPartitions),
                ++uniqueId,
                sizeMb
            )
        );
      }
      nextStart = granularity.increment(nextStart);
    }

    return Collections.unmodifiableList(segments);
  }

  /**
   * Simple implementation of DataSegment with a unique integer id to make debugging easier.
   */
  private static class NumberedDataSegment extends DataSegment
  {
    private final int uniqueId;

    private NumberedDataSegment(
        String datasource,
        Interval interval,
        NumberedShardSpec shardSpec,
        int uniqueId,
        long size
    )
    {
      super(
          datasource,
          interval,
          "1",
          Collections.emptyMap(),
          Collections.emptyList(),
          Collections.emptyList(),
          shardSpec,
          IndexIO.CURRENT_VERSION_ID,
          size
      );
      this.uniqueId = uniqueId;
    }

    @Override
    public String toString()
    {
      return "{" + getDataSource() + "::" + uniqueId + "}";
    }
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
import org.apache.druid.client.DruidServer;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;
import org.apache.druid.java.util.emitter.service.ServiceEventBuilder;
Expand All @@ -41,8 +42,6 @@
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.NoneShardSpec;
import org.easymock.EasyMock;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
Expand Down Expand Up @@ -81,24 +80,11 @@ public void setUp()
databaseRuleManager = EasyMock.createMock(MetadataRuleManager.class);
segmentsMetadataManager = EasyMock.createNiceMock(SegmentsMetadataManager.class);

DateTime start = DateTimes.of("2012-01-01");
usedSegments = new ArrayList<>();
for (int i = 0; i < 24; i++) {
usedSegments.add(
new DataSegment(
"test",
new Interval(start, start.plusHours(1)),
DateTimes.nowUtc().toString(),
new HashMap<>(),
new ArrayList<>(),
new ArrayList<>(),
NoneShardSpec.instance(),
IndexIO.CURRENT_VERSION_ID,
1
)
);
start = start.plusHours(1);
}
usedSegments = CreateDataSegments.ofDatasource("test")
.forIntervals(24, Granularities.HOUR)
.startingAt("2012-01-01")
.withNumPartitions(1)
.eachOfSizeInMb(1);

ruleRunner = new RunRules(new ReplicationThrottler(24, 1, false), coordinator);
}
Expand Down
Loading