@@ -20,9 +20,17 @@
package org.apache.druid.spectator.histogram;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.spectator.api.histogram.PercentileBuckets;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.data.input.InputRow;
import org.apache.druid.data.input.MapBasedInputRow;
import org.apache.druid.data.input.impl.NoopInputRowParser;
import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.Druids;
import org.apache.druid.query.QueryPlus;
@@ -32,6 +40,9 @@
import org.apache.druid.query.aggregation.AggregationTestHelper;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.AggregatorUtil;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory;
import org.apache.druid.query.groupby.GroupByQuery;
import org.apache.druid.query.groupby.GroupByQueryConfig;
import org.apache.druid.query.groupby.GroupByQueryRunnerTest;
import org.apache.druid.query.groupby.ResultRow;
@@ -42,13 +53,18 @@
import org.apache.druid.query.metadata.metadata.SegmentAnalysis;
import org.apache.druid.query.metadata.metadata.SegmentMetadataQuery;
import org.apache.druid.query.timeseries.TimeseriesResultValue;
import org.apache.druid.segment.IncrementalIndexSegment;
import org.apache.druid.segment.IndexIO;
import org.apache.druid.segment.QueryableIndex;
import org.apache.druid.segment.QueryableIndexSegment;
import org.apache.druid.segment.Segment;
import org.apache.druid.segment.TestHelper;
import org.apache.druid.segment.column.ColumnConfig;
import org.apache.druid.segment.incremental.IncrementalIndex;
import org.apache.druid.testing.InitializedNullHandlingTest;
import org.apache.druid.timeline.SegmentId;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
@@ -58,7 +74,9 @@

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -365,6 +383,60 @@ public void testBuildingAndCountingHistograms() throws Exception
Assert.assertEquals(9.0, (Double) results.get(0).get(1), 0.001);
}

@Test
public void testBuildingAndCountingHistogramsIncrementalIndex() throws Exception
{
NullHandling.initializeForTestsWithValues(true, true);
List<String> dimensions = Collections.singletonList("d");
int n = 10;
DateTime startOfDay = DateTime.now(DateTimeZone.UTC).withTimeAtStartOfDay();
List<InputRow> inputRows = new ArrayList<>(n);
for (int i = 1; i <= n; i++) {
String val = String.valueOf(i * 1.0d);

inputRows.add(new MapBasedInputRow(
startOfDay.plusMinutes(i),
dimensions,
ImmutableMap.of("x", i, "d", val)
));
}

IncrementalIndex index = AggregationTestHelper.createIncrementalIndex(
inputRows.iterator(),
new NoopInputRowParser(null),
new AggregatorFactory[]{
new CountAggregatorFactory("count"),
new SpectatorHistogramAggregatorFactory("histogram", "x")
},
0,
Granularities.NONE,
100,
false
);

ImmutableList<Segment> segments = ImmutableList.of(
new IncrementalIndexSegment(index, SegmentId.dummy("test")),
helper.persistIncrementalIndex(index, null)
);

GroupByQuery query = new GroupByQuery.Builder()
.setDataSource("test")
.setGranularity(Granularities.HOUR)
.setInterval("1970/2050")
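// All of the input rows fall within the same hour, so HOUR granularity should
// collapse them into a single result row.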
.setAggregatorSpecs(
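// Sum the complex histogram column directly; judging by the assertion below,
// each histogram is implicitly converted to a number (presumably its total count).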
new DoubleSumAggregatorFactory("doubleSum", "histogram")
Member

I don't really see why this is beneficial at all - why can't the user invoke a method which will translate the histogram to a literal and then aggregate those values with sum?

I think an aggregate which is represented by a sketch or something normally can't be represented by a single literal number; if we want to introduce something which translates the sketch to some other type automatically - I think that should be registered properly and not hidden in the aggregator as some bonus functionality.

Am I alone with this mindset?
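
For illustration only (this is not code from the PR): one explicit shape of that alternative would be a registered post-aggregator that performs the histogram-to-number translation, instead of DoubleSumAggregatorFactory accepting the complex column implicitly. The SpectatorHistogramToNumberPostAggregator name below is hypothetical; the GroupByQuery builder, Granularities, Collections, and SpectatorHistogramAggregatorFactory calls are the ones already used in this test.

// Hypothetical sketch; "SpectatorHistogramToNumberPostAggregator" does not exist
// in this PR and stands in for an explicitly registered conversion from the
// histogram complex type to a plain number.
GroupByQuery explicitQuery = new GroupByQuery.Builder()
    .setDataSource("test")
    .setGranularity(Granularities.HOUR)
    .setInterval("1970/2050")
    .setAggregatorSpecs(
        // Merge the stored histograms per result row instead of summing them away.
        new SpectatorHistogramAggregatorFactory("histogram", "histogram")
    )
    .setPostAggregatorSpecs(
        Collections.singletonList(
            // Translate the merged histogram into a literal (e.g. its total count)
            // as an explicit, registered step.
            new SpectatorHistogramToNumberPostAggregator("doubleSum", "histogram")
        )
    )
    .build();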

).build();

Sequence<ResultRow> seq = helper.runQueryOnSegmentsObjs(segments, query);

List<ResultRow> results = seq.toList();
Assert.assertEquals(1, results.size());
// Check timestamp
Assert.assertEquals(startOfDay.getMillis(), results.get(0).get(0));
// Check doubleSum
Assert.assertEquals(n, (Double) results.get(0).get(1), 0.001);
}

@Test
public void testBuildingAndCountingHistogramsWithNullFilter() throws Exception
{