Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -197,9 +197,7 @@ public void setup() throws IOException
qIndex = INDEX_IO.loadIndex(indexFile);

factory = new TimeseriesQueryRunnerFactory(
new TimeseriesQueryQueryToolChest(
QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()
),
new TimeseriesQueryQueryToolChest(),
new TimeseriesQueryEngine(),
QueryBenchmarkUtil.NOOP_QUERYWATCHER
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -409,10 +409,7 @@ public String getFormatString()

factory = new GroupByQueryRunnerFactory(
strategySelector,
new GroupByQueryQueryToolChest(
strategySelector,
QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()
)
new GroupByQueryQueryToolChest(strategySelector)
);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -301,10 +301,7 @@ public void setup() throws IOException
0,
Integer.MAX_VALUE
),
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryBenchmarkUtil.NOOP_QUERYWATCHER
);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -260,9 +260,7 @@ public boolean useParallelMergePool()
.put(
TimeseriesQuery.class,
new TimeseriesQueryRunnerFactory(
new TimeseriesQueryQueryToolChest(
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
),
new TimeseriesQueryQueryToolChest(),
new TimeseriesQueryEngine(),
QueryRunnerTestHelper.NOOP_QUERYWATCHER
)
Expand All @@ -274,10 +272,7 @@ public boolean useParallelMergePool()
"TopNQueryRunnerFactory-bufferPool",
() -> ByteBuffer.allocate(PROCESSING_BUFFER_SIZE)
),
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryRunnerTestHelper.NOOP_QUERYWATCHER
)
)
Expand Down Expand Up @@ -374,14 +369,8 @@ private static GroupByQueryRunnerFactory makeGroupByQueryRunnerFactory(
QueryRunnerTestHelper.NOOP_QUERYWATCHER
)
);
final GroupByQueryQueryToolChest toolChest = new GroupByQueryQueryToolChest(
strategySelector,
QueryRunnerTestHelper.sameThreadIntervalChunkingQueryRunnerDecorator()
);
return new GroupByQueryRunnerFactory(
strategySelector,
toolChest
);
final GroupByQueryQueryToolChest toolChest = new GroupByQueryQueryToolChest(strategySelector);
return new GroupByQueryRunnerFactory(strategySelector, toolChest);
}

@TearDown(Level.Trial)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -572,10 +572,7 @@ public String getFormatString()

factory = new GroupByQueryRunnerFactory(
strategySelector,
new GroupByQueryQueryToolChest(
strategySelector,
QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()
)
new GroupByQueryQueryToolChest(strategySelector)
);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,19 +19,14 @@

package org.apache.druid.benchmark.query;

import com.google.common.util.concurrent.ListenableFuture;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.BySegmentQueryRunner;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.IntervalChunkingQueryRunnerDecorator;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
import org.apache.druid.query.QueryToolChest;
import org.apache.druid.query.QueryWatcher;
import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.segment.Segment;
import org.apache.druid.timeline.SegmentId;

Expand All @@ -53,29 +48,5 @@ public static <T, QueryType extends Query<T>> QueryRunner<T> makeQueryRunner(
);
}

public static IntervalChunkingQueryRunnerDecorator noopIntervalChunkingQueryRunnerDecorator()
{
return new IntervalChunkingQueryRunnerDecorator(null, null, null) {
@Override
public <T> QueryRunner<T> decorate(final QueryRunner<T> delegate, QueryToolChest<T, ? extends Query<T>> toolChest)
{
return new QueryRunner<T>() {
@Override
public Sequence<T> run(QueryPlus<T> queryPlus, ResponseContext responseContext)
{
return delegate.run(queryPlus, responseContext);
}
};
}
};
}

public static final QueryWatcher NOOP_QUERYWATCHER = new QueryWatcher()
{
@Override
public void registerQuery(Query query, ListenableFuture future)
{

}
};
public static final QueryWatcher NOOP_QUERYWATCHER = (query, future) -> {};
}
Original file line number Diff line number Diff line change
Expand Up @@ -373,10 +373,7 @@ public void setup() throws IOException
final SearchQueryConfig config = new SearchQueryConfig().withOverrides(query);
factory = new SearchQueryRunnerFactory(
new SearchStrategySelector(Suppliers.ofInstance(config)),
new SearchQueryQueryToolChest(
config,
QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()
),
new SearchQueryQueryToolChest(config),
QueryBenchmarkUtil.NOOP_QUERYWATCHER
);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -296,9 +296,7 @@ public void setup() throws IOException
}

factory = new TimeseriesQueryRunnerFactory(
new TimeseriesQueryQueryToolChest(
QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()
),
new TimeseriesQueryQueryToolChest(),
new TimeseriesQueryEngine(),
QueryBenchmarkUtil.NOOP_QUERYWATCHER
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -279,10 +279,7 @@ public void setup() throws IOException
0,
Integer.MAX_VALUE
),
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryBenchmarkUtil.NOOP_QUERYWATCHER
);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -230,10 +230,7 @@ private void setupQueries()
0,
Integer.MAX_VALUE
),
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryBenchmarkUtil.NOOP_QUERYWATCHER
);
}
Expand Down Expand Up @@ -274,7 +271,7 @@ private void setupQueries()

timeseriesQuery = timeseriesQueryBuilder.build();
timeseriesFactory = new TimeseriesQueryRunnerFactory(
new TimeseriesQueryQueryToolChest(QueryBenchmarkUtil.noopIntervalChunkingQueryRunnerDecorator()),
new TimeseriesQueryQueryToolChest(),
new TimeseriesQueryEngine(),
QueryBenchmarkUtil.NOOP_QUERYWATCHER
);
Expand Down
2 changes: 1 addition & 1 deletion docs/configuration/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -1505,7 +1505,7 @@ Druid broker can optionally retry queries internally for transient errors.

##### Processing

The broker uses processing configs for nested groupBy queries. And, if you use groupBy v1, long-interval queries (of any type) can be broken into shorter interval queries and processed in parallel inside this thread pool. For more details, see "chunkPeriod" in the [query context](../querying/query-context.md) doc.
The broker uses processing configs for nested groupBy queries.

|Property|Description|Default|
|--------|-----------|-------|
Expand Down
5 changes: 0 additions & 5 deletions docs/design/extensions-contrib/dropwizard.md
Original file line number Diff line number Diff line change
Expand Up @@ -123,11 +123,6 @@ Latest default metrics mapping can be found [here](https://github.com/apache/dr
"type": "timer",
"timeUnit": "MILLISECONDS"
},
"query/intervalChunk/time": {
"dimensions": [],
"type": "timer",
"timeUnit": "MILLISECONDS"
},
"query/segment/time": {
"dimensions": [],
"type": "timer",
Expand Down
8 changes: 0 additions & 8 deletions docs/operations/basic-cluster-tuning.md
Original file line number Diff line number Diff line change
Expand Up @@ -144,14 +144,6 @@ On the Broker, the amount of direct memory needed depends on how many merge buff
- `druid.processing.numThreads`: set this to 1 (the minimum allowed)
- `druid.processing.numMergeBuffers`: set this to the same value as on Historicals or a bit higher

##### Note on the deprecated `chunkPeriod`

There is one exception to the Broker not needing processing threads and processing buffers:

If the deprecated `chunkPeriod` property in the [query context](../querying/query-context.md) is set, GroupBy V1 queries will use processing threads and processing buffers on the Broker.

Both `chunkPeriod` and GroupBy V1 are deprecated (use GroupBy V2 instead) and will be removed in the future, we do not recommend using them. The presence of the deprecated `chunkPeriod` feature is why a minimum of 1 processing thread must be configured, even if it's unused.

#### Connection pool sizing

Please see the [General Connection Pool Guidelines](#connection-pool) section for an overview of connection pool configuration.
Expand Down
1 change: 0 additions & 1 deletion docs/operations/metrics.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@ Available Metrics
|`query/node/bytes`|number of bytes returned from querying individual historical/realtime processes.|id, status, server.| |
|`query/node/ttfb`|Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes.|id, status, server.|< 1s|
|`query/node/backpressure`|Milliseconds that the channel to this process has spent suspended due to backpressure.|id, status, server.| |
|`query/intervalChunk/time`|Only emitted if interval chunking is enabled. Milliseconds required to query an interval chunk. This metric is deprecated and will be removed in the future because interval chunking is deprecated. See [Query Context](../querying/query-context.md).|id, status, chunkInterval (if interval chunking is enabled).|< 1s|
|`query/count`|number of total queries|This metric is only available if the QueryCountStatsMonitor module is included.||
|`query/success/count`|number of queries successfully processed|This metric is only available if the QueryCountStatsMonitor module is included.||
|`query/failed/count`|number of failed queries|This metric is only available if the QueryCountStatsMonitor module is included.||
Expand Down
2 changes: 0 additions & 2 deletions docs/querying/groupbyquery.md
Original file line number Diff line number Diff line change
Expand Up @@ -257,8 +257,6 @@ by using a finite-sized merge buffer pool. By default, the number of merge buffe
threads. You can adjust this as necessary to balance concurrency and memory usage.
- groupBy v1 supports caching on either the Broker or Historical processes, whereas groupBy v2 only supports caching on
Historical processes.
- groupBy v1 supports using [chunkPeriod](query-context.html) to parallelize merging on the Broker, whereas groupBy v2
ignores chunkPeriod.
- groupBy v2 supports both array-based aggregation and hash-based aggregation. The array-based aggregation is used only
when the grouping key is a single indexed string column. In array-based aggregation, the dictionary-encoded value is used
as the index, so the aggregated values in the array can be accessed directly without finding buckets based on hashing.
Expand Down
1 change: 0 additions & 1 deletion docs/querying/query-context.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ The query context is used for various query configuration parameters. The follow
|populateResultLevelCache | `true` | Flag indicating whether to save the results of the query to the result level cache. Primarily used for debugging. When set to false, it disables saving the results of this query to the query cache. When set to true, Druid uses `druid.broker.cache.populateResultLevelCache` to determine whether or not to save the results of this query to the result-level query cache |
|bySegment | `false` | Return "by segment" results. Primarily used for debugging, setting it to `true` returns results associated with the data segment they came from |
|finalize | `true` | Flag indicating whether to "finalize" aggregation results. Primarily used for debugging. For instance, the `hyperUnique` aggregator will return the full HyperLogLog sketch instead of the estimated cardinality when this flag is set to `false` |
|chunkPeriod | `P0D` (off) | At the Broker process level, long interval queries (of any type) may be broken into shorter interval queries to parallelize merging more than normal. Broken up queries will use a larger share of cluster resources, but, if you use groupBy "v1", it may be able to complete faster as a result. Use ISO 8601 periods. For example, if this property is set to `P1M` (one month), then a query covering a year would be broken into 12 smaller queries. The broker uses its query processing executor service to initiate processing for query chunks, so make sure `druid.processing.numThreads` is configured appropriately on the broker. [groupBy queries](groupbyquery.html) do not support chunkPeriod by default, although they do if using the legacy "v1" engine. This context is deprecated since it's only useful for groupBy "v1", and will be removed in the future releases.|
|maxScatterGatherBytes| `druid.server.http.maxScatterGatherBytes` | Maximum number of bytes gathered from data processes such as Historicals and realtime processes to execute a query. This parameter can be used to further reduce `maxScatterGatherBytes` limit at query time. See [Broker configuration](../configuration/index.html#broker) for more details.|
|maxQueuedBytes | `druid.broker.http.maxQueuedBytes` | Maximum number of bytes queued per query before exerting backpressure on the channel to the data server. Similar to `maxScatterGatherBytes`, except unlike that configuration, this one will trigger backpressure rather than query failure. Zero means disabled.|
|serializeDateTimeAsLong| `false` | If true, DateTime is serialized as long in the result returned by Broker and the data transportation between Broker and compute process|
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,6 @@ public void setUp()
"query/time, true",
"query/node/ttfb, true",
"query/segmentAndCache/time, true",
"query/intervalChunk/time, false",
"query/time/balaba, true",
"query/tim, false",
"segment/added/bytes, true",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,11 +28,6 @@
"type": "timer",
"timeUnit": "MILLISECONDS"
},
"query/intervalChunk/time": {
"dimensions": [],
"type": "timer",
"timeUnit": "MILLISECONDS"
},
"query/segment/time": {
"dimensions": [],
"type": "timer",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,6 @@ public void setUp()
"query/time, true",
"query/node/ttfb, true",
"query/segmentAndCache/time, true",
"query/intervalChunk/time, false",
"query/time/balaba, true",
"query/tim, false",
"segment/added/bytes, false",
Expand Down Expand Up @@ -136,7 +135,7 @@ public void testWhiteListedStringArrayDimension() throws IOException
);

ServiceMetricEvent event = new ServiceMetricEvent.Builder()
.setDimension("gcName", new String[] {"g1"})
.setDimension("gcName", new String[]{"g1"})
.build(createdTime, "jvm/gc/cpu", 10)
.build(serviceName, hostname);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,12 +55,7 @@ public void testMakePostComputeManipulatorFn()
QueryToolChest materializedViewQueryQueryToolChest =
new MaterializedViewQueryQueryToolChest(new MapQueryToolChestWarehouse(
ImmutableMap.<Class<? extends Query>, QueryToolChest>builder()
.put(
TimeseriesQuery.class,
new TimeseriesQueryQueryToolChest(
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
)
)
.put(TimeseriesQuery.class, new TimeseriesQueryQueryToolChest())
.build()
));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,6 @@
"query/node/ttfb": [
"server"
],
"query/intervalChunk/time": [
"chunkInterval"
],
"query/success/count": [],
"query/failed/count": [],
"query/interrupted/count": [],
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
"query/node/ttfb" : { "dimensions" : ["server"], "type" : "timer"},
"query/node/bytes" : { "dimensions" : ["server"], "type" : "count"},
"query/node/backpressure": { "dimensions" : ["server"], "type" : "timer"},
"query/intervalChunk/time" : { "dimensions" : [], "type" : "timer"},

"query/segment/time" : { "dimensions" : [], "type" : "timer"},
"query/wait/time" : { "dimensions" : [], "type" : "timer"},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -110,10 +110,7 @@ public int getNumThreads()

final GroupByQueryRunnerFactory factory = new GroupByQueryRunnerFactory(
strategySelector,
new GroupByQueryQueryToolChest(
strategySelector,
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
)
new GroupByQueryQueryToolChest(strategySelector)
);

runner = QueryRunnerTestHelper.makeQueryRunner(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,10 +67,7 @@ public void setup() throws IOException

final TopNQueryRunnerFactory factory = new TopNQueryRunnerFactory(
new StupidPool<>("map-virtual-column-test", () -> ByteBuffer.allocate(1024)),
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryRunnerTestHelper.NOOP_QUERYWATCHER
);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -79,20 +79,14 @@ public static Iterable<Object[]> constructorFeeder()
QueryRunnerTestHelper.makeQueryRunners(
new TopNQueryRunnerFactory(
defaultPool,
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryRunnerTestHelper.NOOP_QUERYWATCHER
)
),
QueryRunnerTestHelper.makeQueryRunners(
new TopNQueryRunnerFactory(
customPool,
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryRunnerTestHelper.NOOP_QUERYWATCHER
)
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -79,20 +79,14 @@ public static Iterable<Object[]> constructorFeeder()
QueryRunnerTestHelper.makeQueryRunners(
new TopNQueryRunnerFactory(
defaultPool,
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryRunnerTestHelper.NOOP_QUERYWATCHER
)
),
QueryRunnerTestHelper.makeQueryRunners(
new TopNQueryRunnerFactory(
customPool,
new TopNQueryQueryToolChest(
new TopNQueryConfig(),
QueryRunnerTestHelper.noopIntervalChunkingQueryRunnerDecorator()
),
new TopNQueryQueryToolChest(new TopNQueryConfig()),
QueryRunnerTestHelper.NOOP_QUERYWATCHER
)
)
Expand Down
Loading