Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,8 @@ public int compare(int lhs, int rhs)
swappers.add(swapper);
}

Arrays.quickSort(
// Use stable sort, so peer rows retain original order.
Arrays.mergeSort(
0,
rows.size(),
(lhs, rhs) -> {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,9 @@ public RowsAndColumns complete()
}

final int numColsToCompare = index;
Arrays.quickSort(

// Use stable sort, so peer rows retain original order.
Arrays.mergeSort(
0,
rac.numRows(),
(k1, k2) -> {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ public DruidCorrelateUnnestRel withPartialQuery(PartialDruidQuery newQueryBuilde
{
return new DruidCorrelateUnnestRel(
getCluster(),
getTraitSet().plusAll(newQueryBuilder.getRelTraits()),
newQueryBuilder.getTraitSet(getConvention()),
correlateRel,
newQueryBuilder,
leftFilter,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ public DruidJoinQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilde
{
return new DruidJoinQueryRel(
getCluster(),
getTraitSet().plusAll(newQueryBuilder.getRelTraits()),
newQueryBuilder.getTraitSet(getConvention()),
joinRel,
leftFilter,
newQueryBuilder,
Expand All @@ -136,7 +136,7 @@ public DruidJoinQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilde
public DruidQuery toDruidQuery(final boolean finalizeAggregations)
{
final DruidRel<?> leftDruidRel = (DruidRel<?>) left;
final DruidQuery leftQuery = Preconditions.checkNotNull((leftDruidRel).toDruidQuery(false), "leftQuery");
final DruidQuery leftQuery = Preconditions.checkNotNull(leftDruidRel.toDruidQuery(false), "leftQuery");
final RowSignature leftSignature = leftQuery.getOutputRowSignature();
final DataSource leftDataSource;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ public static DruidOuterQueryRel create(
{
return new DruidOuterQueryRel(
sourceRel.getCluster(),
sourceRel.getTraitSet().plusAll(partialQuery.getRelTraits()),
partialQuery.getTraitSet(sourceRel.getConvention()),
sourceRel,
partialQuery,
sourceRel.getPlannerContext()
Expand All @@ -91,7 +91,7 @@ public DruidOuterQueryRel withPartialQuery(final PartialDruidQuery newQueryBuild
{
return new DruidOuterQueryRel(
getCluster(),
getTraitSet().plusAll(newQueryBuilder.getRelTraits()),
newQueryBuilder.getTraitSet(getConvention()),
sourceRel,
newQueryBuilder,
getPlannerContext()
Expand Down
38 changes: 21 additions & 17 deletions sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java
Original file line number Diff line number Diff line change
Expand Up @@ -473,6 +473,7 @@ private static Grouping computeGrouping(
* @param virtualColumnRegistry re-usable virtual column references
* @param typeFactory factory for SQL types
* @return dimensions
*
* @throws CannotBuildQueryException if dimensions cannot be computed
*/
private static List<DimensionExpression> computeDimensions(
Expand Down Expand Up @@ -582,7 +583,9 @@ private static Subtotals computeSubtotals(
* @param finalizeAggregations true if this query should include explicit finalization for all of its
* aggregators, where required. Useful for subqueries where Druid's native query layer
* does not do this automatically.
*
* @return aggregations
*
* @throws CannotBuildQueryException if dimensions cannot be computed
*/
private static List<Aggregation> computeAggregations(
Expand Down Expand Up @@ -876,8 +879,6 @@ private static Filtration toFiltration(DimFilter filter, VirtualColumnRegistry v
* <p>
* Necessary because some combinations are unsafe, mainly because they would lead to the creation of too many
* time-granular buckets during query processing.
*
* @see Granularity#getIterable(Interval) the problematic method call we are trying to avoid
*/
private static boolean canUseQueryGranularity(
final DataSource dataSource,
Expand Down Expand Up @@ -954,11 +955,6 @@ public Query<?> getQuery()
*/
private Query<?> computeQuery()
{
if (windowing != null) {
// Windowing can only be handled by window queries.
return toWindowQuery();
}

if (dataSource instanceof QueryDataSource) {
// If there is a subquery, then we prefer the outer query to be a groupBy if possible, since this potentially
// enables more efficient execution. (The groupBy query toolchest can handle some subqueries by itself, without
Expand All @@ -970,6 +966,11 @@ private Query<?> computeQuery()
}
}

final WindowOperatorQuery operatorQuery = toWindowQuery();
if (operatorQuery != null) {
return operatorQuery;
}

final TimeBoundaryQuery timeBoundaryQuery = toTimeBoundaryQuery();
if (timeBoundaryQuery != null) {
return timeBoundaryQuery;
Expand Down Expand Up @@ -1010,7 +1011,8 @@ private TimeBoundaryQuery toTimeBoundaryQuery()
|| grouping == null
|| grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs())
|| grouping.getHavingFilter() != null
|| selectProjection != null) {
|| selectProjection != null
|| windowing != null) {
return null;
}

Expand Down Expand Up @@ -1074,7 +1076,8 @@ private TimeseriesQuery toTimeseriesQuery()
if (!plannerContext.engineHasFeature(EngineFeature.TIMESERIES_QUERY)
|| grouping == null
|| grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs())
|| grouping.getHavingFilter() != null) {
|| grouping.getHavingFilter() != null
|| windowing != null) {
return null;
}

Expand Down Expand Up @@ -1194,7 +1197,7 @@ private TopNQuery toTopNQuery()
}

// Must have GROUP BY one column, no GROUPING SETS, ORDER BY ≤ 1 column, LIMIT > 0 and ≤ maxTopNLimit,
// no OFFSET, no HAVING.
// no OFFSET, no HAVING, no windowing.
final boolean topNOk = grouping != null
&& grouping.getDimensions().size() == 1
&& !grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs())
Expand All @@ -1205,7 +1208,8 @@ private TopNQuery toTopNQuery()
&& sorting.getOffsetLimit().getLimit() <= plannerContext.getPlannerConfig()
.getMaxTopNLimit()
&& !sorting.getOffsetLimit().hasOffset())
&& grouping.getHavingFilter() == null;
&& grouping.getHavingFilter() == null
&& windowing == null;

if (!topNOk) {
return null;
Expand Down Expand Up @@ -1284,7 +1288,7 @@ private TopNQuery toTopNQuery()
@Nullable
private GroupByQuery toGroupByQuery()
{
if (grouping == null) {
if (grouping == null || windowing != null) {
return null;
}

Expand Down Expand Up @@ -1429,8 +1433,8 @@ private WindowOperatorQuery toWindowQuery()
@Nullable
private ScanQuery toScanQuery()
{
if (grouping != null) {
// Scan cannot GROUP BY.
if (grouping != null || windowing != null) {
// Scan cannot GROUP BY or do windows.
return null;
}

Expand Down Expand Up @@ -1484,16 +1488,16 @@ private ScanQuery toScanQuery()
// Cannot handle this ordering.
// Scan cannot ORDER BY non-time columns.
plannerContext.setPlanningError(
"SQL query requires order by non-time column %s that is not supported.",
"SQL query requires order by non-time column %s, which is not supported.",
orderByColumns
);
return null;
}
if (!dataSource.isConcrete()) {
// Cannot handle this ordering.
// Scan cannot ORDER BY non-time columns.
// Scan cannot ORDER BY non-concrete datasources on _any_ column.
plannerContext.setPlanningError(
"SQL query is a scan and requires order by on a datasource[%s], which is not supported.",
"SQL query requires order by on non-concrete datasource [%s], which is not supported.",
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I was looking at this PR for some other reason. I don't think we should be using "concrete" since that concept is not well understood by users. As a user, it would not be clear to me what a non-concrete data source means. What do you think?

Copy link
Copy Markdown
Contributor Author

@gianm gianm Apr 20, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The message was removed anyway in #13965: https://github.com/apache/druid/pull/13965/files#r1145293604.

Although, TBH, I don't understand why it was removed. The comment "Since we are using a table data source and not a query data source now the isConcrete() check is not needed" doesn't really make sense to me. It's still possible for the dataSource to be non-concrete at this point in the code. I tried a test query that generates a non-concrete datasource at this point in the code, and the error you get is like this:

Time-ordering on scan queries is only supported for queries with segment specs of type MultipleSpecificSegmentSpec

It's from native execution, not from SQL planning, since the query does now pass the SQL planner. IMO in terms of clarity, it's even worse 😛

Test query is:

select __time as t, m1
from (select __time, m1 from druid.foo where __time >= timestamp '1970-01-01 00:00:00')
where (m1 in (select distinct m1 from druid.foo))
order by 1
limit 1

I think we could address this by restoring a check here, but instead of checking isConcrete, check isConcreteBased. That'd allow joins on concrete stuff, but not subqueries. Then for the message, we could do something like:

ORDER BY is only supported for __time, and only on regular tables (not subqueries)

Or, we could spend the time supporting order-by for all scans ✨

dataSource
);
return null;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@
import org.apache.druid.sql.calcite.table.DruidTable;

import javax.annotation.Nullable;

import java.util.Set;

/**
Expand Down Expand Up @@ -170,7 +169,7 @@ public DruidQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilder)
{
return new DruidQueryRel(
getCluster(),
getTraitSet().plusAll(newQueryBuilder.getRelTraits()),
newQueryBuilder.getTraitSet(getConvention()),
table,
druidTable,
getPlannerContext(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ public DruidUnionDataSourceRel withPartialQuery(final PartialDruidQuery newQuery
{
return new DruidUnionDataSourceRel(
getCluster(),
getTraitSet().plusAll(newQueryBuilder.getRelTraits()),
newQueryBuilder.getTraitSet(getConvention()),
unionRel,
unionColumnNames,
newQueryBuilder,
Expand Down
Loading