From 7e56e00f338400b52a130390997920bbbe26bb01 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Tue, 17 Sep 2019 17:10:02 -0700 Subject: [PATCH 01/15] Upgrade Calcite to 1.21 --- .../org/apache/druid/math/expr/Function.java | 13 +- pom.xml | 2 +- .../calcite/planner/DruidConvertletTable.java | 2 + .../calcite/planner/DruidOperatorTable.java | 13 +- .../sql/calcite/planner/PlannerFactory.java | 6 + .../druid/sql/calcite/CalciteQueryTest.java | 300 ++++++++++-------- 6 files changed, 200 insertions(+), 136 deletions(-) diff --git a/core/src/main/java/org/apache/druid/math/expr/Function.java b/core/src/main/java/org/apache/druid/math/expr/Function.java index 4439a667531b..ae71b7e24789 100644 --- a/core/src/main/java/org/apache/druid/math/expr/Function.java +++ b/core/src/main/java/org/apache/druid/math/expr/Function.java @@ -29,6 +29,7 @@ import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; +import javax.annotation.Nullable; import java.math.BigDecimal; import java.math.RoundingMode; import java.util.ArrayList; @@ -224,7 +225,7 @@ protected final ExprEval eval(ExprEval x, ExprEval y) return eval(x.asString(), y.asInt()); } - protected abstract ExprEval eval(String x, int y); + protected abstract ExprEval eval(@Nullable String x, int y); } /** @@ -1455,7 +1456,7 @@ public String name() } @Override - protected ExprEval eval(String x, int y) + protected ExprEval eval(@Nullable String x, int y) { if (y < 0) { throw new IAE( @@ -1463,6 +1464,9 @@ protected ExprEval eval(String x, int y) name() ); } + if (x == null) { + return ExprEval.of(null); + } int len = x.length(); return ExprEval.of(y < len ? 
x.substring(len - y) : x); } @@ -1477,7 +1481,7 @@ public String name() } @Override - protected ExprEval eval(String x, int y) + protected ExprEval eval(@Nullable String x, int y) { if (y < 0) { throw new IAE( @@ -1485,6 +1489,9 @@ protected ExprEval eval(String x, int y) name() ); } + if (x == null) { + return ExprEval.of(null); + } return ExprEval.of(y < x.length() ? x.substring(0, y) : x); } } diff --git a/pom.xml b/pom.xml index 154847a0d2f4..b9989d949e3e 100644 --- a/pom.xml +++ b/pom.xml @@ -80,7 +80,7 @@ 2.12.0 1.12.0 1.9.0 - 1.17.0 + 1.21.0 10.14.2.0 4.0.0 16.0.1 diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java index 7363e76f42ee..84b26cdb4bda 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidConvertletTable.java @@ -68,6 +68,8 @@ public class DruidConvertletTable implements SqlRexConvertletTable .add(SqlStdOperatorTable.TIMESTAMP_DIFF) .add(SqlStdOperatorTable.UNION) .add(SqlStdOperatorTable.UNION_ALL) + .add(SqlStdOperatorTable.NULLIF) + .add(SqlStdOperatorTable.COALESCE) .add(OracleSqlOperatorTable.NVL) .build(); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java index a6b7ce842a50..8c9982d1a648 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java @@ -29,8 +29,10 @@ import org.apache.calcite.sql.SqlOperatorTable; import org.apache.calcite.sql.SqlSyntax; import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.validate.SqlNameMatcher; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; +import 
org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.sql.calcite.aggregation.SqlAggregator; import org.apache.druid.sql.calcite.aggregation.builtin.ApproxCountDistinctSqlAggregator; import org.apache.druid.sql.calcite.aggregation.builtin.AvgSqlAggregator; @@ -112,6 +114,8 @@ public class DruidOperatorTable implements SqlOperatorTable { + private static final EmittingLogger log = new EmittingLogger(DruidOperatorTable.class); + private static final List STANDARD_AGGREGATORS = ImmutableList.builder() .add(new ApproxCountDistinctSqlAggregator()) @@ -352,10 +356,11 @@ public SqlOperatorConversion lookupOperatorConversion(final SqlOperator operator @Override public void lookupOperatorOverloads( - final SqlIdentifier opName, - final SqlFunctionCategory category, - final SqlSyntax syntax, - final List operatorList + SqlIdentifier opName, + SqlFunctionCategory category, + SqlSyntax syntax, + List operatorList, + SqlNameMatcher nameMatcher ) { if (opName == null) { diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java index 6a7cb68ae32c..3d5f2fac3ffc 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java @@ -138,6 +138,12 @@ public C unwrap(final Class aClass) final Properties props = new Properties(); return (C) new CalciteConnectionConfigImpl(props) { + @Override + public T typeSystem(Class typeSystemClass, T defaultTypeSystem) + { + return (T) DruidTypeSystem.INSTANCE; + } + @Override public SqlConformance conformance() { diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 12448503a196..9c32e3626417 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -25,6 +25,7 @@ import org.apache.calcite.tools.ValidationException; import org.apache.druid.common.config.NullHandling; import org.apache.druid.java.util.common.DateTimes; +import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.JodaUtils; import org.apache.druid.java.util.common.granularity.Granularities; @@ -39,6 +40,7 @@ import org.apache.druid.query.aggregation.FilteredAggregatorFactory; import org.apache.druid.query.aggregation.FloatMaxAggregatorFactory; import org.apache.druid.query.aggregation.FloatMinAggregatorFactory; +import org.apache.druid.query.aggregation.FloatSumAggregatorFactory; import org.apache.druid.query.aggregation.LongMaxAggregatorFactory; import org.apache.druid.query.aggregation.LongMinAggregatorFactory; import org.apache.druid.query.aggregation.LongSumAggregatorFactory; @@ -78,10 +80,12 @@ import org.joda.time.DateTimeZone; import org.joda.time.Interval; import org.joda.time.Period; +import org.junit.Assert; import org.junit.Ignore; import org.junit.Test; import org.junit.internal.matchers.ThrowableMessageMatcher; +import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -137,7 +141,7 @@ public void testSelectCountStart() throws Exception ImmutableList.of(Druids.newTimeseriesQueryBuilder() .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) - .filters(selector("dim2", "0", null)) + .filters(bound("dim2", "0", "0", false, false, null, StringComparators.NUMERIC)) .granularity(Granularities.ALL) .aggregators(aggregators( new CountAggregatorFactory("a0"), @@ -757,16 +761,16 @@ public void testSelectStarFromSelectSingleColumnWithLimitDescending() throws Exc newScanQueryBuilder() .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) - 
.columns(ImmutableList.of("__time", "dim1")) + .columns(ImmutableList.of("dim1")) .limit(2) - .order(ScanQuery.Order.DESCENDING) + .order(ScanQuery.Order.NONE) .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST) .context(QUERY_CONTEXT_DEFAULT) .build() ), ImmutableList.of( - new Object[]{"abc"}, - new Object[]{"def"} + new Object[]{""}, + new Object[]{"10.1"} ) ); } @@ -800,6 +804,9 @@ public void testSelectProjectionFromSelectSingleColumnDescending() throws Except { // Regression test for https://github.com/apache/incubator-druid/issues/7768. + // After upgrading to Calcite 1.21, the results return in the wrong order, the ORDER BY __time DESC + // in the inner query is not being applied. + testQuery( "SELECT 'beep ' || dim1 FROM (SELECT dim1 FROM druid.foo ORDER BY __time DESC)", ImmutableList.of( @@ -807,19 +814,19 @@ public void testSelectProjectionFromSelectSingleColumnDescending() throws Except .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) .virtualColumns(expressionVirtualColumn("v0", "concat('beep ',\"dim1\")", ValueType.STRING)) - .columns(ImmutableList.of("__time", "v0")) - .order(ScanQuery.Order.DESCENDING) + .columns(ImmutableList.of("v0")) + .order(ScanQuery.Order.NONE) .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST) .context(QUERY_CONTEXT_DEFAULT) .build() ), ImmutableList.of( - new Object[]{"beep abc"}, - new Object[]{"beep def"}, - new Object[]{"beep 1"}, - new Object[]{"beep 2"}, + new Object[]{"beep "}, new Object[]{"beep 10.1"}, - new Object[]{"beep "} + new Object[]{"beep 2"}, + new Object[]{"beep 1"}, + new Object[]{"beep def"}, + new Object[]{"beep abc"} ) ); } @@ -1339,7 +1346,7 @@ public void testColumnComparison() throws Exception .setDataSource(CalciteTests.DATASOURCE1) .setInterval(querySegmentSpec(Filtration.eternity())) .setGranularity(Granularities.ALL) - .setDimFilter(expressionFilter("((\"m1\" - 1) == \"dim1\")")) + .setDimFilter(expressionFilter("((\"m1\" - 
1) == CAST(\"dim1\", 'DOUBLE'))")) .setDimensions(dimensions( new DefaultDimensionSpec("dim1", "d0"), new DefaultDimensionSpec("m1", "d1", ValueType.FLOAT) @@ -1793,7 +1800,16 @@ public void testNullEmptyStringEquality() throws Exception .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) .granularity(Granularities.ALL) - .filters(expressionFilter("case_searched((\"dim2\" == 'a'),1,isnull(\"dim2\"))")) + .filters( + or( + selector("dim2", "a", null), + and( + selector("dim2", null, null), + not(selector("dim2", "a", null)) + ) + ) + ) + //.filters(expressionFilter("case_searched((\"dim2\" == 'a'),1,isnull(\"dim2\"))")) .aggregators(aggregators(new CountAggregatorFactory("a0"))) .context(TIMESERIES_CONTEXT_DEFAULT) .build() @@ -1811,35 +1827,68 @@ public void testNullEmptyStringEquality() throws Exception @Test public void testEmptyStringEquality() throws Exception { - testQuery( - "SELECT COUNT(*)\n" - + "FROM druid.foo\n" - + "WHERE NULLIF(dim2, 'a') = ''", - ImmutableList.of( - Druids.newTimeseriesQueryBuilder() - .dataSource(CalciteTests.DATASOURCE1) - .intervals(querySegmentSpec(Filtration.eternity())) - .granularity(Granularities.ALL) - .filters(expressionFilter("case_searched((\"dim2\" == 'a')," - + (NullHandling.replaceWithDefault() ? "1" : "0") - + ",(\"dim2\" == ''))")) - .aggregators(aggregators(new CountAggregatorFactory("a0"))) - .context(TIMESERIES_CONTEXT_DEFAULT) - .build() - ), - ImmutableList.of( - NullHandling.replaceWithDefault() ? 
- // Matches everything but "abc" - new Object[]{5L} : - // match only empty string - new Object[]{1L} - ) - ); + if (NullHandling.replaceWithDefault()) { + testQuery( + "SELECT COUNT(*)\n" + + "FROM druid.foo\n" + + "WHERE NULLIF(dim2, 'a') = ''", + ImmutableList.of( + Druids.newTimeseriesQueryBuilder() + .dataSource(CalciteTests.DATASOURCE1) + .intervals(querySegmentSpec(Filtration.eternity())) + .granularity(Granularities.ALL) + .filters(in("dim2", ImmutableList.of("", "a"), null)) + .aggregators(aggregators(new CountAggregatorFactory("a0"))) + .context(TIMESERIES_CONTEXT_DEFAULT) + .build() + ), + ImmutableList.of( + // Matches everything but "abc" + new Object[]{5L} + ) + ); + } else { + testQuery( + "SELECT COUNT(*)\n" + + "FROM druid.foo\n" + + "WHERE NULLIF(dim2, 'a') = ''", + ImmutableList.of( + Druids.newTimeseriesQueryBuilder() + .dataSource(CalciteTests.DATASOURCE1) + .intervals(querySegmentSpec(Filtration.eternity())) + .granularity(Granularities.ALL) + .filters(selector("dim2", "", null)) + .aggregators(aggregators(new CountAggregatorFactory("a0"))) + .context(TIMESERIES_CONTEXT_DEFAULT) + .build() + ), + ImmutableList.of( + // match only empty string + new Object[]{1L} + ) + ); + } } @Test public void testNullStringEquality() throws Exception { + testQuery( + "SELECT COUNT(*)\n" + + "FROM druid.foo\n" + + "WHERE NULLIF(dim2, 'a') = null", + ImmutableList.of(), + NullHandling.replaceWithDefault() ? + // Matches everything but "abc" + ImmutableList.of(new Object[]{0L}) : + // null is not equal to null or any other value + ImmutableList.of(new Object[]{0L}) + ); + /* + The SQL query in this test planned to the following Druid query in Calcite 1.17. + After upgrading to Calcite 1.21, it seems like there is an optimization based on the NULLIF() == null, + and no query is generated. 
+ testQuery( "SELECT COUNT(*)\n" + "FROM druid.foo\n" @@ -1862,7 +1911,7 @@ public void testNullStringEquality() throws Exception // null is not eqaual to null or any other value ImmutableList.of() ); - + */ } @Test @@ -2150,9 +2199,6 @@ public void testCountNullableColumn() throws Exception @Test public void testCountNullableExpression() throws Exception { - // Cannot vectorize due to expression filter. - cannotVectorize(); - testQuery( "SELECT COUNT(CASE WHEN dim2 = 'abc' THEN 'yes' WHEN dim2 = 'def' THEN 'yes' END) FROM druid.foo", ImmutableList.of( @@ -2160,19 +2206,11 @@ public void testCountNullableExpression() throws Exception .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) .granularity(Granularities.ALL) - .virtualColumns( - expressionVirtualColumn( - "v0", - "case_searched((\"dim2\" == 'abc'),'yes',(\"dim2\" == 'def'),'yes'," - + DruidExpression.nullLiteral() - + ")", - ValueType.STRING - ) - ) + .virtualColumns() .aggregators(aggregators( new FilteredAggregatorFactory( new CountAggregatorFactory("a0"), - not(selector("v0", NullHandling.defaultStringValue(), null)) + in("dim2", ImmutableList.of("abc", "def"), null) ) )) .context(TIMESERIES_CONTEXT_DEFAULT) @@ -2457,7 +2495,7 @@ public void testFilterOnStringAsNumber() throws Exception ) .setDimFilter( or( - selector("dim1", "10", null), + bound("dim1", "10", "10", false, false, null, StringComparators.NUMERIC), and( selector("v0", "10.00", null), bound("dim1", "9", "10.5", true, false, null, StringComparators.NUMERIC) @@ -3108,20 +3146,14 @@ public void testCountStarWithDegenerateFilter() throws Exception .intervals(querySegmentSpec(Filtration.eternity())) .granularity(Granularities.ALL) .filters( - and( - selector("dim2", "a", null), - or( - bound("dim1", "a", null, true, false, null, StringComparators.LEXICOGRAPHIC), - not(selector("dim1", null, null)) - ) - ) + selector("dim2", "a", null) ) .aggregators(aggregators(new CountAggregatorFactory("a0"))) 
.context(TIMESERIES_CONTEXT_DEFAULT) .build() ), ImmutableList.of( - new Object[]{NullHandling.sqlCompatible() ? 2L : 1L} + new Object[]{NullHandling.sqlCompatible() ? 2L : 2L} ) ); } @@ -3331,20 +3363,27 @@ public void testCountStarWithTimeFilterUsingStringLiterals() throws Exception public void testCountStarWithTimeFilterUsingStringLiteralsInvalid() throws Exception { // Strings are implicitly cast to timestamps. Test an invalid string. - // This error message isn't ideal but it is at least better than silently ignoring the problem. - expectedException.expect(RuntimeException.class); - expectedException.expectMessage("Error while applying rule ReduceExpressionsRule"); - expectedException.expectCause( - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString("Illegal TIMESTAMP constant")) - ); - - testQuery( - "SELECT COUNT(*) FROM druid.foo\n" - + "WHERE __time >= 'z2000-01-01 00:00:00' AND __time < '2001-01-01 00:00:00'\n", - ImmutableList.of(), - ImmutableList.of() - ); + try { + testQuery( + "SELECT COUNT(*) FROM druid.foo\n" + + "WHERE __time >= 'z2000-01-01 00:00:00' AND __time < '2001-01-01 00:00:00'\n", + ImmutableList.of(), + ImmutableList.of() + ); + } + catch (Throwable t) { + // The root exception is not accessible by doing `getCause()`, it can only be retrieved through a chain of + // InvocationTargetException.getTargetException() calls, hence the code below + Throwable t2 = ((InvocationTargetException) t.getCause()).getTargetException(); + Throwable t3 = ((InvocationTargetException) t2.getCause()).getTargetException(); + Throwable rootException = ((InvocationTargetException) t3.getCause()).getTargetException(); + Assert.assertEquals(IAE.class, rootException.getClass()); + Assert.assertEquals( + "Illegal TIMESTAMP constant: CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL", + rootException.getMessage() + ); + } } @Test @@ -3820,7 +3859,7 @@ public void testSelectDistinctWithStrlenFilter() throws Exception .setGranularity(Granularities.ALL) 
.setVirtualColumns( expressionVirtualColumn("v0", "strlen(\"dim1\")", ValueType.LONG), - expressionVirtualColumn("v1", "CAST(strlen(\"dim1\"), 'STRING')", ValueType.STRING) + expressionVirtualColumn("v1", "CAST(CAST(strlen(\"dim1\"), 'STRING'), 'LONG')", ValueType.LONG) ) .setDimensions(dimensions(new DefaultDimensionSpec("dim1", "d0"))) .setDimFilter( @@ -4160,8 +4199,8 @@ public void testExactCountDistinctWithGroupingAndOtherAggregators() throws Excep .setInterval(querySegmentSpec(Filtration.eternity())) .setGranularity(Granularities.ALL) .setDimensions(dimensions( - new DefaultDimensionSpec("dim2", "d0"), - new DefaultDimensionSpec("dim1", "d1") + new DefaultDimensionSpec("dim1", "d0"), + new DefaultDimensionSpec("dim2", "d1") )) .setAggregatorSpecs(aggregators(new LongSumAggregatorFactory("a0", "cnt"))) .setContext(QUERY_CONTEXT_DEFAULT) @@ -4170,12 +4209,12 @@ public void testExactCountDistinctWithGroupingAndOtherAggregators() throws Excep ) .setInterval(querySegmentSpec(Filtration.eternity())) .setGranularity(Granularities.ALL) - .setDimensions(dimensions(new DefaultDimensionSpec("d0", "_d0"))) + .setDimensions(dimensions(new DefaultDimensionSpec("d1", "_d0"))) .setAggregatorSpecs(aggregators( new LongSumAggregatorFactory("_a0", "a0"), new FilteredAggregatorFactory( new CountAggregatorFactory("_a1"), - not(selector("d1", null, null)) + not(selector("d0", null, null)) ) )) .setContext(QUERY_CONTEXT_DEFAULT) @@ -4306,8 +4345,8 @@ public void testNestedGroupBy() throws Exception .setInterval(querySegmentSpec(Filtration.eternity())) .setGranularity(Granularities.ALL) .setDimensions(dimensions( - new DefaultDimensionSpec("m2", "d0", ValueType.DOUBLE), - new DefaultDimensionSpec("dim1", "d1") + new DefaultDimensionSpec("dim1", "d0"), + new DefaultDimensionSpec("m2", "d1", ValueType.DOUBLE) )) .setDimFilter(new SelectorDimFilter("m1", "5.0", null)) .setAggregatorSpecs(aggregators(new LongMaxAggregatorFactory("a0", "__time"))) @@ -4325,7 +4364,7 @@ public void 
testNestedGroupBy() throws Exception ) .setDimensions(dimensions( new DefaultDimensionSpec("v0", "v0", ValueType.LONG), - new DefaultDimensionSpec("d1", "_d0", ValueType.STRING) + new DefaultDimensionSpec("d0", "_d0", ValueType.STRING) )) .setAggregatorSpecs(aggregators( new CountAggregatorFactory("_a0") @@ -7145,18 +7184,6 @@ public void testUsingSubqueryAsFilterWithInnerSort() throws Exception .setInterval(querySegmentSpec(Filtration.eternity())) .setGranularity(Granularities.ALL) .setDimensions(dimensions(new DefaultDimensionSpec("dim2", "d0"))) - .setLimitSpec( - new DefaultLimitSpec( - ImmutableList.of( - new OrderByColumnSpec( - "d0", - OrderByColumnSpec.Direction.DESCENDING, - StringComparators.LEXICOGRAPHIC - ) - ), - Integer.MAX_VALUE - ) - ) .setContext(QUERY_CONTEXT_DEFAULT) .build(), newScanQueryBuilder() @@ -7395,15 +7422,6 @@ public void testProjectAfterSort() throws Exception new DefaultDimensionSpec("dim2", "d1") ) ) - .setAggregatorSpecs(aggregators(new CountAggregatorFactory("a0"))) - .setLimitSpec( - new DefaultLimitSpec( - Collections.singletonList( - new OrderByColumnSpec("a0", Direction.ASCENDING, StringComparators.NUMERIC) - ), - Integer.MAX_VALUE - ) - ) .setContext(QUERY_CONTEXT_DEFAULT) .build() ), @@ -7438,17 +7456,9 @@ public void testProjectAfterSort2() throws Exception aggregators(new CountAggregatorFactory("a0"), new DoubleSumAggregatorFactory("a1", "m2")) ) .setPostAggregatorSpecs(Collections.singletonList(expressionPostAgg( - "s0", + "p0", "(\"a1\" / \"a0\")" ))) - .setLimitSpec( - new DefaultLimitSpec( - Collections.singletonList( - new OrderByColumnSpec("a0", Direction.ASCENDING, StringComparators.NUMERIC) - ), - Integer.MAX_VALUE - ) - ) .setContext(QUERY_CONTEXT_DEFAULT) .build() ), @@ -7463,7 +7473,12 @@ public void testProjectAfterSort2() throws Exception ); } + /** + * In Calcite 1.17, this test worked, but after upgrading to Calcite 1.21, this query fails with: + * org.apache.calcite.sql.validate.SqlValidatorException: 
Column 'dim1' is ambiguous + */ @Test + @Ignore public void testProjectAfterSort3() throws Exception { testQuery( @@ -7532,8 +7547,8 @@ public void testSortProjectAfterNestedGroupBy() throws Exception .setGranularity(Granularities.ALL) .setDimensions(dimensions( new DefaultDimensionSpec("__time", "d0", ValueType.LONG), - new DefaultDimensionSpec("m2", "d1", ValueType.DOUBLE), - new DefaultDimensionSpec("dim1", "d2") + new DefaultDimensionSpec("dim1", "d1"), + new DefaultDimensionSpec("m2", "d2", ValueType.DOUBLE) )) .setContext(QUERY_CONTEXT_DEFAULT) .build() @@ -7542,19 +7557,11 @@ public void testSortProjectAfterNestedGroupBy() throws Exception .setGranularity(Granularities.ALL) .setDimensions(dimensions( new DefaultDimensionSpec("d0", "_d0", ValueType.LONG), - new DefaultDimensionSpec("d2", "_d1", ValueType.STRING) + new DefaultDimensionSpec("d1", "_d1", ValueType.STRING) )) .setAggregatorSpecs(aggregators( new CountAggregatorFactory("a0") )) - .setLimitSpec( - new DefaultLimitSpec( - Collections.singletonList( - new OrderByColumnSpec("a0", Direction.ASCENDING, StringComparators.NUMERIC) - ), - Integer.MAX_VALUE - ) - ) .setContext(QUERY_CONTEXT_DEFAULT) .build() ), @@ -8028,17 +8035,19 @@ public void testTrigonometricFunction() throws Exception ImmutableList.of(Druids.newTimeseriesQueryBuilder() .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) - .filters(selector("dim2", "0", null)) + .filters(bound("dim2", "0", "0", false, false, null, StringComparators.NUMERIC)) .granularity(Granularities.ALL) .aggregators(aggregators( new CountAggregatorFactory("a0") )) + // after upgrading to Calcite 1.21, the sin(pi/6)-type expressions with no references + // to columns are optimized into constant values .postAggregators( expressionPostAgg("p0", "(exp(\"a0\") + 10)"), - expressionPostAgg("p1", "sin((pi() / 6))"), - expressionPostAgg("p2", "cos((pi() / 6))"), - expressionPostAgg("p3", "tan((pi() / 6))"), - expressionPostAgg("p4", 
"cot((pi() / 6))"), + expressionPostAgg("p1", "0.49999999999999994"), + expressionPostAgg("p2", "0.8660254037844387"), + expressionPostAgg("p3", "0.5773502691896257"), + expressionPostAgg("p4", "1.7320508075688776"), expressionPostAgg("p5", "asin((exp(\"a0\") / 2))"), expressionPostAgg("p6", "acos((exp(\"a0\") / 2))"), expressionPostAgg("p7", "atan((exp(\"a0\") / 2))"), @@ -9089,4 +9098,39 @@ public void testMultiValueStringToStringToMultiValueString() throws Exception results ); } + + + @Test + public void testLeftRightStringOperators() throws Exception + { + testQuery( + "SELECT\n" + + " dim1," + + " LEFT(dim1, 2),\n" + + " RIGHT(dim1, 2)\n" + + "FROM druid.foo\n" + + "GROUP BY dim1\n", + ImmutableList.of( + GroupByQuery.builder() + .setDataSource(CalciteTests.DATASOURCE1) + .setInterval(querySegmentSpec(Filtration.eternity())) + .setGranularity(Granularities.ALL) + .setDimensions(dimensions(new DefaultDimensionSpec("dim1", "d0"))) + .setPostAggregatorSpecs(ImmutableList.of( + expressionPostAgg("p0", "left(\"d0\",2)"), + expressionPostAgg("p1", "right(\"d0\",2)") + )) + .setContext(QUERY_CONTEXT_DEFAULT) + .build() + ), + ImmutableList.of( + new Object[]{"", "", ""}, + new Object[]{"1", "1", "1"}, + new Object[]{"10.1", "10", ".1"}, + new Object[]{"2", "2", "2"}, + new Object[]{"abc", "ab", "bc"}, + new Object[]{"def", "de", "ef"} + ) + ); + } } From 608812c4aecd2430f6f7547c0f2c5b3a4591833a Mon Sep 17 00:00:00 2001 From: jon-wei Date: Mon, 23 Sep 2019 15:44:00 -0700 Subject: [PATCH 02/15] Checkstyle, test fix' --- .../filter/sql/BloomDimFilterSqlTest.java | 20 +++++++++++++++---- .../druid/sql/calcite/CalciteQueryTest.java | 7 +++---- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java b/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java index aa717a07e2e3..f26cf1304fee 100644 --- 
a/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java +++ b/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java @@ -42,11 +42,11 @@ import org.apache.druid.query.filter.BloomDimFilter; import org.apache.druid.query.filter.BloomKFilter; import org.apache.druid.query.filter.BloomKFilterHolder; -import org.apache.druid.query.filter.ExpressionDimFilter; import org.apache.druid.query.filter.OrDimFilter; import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider; import org.apache.druid.segment.TestHelper; import org.apache.druid.segment.column.ValueType; +import org.apache.druid.segment.virtual.ExpressionVirtualColumn; import org.apache.druid.server.security.AuthenticationResult; import org.apache.druid.sql.calcite.BaseCalciteQueryTest; import org.apache.druid.sql.calcite.filtration.Filtration; @@ -133,6 +133,9 @@ public void testBloomFilter() throws Exception @Test public void testBloomFilterExprFilter() throws Exception { + // Cannot vectorize due to expression virtual columns. 
+ cannotVectorize(); + BloomKFilter filter = new BloomKFilter(1500); filter.addString("a-foo"); filter.addString("-foo"); @@ -150,12 +153,21 @@ public void testBloomFilterExprFilter() throws Exception .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) .granularity(Granularities.ALL) - .filters( - new ExpressionDimFilter( - StringUtils.format("(bloom_filter_test(concat(\"dim2\",'-foo'),'%s') == 1)", base64), + .virtualColumns( + new ExpressionVirtualColumn( + "v0", + "concat(\"dim2\",'-foo')", + ValueType.STRING, createExprMacroTable() ) ) + .filters( + new BloomDimFilter( + "v0", + BloomKFilterHolder.fromBytes(bytes), + null + ) + ) .aggregators(aggregators(new CountAggregatorFactory("a0"))) .context(TIMESERIES_CONTEXT_DEFAULT) .build() diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 9c32e3626417..0dc59dbe6971 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -40,7 +40,6 @@ import org.apache.druid.query.aggregation.FilteredAggregatorFactory; import org.apache.druid.query.aggregation.FloatMaxAggregatorFactory; import org.apache.druid.query.aggregation.FloatMinAggregatorFactory; -import org.apache.druid.query.aggregation.FloatSumAggregatorFactory; import org.apache.druid.query.aggregation.LongMaxAggregatorFactory; import org.apache.druid.query.aggregation.LongMinAggregatorFactory; import org.apache.druid.query.aggregation.LongSumAggregatorFactory; @@ -3375,9 +3374,9 @@ public void testCountStarWithTimeFilterUsingStringLiteralsInvalid() throws Excep catch (Throwable t) { // The root exception is not accessible by doing `getCause()`, it can only be retrieved through a chain of // InvocationTargetException.getTargetException() calls, hence the code below - Throwable t2 = ((InvocationTargetException) 
t.getCause()).getTargetException(); - Throwable t3 = ((InvocationTargetException) t2.getCause()).getTargetException(); - Throwable rootException = ((InvocationTargetException) t3.getCause()).getTargetException(); + Throwable t2 = ((InvocationTargetException) t.getCause()).getTargetException(); + Throwable t3 = ((InvocationTargetException) t2.getCause()).getTargetException(); + Throwable rootException = ((InvocationTargetException) t3.getCause()).getTargetException(); Assert.assertEquals(IAE.class, rootException.getClass()); Assert.assertEquals( "Illegal TIMESTAMP constant: CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL", From 8d8d74177432c686c1eb8edc74a846936cf89a55 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Mon, 23 Sep 2019 16:49:17 -0700 Subject: [PATCH 03/15] Exclude calcite yaml deps, update license.yaml --- licenses.yaml | 10 +++++----- sql/pom.xml | 11 +++++++++++ 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/licenses.yaml b/licenses.yaml index 85fb69645f91..d4bcabe9b485 100644 --- a/licenses.yaml +++ b/licenses.yaml @@ -169,7 +169,7 @@ name: Esri Geometry API for Java license_category: binary module: java-core license_name: Apache License version 2.0 -version: 2.0.0 +version: 2.2.0 libraries: - com.esri.geometry: esri-geometry-api @@ -1117,17 +1117,17 @@ name: Apache Calcite license_category: binary module: java-core license_name: Apache License version 2.0 -version: 1.17.0 +version: 1.21.0 libraries: - org.apache.calcite: calcite-core - org.apache.calcite: calcite-linq4j notices: - calcite-core: | Calcite Core - Copyright 2012-2018 The Apache Software Foundation + Copyright 2012-2019 The Apache Software Foundation - calcite-linq4j: | Calcite Linq4j - Copyright 2012-2018 The Apache Software Foundation + Copyright 2012-2019 The Apache Software Foundation --- @@ -3319,7 +3319,7 @@ name: Janino and Commons Compiler license_category: binary module: java-core license_name: BSD-3-Clause License -version: 2.7.6 +version: 3.0.11 copyright: 
Arno Unkrig and TIBCO Software Inc. license_file_path: licenses/bin/janino.BSD3 libraries: diff --git a/sql/pom.xml b/sql/pom.xml index f292a9d9936e..d8bdc886e9dd 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -67,6 +67,17 @@ com.yahoo.datasketches sketches-core + + + com.fasterxml.jackson.dataformat + jackson-dataformat-yaml + + + org.yaml + snakeyaml + From bddac22a46abc8f55d8017c854cd4f40196846cd Mon Sep 17 00:00:00 2001 From: jon-wei Date: Mon, 23 Sep 2019 17:58:48 -0700 Subject: [PATCH 04/15] Add method for exception chain handling --- .../druid/sql/calcite/CalciteQueryTest.java | 7 ++----- .../druid/sql/calcite/util/CalciteTests.java | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 0dc59dbe6971..c6c013ceb8af 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -21,6 +21,7 @@ import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; +import org.aopalliance.intercept.Invocation; import org.apache.calcite.runtime.CalciteContextException; import org.apache.calcite.tools.ValidationException; import org.apache.druid.common.config.NullHandling; @@ -3372,11 +3373,7 @@ public void testCountStarWithTimeFilterUsingStringLiteralsInvalid() throws Excep ); } catch (Throwable t) { - // The root exception is not accessible by doing `getCause()`, it can only be retrieved through a chain of - // InvocationTargetException.getTargetException() calls, hence the code below - Throwable t2 = ((InvocationTargetException) t.getCause()).getTargetException(); - Throwable t3 = ((InvocationTargetException) t2.getCause()).getTargetException(); - Throwable rootException = ((InvocationTargetException) t3.getCause()).getTargetException(); + Throwable rootException = 
CalciteTests.getRootCauseFromInvocationTargetExceptionChain(t); Assert.assertEquals(IAE.class, rootException.getClass()); Assert.assertEquals( "Illegal TIMESTAMP constant: CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL", rootException.getMessage() ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java index 0cda7526ab4a..32337578ffd3 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java @@ -133,6 +133,7 @@ import org.joda.time.chrono.ISOChronology; import java.io.File; +import java.lang.reflect.InvocationTargetException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; @@ -814,4 +815,19 @@ public static SystemSchema createMockSystemSchema( ); return schema; } + + /** + * Some Calcite exceptions (such as that thrown by + * {@link org.apache.druid.sql.calcite.CalciteQueryTest#testCountStarWithTimeFilterUsingStringLiteralsInvalid()}, + * are structured as a chain of RuntimeExceptions caused by InvocationTargetExceptions. To get the root exception + * it is necessary to make getTargetException calls on the InvocationTargetExceptions. 
+ */ + public static Throwable getRootCauseFromInvocationTargetExceptionChain(Throwable t) + { + Throwable curThrowable = t; + while (curThrowable.getCause() instanceof InvocationTargetException) { + curThrowable = ((InvocationTargetException) curThrowable.getCause()).getTargetException(); + } + return curThrowable; + } } From 62c6132ec293a1d58993b45edd993dde6600c796 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Tue, 24 Sep 2019 13:53:00 -0700 Subject: [PATCH 05/15] Checkstyle --- .../java/org/apache/druid/sql/calcite/CalciteQueryTest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index c6c013ceb8af..3fcbedbbe042 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -21,7 +21,6 @@ import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; -import org.aopalliance.intercept.Invocation; import org.apache.calcite.runtime.CalciteContextException; import org.apache.calcite.tools.ValidationException; import org.apache.druid.common.config.NullHandling; @@ -85,7 +84,6 @@ import org.junit.Test; import org.junit.internal.matchers.ThrowableMessageMatcher; -import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; From e8dd078d31455fef184b85314ec8e33df8c1879c Mon Sep 17 00:00:00 2001 From: jon-wei Date: Tue, 24 Sep 2019 16:59:16 -0700 Subject: [PATCH 06/15] PR comments, Add outer limit context flag --- .idea/mavenProjectSettings.xml | 10 -- .../filter/sql/BloomDimFilterSqlTest.java | 26 ++--- .../calcite/planner/DruidOperatorTable.java | 10 +- .../sql/calcite/planner/DruidPlanner.java | 51 +++++++++- .../sql/calcite/planner/PlannerContext.java | 4 + .../sql/calcite/rule/SortCollapseRule.java | 5 +- 
.../druid/sql/calcite/CalciteQueryTest.java | 94 ++++++++++++++++--- 7 files changed, 155 insertions(+), 45 deletions(-) delete mode 100644 .idea/mavenProjectSettings.xml diff --git a/.idea/mavenProjectSettings.xml b/.idea/mavenProjectSettings.xml deleted file mode 100644 index 1eb1f057ae63..000000000000 --- a/.idea/mavenProjectSettings.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - diff --git a/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java b/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java index f26cf1304fee..bd13b3a41e4a 100644 --- a/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java +++ b/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java @@ -42,6 +42,7 @@ import org.apache.druid.query.filter.BloomDimFilter; import org.apache.druid.query.filter.BloomKFilter; import org.apache.druid.query.filter.BloomKFilterHolder; +import org.apache.druid.query.filter.ExpressionDimFilter; import org.apache.druid.query.filter.OrDimFilter; import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider; import org.apache.druid.segment.TestHelper; @@ -133,9 +134,6 @@ public void testBloomFilter() throws Exception @Test public void testBloomFilterExprFilter() throws Exception { - // Cannot vectorize due to expression virtual columns. 
- cannotVectorize(); - BloomKFilter filter = new BloomKFilter(1500); filter.addString("a-foo"); filter.addString("-foo"); @@ -147,25 +145,21 @@ public void testBloomFilterExprFilter() throws Exception // fool the planner to make an expression virtual column to test bloom filter Druid expression testQuery( - StringUtils.format("SELECT COUNT(*) FROM druid.foo WHERE bloom_filter_test(concat(dim2, '-foo'), '%s') = TRUE", base64), + StringUtils.format("SELECT COUNT(*) FROM druid.foo WHERE nullif(bloom_filter_test(concat(dim2, '-foo'), '%s'), 1) is null", base64), ImmutableList.of( Druids.newTimeseriesQueryBuilder() .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) .granularity(Granularities.ALL) - .virtualColumns( - new ExpressionVirtualColumn( - "v0", - "concat(\"dim2\",'-foo')", - ValueType.STRING, - createExprMacroTable() - ) - ) .filters( - new BloomDimFilter( - "v0", - BloomKFilterHolder.fromBytes(bytes), - null + new ExpressionDimFilter( + StringUtils.format( + "case_searched(bloom_filter_test(concat(\"dim2\",'-foo'),'%s'),1,isnull(bloom_filter_test(concat(\"dim2\",'-foo'),'%s')))", + base64, + base64 + ), + null, + createExprMacroTable() ) ) .aggregators(aggregators(new CountAggregatorFactory("a0"))) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java index 8c9982d1a648..e1c1900bfaf0 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java @@ -356,11 +356,11 @@ public SqlOperatorConversion lookupOperatorConversion(final SqlOperator operator @Override public void lookupOperatorOverloads( - SqlIdentifier opName, - SqlFunctionCategory category, - SqlSyntax syntax, - List operatorList, - SqlNameMatcher nameMatcher + final SqlIdentifier opName, + final SqlFunctionCategory category, + final 
SqlSyntax syntax, + final List operatorList, + final SqlNameMatcher nameMatcher ) { if (opName == null) { diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index 5e67ee5fdc8c..fa5b4ad0f82d 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -35,6 +35,7 @@ import org.apache.calcite.plan.RelOptUtil; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.RelRoot; +import org.apache.calcite.rel.logical.LogicalSort; import org.apache.calcite.rel.type.RelDataTypeFactory; import org.apache.calcite.rex.RexBuilder; import org.apache.calcite.rex.RexNode; @@ -42,6 +43,7 @@ import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.parser.SqlParseException; +import org.apache.calcite.sql.type.BasicSqlType; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.tools.Planner; import org.apache.calcite.tools.RelConversionException; @@ -50,8 +52,10 @@ import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequence; import org.apache.druid.java.util.common.guava.Sequences; +import org.apache.druid.segment.DimensionHandlerUtils; import org.apache.druid.sql.calcite.rel.DruidConvention; import org.apache.druid.sql.calcite.rel.DruidRel; +import org.checkerframework.checker.nullness.qual.Nullable; import java.io.Closeable; import java.util.ArrayList; @@ -63,6 +67,7 @@ public class DruidPlanner implements Closeable { private final Planner planner; private final PlannerContext plannerContext; + private RexBuilder rexBuilder; public DruidPlanner( final Planner planner, @@ -82,6 +87,9 @@ public PlannerResult plan(final String sql) explain = (SqlExplain) parsed; parsed = explain.getExplicandum(); } + // the planner's type factory is 
not available until after parsing + this.rexBuilder = new RexBuilder(planner.getTypeFactory()); + final SqlNode validated = planner.validate(parsed); final RelRoot root = planner.rel(validated); @@ -116,12 +124,14 @@ private PlannerResult planWithDruidConvention( final RelRoot root ) throws RelConversionException { + final RelNode possiblyWrappedRootRel = possiblyWrapRootWithOuterLimitFromContext(root); + final DruidRel druidRel = (DruidRel) planner.transform( Rules.DRUID_CONVENTION_RULES, planner.getEmptyTraitSet() .replace(DruidConvention.instance()) .plus(root.collation), - root.rel + possiblyWrappedRootRel ); final Set dataSourceNames = ImmutableSet.copyOf(druidRel.getDataSourceNames()); @@ -232,6 +242,45 @@ public void cleanup(EnumeratorIterator iterFromMake) } } + /** + * This method wraps the root with a logical sort that applies a limit (no ordering change). + * The CTX_SQL_OUTER_LIMIT flag that controls this wrapping is meant for internal use only by the + * web console, allowing it to apply a limit to queries without rewriting the original SQL. + * + * @param root root node + * @return root node wrapped with a limiting logical sort if a limit is specified in the query context. 
+ */ + @Nullable + private RelNode possiblyWrapRootWithOuterLimitFromContext( + RelRoot root + ) + { + Object outerLimitObj = plannerContext.getQueryContext().get(PlannerContext.CTX_SQL_OUTER_LIMIT); + if (outerLimitObj == null) { + return root.rel; + } + Long outerLimit = DimensionHandlerUtils.convertObjectToLong(outerLimitObj, true); + if (outerLimit == null) { + return root.rel; + } + + return LogicalSort.create( + root.rel, + root.collation, + makeBigIntLiteral(0), + makeBigIntLiteral(outerLimit) + ); + } + + private RexNode makeBigIntLiteral(long value) + { + return rexBuilder.makeLiteral( + value, + new BasicSqlType(DruidTypeSystem.INSTANCE, SqlTypeName.BIGINT), + false + ); + } + private static class EnumeratorIterator implements Iterator { private final Iterator it; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java index 6477a007dced..03dc196c6ad2 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java @@ -50,6 +50,10 @@ public class PlannerContext public static final String CTX_SQL_CURRENT_TIMESTAMP = "sqlCurrentTimestamp"; public static final String CTX_SQL_TIME_ZONE = "sqlTimeZone"; + // This context parameter is an undocumented parameter, used internally, to allow the web console to + // apply a limit without having to rewrite the SQL query. 
+ public static final String CTX_SQL_OUTER_LIMIT = "sqlOuterLimit"; + // DataContext keys public static final String DATA_CTX_AUTHENTICATION_RESULT = "authenticationResult"; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/SortCollapseRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/SortCollapseRule.java index 7bbc888809ca..c3e91233f990 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/SortCollapseRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/SortCollapseRule.java @@ -49,7 +49,8 @@ public void onMatch(final RelOptRuleCall call) final Sort first = call.rel(1); final Sort second = call.rel(0); - if (second.collation.getFieldCollations().isEmpty()) { + if (second.collation.getFieldCollations().isEmpty() + || second.collation.getFieldCollations().equals(first.collation.getFieldCollations())) { // Add up the offsets. final int firstOffset = (first.offset != null ? RexLiteral.intValue(first.offset) : 0); final int secondOffset = (second.offset != null ? RexLiteral.intValue(second.offset) : 0); @@ -81,7 +82,7 @@ public void onMatch(final RelOptRuleCall call) first.getInput(), first.getCollation(), offset == 0 ? null : call.builder().literal(offset), - call.builder().literal(fetch) + fetch < 0 ? 
null : call.builder().literal(fetch) ); call.transformTo(combined); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 3fcbedbbe042..05742f671329 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -72,6 +72,7 @@ import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.planner.Calcites; +import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.rel.CannotBuildQueryException; import org.apache.druid.sql.calcite.util.CalciteTests; import org.hamcrest.CoreMatchers; @@ -87,7 +88,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; public class CalciteQueryTest extends BaseCalciteQueryTest { @@ -802,9 +805,8 @@ public void testSelectProjectionFromSelectSingleColumnDescending() throws Except { // Regression test for https://github.com/apache/incubator-druid/issues/7768. - // After upgrading to Calcite 1.21, the results return in the wrong order, the ORDER BY __time DESC - // in the inner query is not being applied. - + // After upgrading to Calcite 1.21, Calcite no longer respects the ORDER BY __time DESC + // in the inner query. 
testQuery( "SELECT 'beep ' || dim1 FROM (SELECT dim1 FROM druid.foo ORDER BY __time DESC)", ImmutableList.of( @@ -1807,7 +1809,6 @@ public void testNullEmptyStringEquality() throws Exception ) ) ) - //.filters(expressionFilter("case_searched((\"dim2\" == 'a'),1,isnull(\"dim2\"))")) .aggregators(aggregators(new CountAggregatorFactory("a0"))) .context(TIMESERIES_CONTEXT_DEFAULT) .build() @@ -1884,8 +1885,8 @@ public void testNullStringEquality() throws Exception ); /* The SQL query in this test planned to the following Druid query in Calcite 1.17. - After upgrading to Calcite 1.21, it seems like there is an optimization based on the NULLIF() == null, - and no query is generated. + After upgrading to Calcite 1.21, Calcite applies an optimization based on the NULLIF() == null, + which is valid assuming standard SQL null handling, and no query is generated. testQuery( "SELECT COUNT(*)\n" @@ -1981,7 +1982,7 @@ public void testColumnIsNull() throws Exception } @Test - public void testUnplannableQueries() + public void testUnplannableQueries() throws Exception { // All of these queries are unplannable because they rely on features Druid doesn't support. // This test is here to confirm that we don't fall back to Calcite's interpreter or enumerable implementation. @@ -3151,7 +3152,7 @@ public void testCountStarWithDegenerateFilter() throws Exception .build() ), ImmutableList.of( - new Object[]{NullHandling.sqlCompatible() ? 
2L : 2L} + new Object[]{2L} ) ); } @@ -8034,8 +8035,8 @@ public void testTrigonometricFunction() throws Exception .aggregators(aggregators( new CountAggregatorFactory("a0") )) - // after upgrading to Calcite 1.21, the sin(pi/6)-type expressions with no references - // to columns are optimized into constant values + // after upgrading to Calcite 1.21, expressions like sin(pi/6) that only reference + // literals are optimized into literals .postAggregators( expressionPostAgg("p0", "(exp(\"a0\") + 10)"), expressionPostAgg("p1", "0.49999999999999994"), @@ -9093,7 +9094,6 @@ public void testMultiValueStringToStringToMultiValueString() throws Exception ); } - @Test public void testLeftRightStringOperators() throws Exception { @@ -9127,4 +9127,76 @@ public void testLeftRightStringOperators() throws Exception ) ); } + + @Test + public void testQueryContextOuterLimit() throws Exception + { + Map outerLimitContext = new HashMap<>(QUERY_CONTEXT_DEFAULT); + outerLimitContext.put(PlannerContext.CTX_SQL_OUTER_LIMIT, 4); + + TopNQueryBuilder baseBuilder = new TopNQueryBuilder() + .dataSource(CalciteTests.DATASOURCE1) + .intervals(querySegmentSpec(Filtration.eternity())) + .granularity(Granularities.ALL) + .dimension(new DefaultDimensionSpec("dim1", "d0")) + .metric( + new InvertedTopNMetricSpec( + new DimensionTopNMetricSpec( + null, + StringComparators.LEXICOGRAPHIC + ) + ) + ) + .context(outerLimitContext); + + // no existing limit + testQuery( + PLANNER_CONFIG_DEFAULT, + outerLimitContext, + "SELECT dim1 FROM druid.foo GROUP BY dim1 ORDER BY dim1 DESC", + CalciteTests.REGULAR_USER_AUTH_RESULT, + ImmutableList.of( + baseBuilder.threshold(4).build() + ), + ImmutableList.of( + new Object[]{""}, + new Object[]{"def"}, + new Object[]{"abc"}, + new Object[]{"2"} + ) + ); + + // existing limit greater than context limit, override existing limit + testQuery( + PLANNER_CONFIG_DEFAULT, + outerLimitContext, + "SELECT dim1 FROM druid.foo GROUP BY dim1 ORDER BY dim1 DESC LIMIT 9", + 
CalciteTests.REGULAR_USER_AUTH_RESULT, + ImmutableList.of( + baseBuilder.threshold(4).build() + ), + ImmutableList.of( + new Object[]{""}, + new Object[]{"def"}, + new Object[]{"abc"}, + new Object[]{"2"} + ) + ); + + // existing limit less than context limit, keep existing limit + testQuery( + PLANNER_CONFIG_DEFAULT, + outerLimitContext, + "SELECT dim1 FROM druid.foo GROUP BY dim1 ORDER BY dim1 DESC LIMIT 2", + CalciteTests.REGULAR_USER_AUTH_RESULT, + ImmutableList.of( + baseBuilder.threshold(2).build() + + ), + ImmutableList.of( + new Object[]{""}, + new Object[]{"def"} + ) + ); + } } From 6264b71f04db5ee84b84881be6015559a1475c18 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Thu, 17 Oct 2019 13:46:29 -0700 Subject: [PATCH 07/15] Revert project settings change --- .idea/mavenProjectSettings.xml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .idea/mavenProjectSettings.xml diff --git a/.idea/mavenProjectSettings.xml b/.idea/mavenProjectSettings.xml new file mode 100644 index 000000000000..1eb1f057ae63 --- /dev/null +++ b/.idea/mavenProjectSettings.xml @@ -0,0 +1,10 @@ + + + + + + From 4f5cf7708f34e9091952d0875e0c8ac117b32024 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Thu, 17 Oct 2019 13:50:19 -0700 Subject: [PATCH 08/15] Update subquery test comment --- .../java/org/apache/druid/sql/calcite/CalciteQueryTest.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 05742f671329..f112218f5bfa 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -806,7 +806,8 @@ public void testSelectProjectionFromSelectSingleColumnDescending() throws Except // Regression test for https://github.com/apache/incubator-druid/issues/7768. 
// After upgrading to Calcite 1.21, Calcite no longer respects the ORDER BY __time DESC - // in the inner query. + // in the inner query. This is valid, as the SQL standard considers the subquery results to be an unordered + // set of rows. testQuery( "SELECT 'beep ' || dim1 FROM (SELECT dim1 FROM druid.foo ORDER BY __time DESC)", ImmutableList.of( From 0e80ce1be51128116ec3a43c17e65266bbc2df2e Mon Sep 17 00:00:00 2001 From: jon-wei Date: Thu, 17 Oct 2019 15:50:15 -0700 Subject: [PATCH 09/15] Checkstyle fix --- .../org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java | 1 - 1 file changed, 1 deletion(-) diff --git a/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java b/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java index bd13b3a41e4a..6e5265a75366 100644 --- a/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java +++ b/extensions-core/druid-bloom-filter/src/test/java/org/apache/druid/query/filter/sql/BloomDimFilterSqlTest.java @@ -47,7 +47,6 @@ import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider; import org.apache.druid.segment.TestHelper; import org.apache.druid.segment.column.ValueType; -import org.apache.druid.segment.virtual.ExpressionVirtualColumn; import org.apache.druid.server.security.AuthenticationResult; import org.apache.druid.sql.calcite.BaseCalciteQueryTest; import org.apache.druid.sql.calcite.filtration.Filtration; From 4567a560d3a2fdc2f4c571ee064ae31bbf7e4a38 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Thu, 17 Oct 2019 18:37:12 -0700 Subject: [PATCH 10/15] Fix test in sql compat mode --- .../druid/sql/calcite/CalciteQueryTest.java | 54 +++++++++++++------ 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index f112218f5bfa..a5a3564fadc0 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -9150,7 +9150,24 @@ public void testQueryContextOuterLimit() throws Exception ) .context(outerLimitContext); - // no existing limit + List results1; + if (NullHandling.replaceWithDefault()) { + results1 = ImmutableList.of( + new Object[]{""}, + new Object[]{"def"}, + new Object[]{"abc"}, + new Object[]{"2"} + ); + } else { + results1 = ImmutableList.of( + new Object[]{"def"}, + new Object[]{"abc"}, + new Object[]{"2"}, + new Object[]{"10.1"} + ); + } + + // no existing limit testQuery( PLANNER_CONFIG_DEFAULT, outerLimitContext, @@ -9159,12 +9176,7 @@ public void testQueryContextOuterLimit() throws Exception ImmutableList.of( baseBuilder.threshold(4).build() ), - ImmutableList.of( - new Object[]{""}, - new Object[]{"def"}, - new Object[]{"abc"}, - new Object[]{"2"} - ) + results1 ); // existing limit greater than context limit, override existing limit @@ -9176,14 +9188,25 @@ public void testQueryContextOuterLimit() throws Exception ImmutableList.of( baseBuilder.threshold(4).build() ), - ImmutableList.of( - new Object[]{""}, - new Object[]{"def"}, - new Object[]{"abc"}, - new Object[]{"2"} - ) + results1 ); + + List results2; + if (NullHandling.replaceWithDefault()) { + results2 = ImmutableList.of( + new Object[]{""}, + new Object[]{"def"}, + new Object[]{"abc"}, + new Object[]{"2"} + ); + } else { + results2 = ImmutableList.of( + new Object[]{"def"}, + new Object[]{"abc"} + ); + } + // existing limit less than context limit, keep existing limit testQuery( PLANNER_CONFIG_DEFAULT, @@ -9194,10 +9217,7 @@ public void testQueryContextOuterLimit() throws Exception baseBuilder.threshold(2).build() ), - ImmutableList.of( - new Object[]{""}, - new Object[]{"def"} - ) + results2 ); } } From 
60f1b7690b62417b966ebea25a45ba96fb1195f4 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Fri, 18 Oct 2019 11:22:57 -0700 Subject: [PATCH 11/15] Fix test --- .../java/org/apache/druid/sql/calcite/CalciteQueryTest.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index a5a3564fadc0..6a427c7f8507 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -9196,9 +9196,7 @@ public void testQueryContextOuterLimit() throws Exception if (NullHandling.replaceWithDefault()) { results2 = ImmutableList.of( new Object[]{""}, - new Object[]{"def"}, - new Object[]{"abc"}, - new Object[]{"2"} + new Object[]{"def"} ); } else { results2 = ImmutableList.of( From dc72b0fc957aa1c03acf5c6d002165169d7ae70d Mon Sep 17 00:00:00 2001 From: jon-wei Date: Fri, 18 Oct 2019 11:46:47 -0700 Subject: [PATCH 12/15] Fix dependency analysis --- sql/pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sql/pom.xml b/sql/pom.xml index d8bdc886e9dd..260605e3845f 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -166,6 +166,12 @@ org.apache.curator curator-x-discovery + + org.checkerframework + checker-qual + ${checkerframework.version} + provided + From 59486e688351c9d2fa8d4f026e8f96c9aa38c7a4 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Wed, 20 Nov 2019 15:31:04 -0800 Subject: [PATCH 13/15] Address PR comments --- .../calcite/planner/DruidOperatorTable.java | 2 - .../sql/calcite/planner/DruidPlanner.java | 5 +- .../druid/sql/calcite/CalciteQueryTest.java | 100 ++++++++++++------ 3 files changed, 67 insertions(+), 40 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java index c19fcd41fba7..e9feb4ed2ea7 100644 
--- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java @@ -115,8 +115,6 @@ public class DruidOperatorTable implements SqlOperatorTable { - private static final EmittingLogger log = new EmittingLogger(DruidOperatorTable.class); - private static final List STANDARD_AGGREGATORS = ImmutableList.builder() .add(new ApproxCountDistinctSqlAggregator()) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index fa5b4ad0f82d..9349286b4696 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -55,8 +55,8 @@ import org.apache.druid.segment.DimensionHandlerUtils; import org.apache.druid.sql.calcite.rel.DruidConvention; import org.apache.druid.sql.calcite.rel.DruidRel; -import org.checkerframework.checker.nullness.qual.Nullable; +import javax.annotation.Nullable; import java.io.Closeable; import java.util.ArrayList; import java.util.Iterator; @@ -256,9 +256,6 @@ private RelNode possiblyWrapRootWithOuterLimitFromContext( ) { Object outerLimitObj = plannerContext.getQueryContext().get(PlannerContext.CTX_SQL_OUTER_LIMIT); - if (outerLimitObj == null) { - return root.rel; - } Long outerLimit = DimensionHandlerUtils.convertObjectToLong(outerLimitObj, true); if (outerLimit == null) { return root.rel; diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 63ba0fcccb01..d49459a464b4 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -765,6 +765,9 @@ public void testSelectSingleColumnWithLimitDescending() throws Exception @Test public void 
testSelectStarFromSelectSingleColumnWithLimitDescending() throws Exception { + // After upgrading to Calcite 1.21, Calcite no longer respects the ORDER BY __time DESC + // in the inner query. This is valid, as the SQL standard considers the subquery results to be an unordered + // set of rows. testQuery( "SELECT * FROM (SELECT dim1 FROM druid.foo ORDER BY __time DESC) LIMIT 2", ImmutableList.of( @@ -783,6 +786,29 @@ public void testSelectStarFromSelectSingleColumnWithLimitDescending() throws Exc new Object[]{"10.1"} ) ); + + // The outer limit wrapping behavior that was used in the query above can be applied with a context flag instead + Map outerLimitContext = new HashMap<>(QUERY_CONTEXT_DEFAULT); + outerLimitContext.put(PlannerContext.CTX_SQL_OUTER_LIMIT, 2); + testQuery( + "SELECT __time, dim1 FROM druid.foo ORDER BY __time DESC", + outerLimitContext, + ImmutableList.of( + newScanQueryBuilder() + .dataSource(CalciteTests.DATASOURCE1) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns(ImmutableList.of("__time", "dim1")) + .limit(2) + .order(ScanQuery.Order.DESCENDING) + .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST) + .context(outerLimitContext) + .build() + ), + ImmutableList.of( + new Object[]{timestamp("2001-01-03"), "abc"}, + new Object[]{timestamp("2001-01-02"), "def"} + ) + ); } @Test @@ -816,7 +842,7 @@ public void testSelectProjectionFromSelectSingleColumnDescending() throws Except // After upgrading to Calcite 1.21, Calcite no longer respects the ORDER BY __time DESC // in the inner query. This is valid, as the SQL standard considers the subquery results to be an unordered - // set of rows. + // set of rows. This test now validates that the inner ordering is not applied. 
testQuery( "SELECT 'beep ' || dim1 FROM (SELECT dim1 FROM druid.foo ORDER BY __time DESC)", ImmutableList.of( @@ -1927,6 +1953,8 @@ public void testNullEmptyStringEquality() throws Exception .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) .granularity(Granularities.ALL) + // Ideally the following filter should be simplified to (dim2 == 'a' || dim2 IS NULL), the + // (dim2 != 'a') component is unnecessary. .filters( or( selector("dim2", "a", null), @@ -1999,45 +2027,15 @@ public void testEmptyStringEquality() throws Exception @Test public void testNullStringEquality() throws Exception { + // In Calcite 1.21, this query is optimized to return 0 without generating a native Druid query, since + // null is not equal to null or any other value. testQuery( "SELECT COUNT(*)\n" + "FROM druid.foo\n" + "WHERE NULLIF(dim2, 'a') = null", ImmutableList.of(), - NullHandling.replaceWithDefault() ? - // Matches everything but "abc" - ImmutableList.of(new Object[]{0L}) : - // null is not eqaual to null or any other value ImmutableList.of(new Object[]{0L}) ); - /* - The SQL query in this test planned to the following Druid query in Calcite 1.17. - After upgrading to Calcite 1.21, Calcite applies an optimization based on the NULLIF() == null, - which is valid assuming standard SQL null handling, and no query is generated. - - testQuery( - "SELECT COUNT(*)\n" - + "FROM druid.foo\n" - + "WHERE NULLIF(dim2, 'a') = null", - ImmutableList.of( - Druids.newTimeseriesQueryBuilder() - .dataSource(CalciteTests.DATASOURCE1) - .intervals(querySegmentSpec(Filtration.eternity())) - .granularity(Granularities.ALL) - .filters(expressionFilter("case_searched((\"dim2\" == 'a')," - + (NullHandling.replaceWithDefault() ? "1" : "0") - + ",(\"dim2\" == null))")) - .aggregators(aggregators(new CountAggregatorFactory("a0"))) - .context(TIMESERIES_CONTEXT_DEFAULT) - .build() - ), - NullHandling.replaceWithDefault() ? 
- // Matches everything but "abc" - ImmutableList.of(new Object[]{5L}) : - // null is not eqaual to null or any other value - ImmutableList.of() - ); - */ } @Test @@ -2332,7 +2330,6 @@ public void testCountNullableExpression() throws Exception .dataSource(CalciteTests.DATASOURCE1) .intervals(querySegmentSpec(Filtration.eternity())) .granularity(Granularities.ALL) - .virtualColumns() .aggregators(aggregators( new FilteredAggregatorFactory( new CountAggregatorFactory("a0"), @@ -3981,6 +3978,7 @@ public void testSelectDistinctWithStrlenFilter() throws Exception .setGranularity(Granularities.ALL) .setVirtualColumns( expressionVirtualColumn("v0", "strlen(\"dim1\")", ValueType.LONG), + // The two layers of CASTs here are unusual, they should really be collapsed into one expressionVirtualColumn("v1", "CAST(CAST(strlen(\"dim1\"), 'STRING'), 'LONG')", ValueType.LONG) ) .setDimensions(dimensions(new DefaultDimensionSpec("dim1", "d0"))) @@ -7638,6 +7636,38 @@ public void testProjectAfterSort3() throws Exception ); } + + @Test + public void testProjectAfterSort3WithoutAmbiguity() throws Exception + { + // This query is equivalent to the one in testProjectAfterSort3 but renames the second grouping column + // to avoid the ambiguous name exception. The inner sort is also optimized out in Calcite 1.21. 
+ testQuery( + "select copydim1 from (select dim1, dim1 AS copydim1, count(*) cnt from druid.foo group by dim1, dim1 order by cnt)", + ImmutableList.of( + GroupByQuery.builder() + .setDataSource(CalciteTests.DATASOURCE1) + .setInterval(querySegmentSpec(Filtration.eternity())) + .setGranularity(Granularities.ALL) + .setDimensions( + dimensions( + new DefaultDimensionSpec("dim1", "d0") + ) + ) + .setContext(QUERY_CONTEXT_DEFAULT) + .build() + ), + ImmutableList.of( + new Object[]{""}, + new Object[]{"1"}, + new Object[]{"10.1"}, + new Object[]{"2"}, + new Object[]{"abc"}, + new Object[]{"def"} + ) + ); + } + @Test public void testSortProjectAfterNestedGroupBy() throws Exception { @@ -9254,6 +9284,8 @@ public void testLeftRightStringOperators() throws Exception ) ); } + + @Test public void testQueryContextOuterLimit() throws Exception From adf49e4ebe63b16a8a55b3cb0c9034dbcd7f3be8 Mon Sep 17 00:00:00 2001 From: jon-wei Date: Wed, 20 Nov 2019 15:34:54 -0800 Subject: [PATCH 14/15] Checkstyle --- .../org/apache/druid/sql/calcite/planner/DruidOperatorTable.java | 1 - 1 file changed, 1 deletion(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java index e9feb4ed2ea7..16ab45470a1e 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidOperatorTable.java @@ -32,7 +32,6 @@ import org.apache.calcite.sql.validate.SqlNameMatcher; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.sql.calcite.aggregation.SqlAggregator; import org.apache.druid.sql.calcite.aggregation.builtin.ApproxCountDistinctSqlAggregator; import org.apache.druid.sql.calcite.aggregation.builtin.AvgSqlAggregator; From 66d23a40ca7abce52cbc906d85053ee7b4285de0 Mon 
Sep 17 00:00:00 2001 From: jon-wei Date: Wed, 20 Nov 2019 18:00:32 -0800 Subject: [PATCH 15/15] Adjust testSelectStarFromSelectSingleColumnWithLimitDescending --- .../java/org/apache/druid/sql/calcite/CalciteQueryTest.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index d49459a464b4..0c1b9f96fa11 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -791,7 +791,7 @@ public void testSelectStarFromSelectSingleColumnWithLimitDescending() throws Exc Map outerLimitContext = new HashMap<>(QUERY_CONTEXT_DEFAULT); outerLimitContext.put(PlannerContext.CTX_SQL_OUTER_LIMIT, 2); testQuery( - "SELECT __time, dim1 FROM druid.foo ORDER BY __time DESC", + "SELECT dim1 FROM druid.foo ORDER BY __time DESC", outerLimitContext, ImmutableList.of( newScanQueryBuilder() @@ -805,8 +805,8 @@ public void testSelectStarFromSelectSingleColumnWithLimitDescending() throws Exc .build() ), ImmutableList.of( - new Object[]{timestamp("2001-01-03"), "abc"}, - new Object[]{timestamp("2001-01-02"), "def"} + new Object[]{"abc"}, + new Object[]{"def"} ) ); }