diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java index 9459b234303b..ba9debb28815 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java @@ -543,7 +543,7 @@ public void tearDown() throws Exception @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void querySql(Blackhole blackhole) throws Exception + public void querySql(Blackhole blackhole) { final Map context = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, vectorize, @@ -561,7 +561,7 @@ public void querySql(Blackhole blackhole) throws Exception @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void planSql(Blackhole blackhole) throws Exception + public void planSql(Blackhole blackhole) { final Map context = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, vectorize, diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java index 1c64d7a749df..7733281908f0 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java @@ -67,7 +67,6 @@ import org.openjdk.jmh.infra.Blackhole; import javax.annotation.Nullable; - import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -349,7 +348,7 @@ public void tearDown() throws Exception @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void querySql(Blackhole blackhole) throws Exception + public void querySql(Blackhole blackhole) { final Map context = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, vectorize, diff --git 
a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlNestedDataBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlNestedDataBenchmark.java index aeda68b25b05..98514512e9ab 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlNestedDataBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlNestedDataBenchmark.java @@ -375,7 +375,7 @@ public void tearDown() throws Exception @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void querySql(Blackhole blackhole) throws Exception + public void querySql(Blackhole blackhole) { final Map context = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, vectorize, diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java index 1cb747048dc7..de3db00accf5 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java @@ -170,7 +170,7 @@ public void queryNative(Blackhole blackhole) @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void queryPlanner(Blackhole blackhole) throws Exception + public void queryPlanner(Blackhole blackhole) { try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(engine, sqlQuery, Collections.emptyMap())) { final PlannerResult plannerResult = planner.plan(); diff --git a/extensions-core/datasketches/pom.xml b/extensions-core/datasketches/pom.xml index 2657926a8a30..e5d408256aa0 100644 --- a/extensions-core/datasketches/pom.xml +++ b/extensions-core/datasketches/pom.xml @@ -165,6 +165,11 @@ hamcrest-core test + + org.hamcrest + hamcrest-all + test + nl.jqno.equalsverifier equalsverifier diff --git 
a/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/theta/sql/ThetaSketchSqlAggregatorTest.java b/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/theta/sql/ThetaSketchSqlAggregatorTest.java index 0887cc4e69a5..c1ddfa279d21 100644 --- a/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/theta/sql/ThetaSketchSqlAggregatorTest.java +++ b/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/theta/sql/ThetaSketchSqlAggregatorTest.java @@ -1035,7 +1035,7 @@ public void testThetaSketchIntersectOnScalarExpression() { assertQueryIsUnplannable( "SELECT THETA_SKETCH_INTERSECT(NULL, NULL) FROM foo", - "Possible error: THETA_SKETCH_INTERSECT can only be used on aggregates. " + + "THETA_SKETCH_INTERSECT can only be used on aggregates. " + "It cannot be used directly on a column or on a scalar expression." ); } diff --git a/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/tuple/sql/ArrayOfDoublesSketchSqlAggregatorTest.java b/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/tuple/sql/ArrayOfDoublesSketchSqlAggregatorTest.java index 3be8e8b18854..a240f89bdcc8 100644 --- a/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/tuple/sql/ArrayOfDoublesSketchSqlAggregatorTest.java +++ b/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/tuple/sql/ArrayOfDoublesSketchSqlAggregatorTest.java @@ -158,13 +158,13 @@ public void testMetricsSumEstimate() cannotVectorize(); final String sql = "SELECT\n" - + " dim1,\n" - + " SUM(cnt),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(tuplesketch_dim2)),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(dim2, m1)),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(dim2, m1, 256))\n" - + 
"FROM druid.foo\n" - + "GROUP BY dim1"; + + " dim1,\n" + + " SUM(cnt),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(tuplesketch_dim2)),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(dim2, m1)),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(dim2, m1, 256))\n" + + "FROM druid.foo\n" + + "GROUP BY dim1"; final List expectedResults; @@ -189,54 +189,54 @@ public void testMetricsSumEstimate() sql, ImmutableList.of( GroupByQuery.builder() - .setDataSource(CalciteTests.DATASOURCE1) - .setInterval(querySegmentSpec(Filtration.eternity())) - .setGranularity(Granularities.ALL) - .setDimensions(new DefaultDimensionSpec("dim1", "d0", ColumnType.STRING)) - .setAggregatorSpecs( - aggregators( - new LongSumAggregatorFactory("a0", "cnt"), - new ArrayOfDoublesSketchAggregatorFactory( - "a1", - "tuplesketch_dim2", - null, - null, - null - ), - new ArrayOfDoublesSketchAggregatorFactory( - "a2", - "dim2", - null, - ImmutableList.of("m1"), - null - ), - new ArrayOfDoublesSketchAggregatorFactory( - "a3", - "dim2", - 256, - ImmutableList.of("m1"), - null - ) - ) - ) - .setPostAggregatorSpecs( - ImmutableList.of( - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p1", - new FieldAccessPostAggregator("p0", "a1") - ), - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p3", - new FieldAccessPostAggregator("p2", "a2") - ), - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p5", - new FieldAccessPostAggregator("p4", "a3") - ) - ) - ) - .setContext(QUERY_CONTEXT_DEFAULT) - .build() + .setDataSource(CalciteTests.DATASOURCE1) + .setInterval(querySegmentSpec(Filtration.eternity())) + .setGranularity(Granularities.ALL) + .setDimensions(new DefaultDimensionSpec("dim1", "d0", ColumnType.STRING)) + .setAggregatorSpecs( + aggregators( + new LongSumAggregatorFactory("a0", "cnt"), + new ArrayOfDoublesSketchAggregatorFactory( + "a1", + "tuplesketch_dim2", + null, + null, + null + ), + new 
ArrayOfDoublesSketchAggregatorFactory( + "a2", + "dim2", + null, + ImmutableList.of("m1"), + null + ), + new ArrayOfDoublesSketchAggregatorFactory( + "a3", + "dim2", + 256, + ImmutableList.of("m1"), + null + ) + ) + ) + .setPostAggregatorSpecs( + ImmutableList.of( + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p1", + new FieldAccessPostAggregator("p0", "a1") + ), + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p3", + new FieldAccessPostAggregator("p2", "a2") + ), + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p5", + new FieldAccessPostAggregator("p4", "a3") + ) + ) + ) + .setContext(QUERY_CONTEXT_DEFAULT) + .build() ), expectedResults ); @@ -248,14 +248,14 @@ public void testMetricsSumEstimateIntersect() cannotVectorize(); final String sql = "SELECT\n" - + " SUM(cnt),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(tuplesketch_dim2)) AS all_sum_estimates,\n" - + StringUtils.replace( - "DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES_INTERSECT(COMPLEX_DECODE_BASE64('arrayOfDoublesSketch', '%s'), DS_TUPLE_DOUBLES(tuplesketch_dim2), 128)) AS intersect_sum_estimates\n", - "%s", - COMPACT_BASE_64_ENCODED_SKETCH_FOR_INTERSECTION - ) - + "FROM druid.foo"; + + " SUM(cnt),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(tuplesketch_dim2)) AS all_sum_estimates,\n" + + StringUtils.replace( + "DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES_INTERSECT(COMPLEX_DECODE_BASE64('arrayOfDoublesSketch', '%s'), DS_TUPLE_DOUBLES(tuplesketch_dim2), 128)) AS intersect_sum_estimates\n", + "%s", + COMPACT_BASE_64_ENCODED_SKETCH_FOR_INTERSECTION + ) + + "FROM druid.foo"; final List expectedResults; @@ -268,8 +268,12 @@ public void testMetricsSumEstimateIntersect() ); final String expectedBase64Constant = "'" - + StringUtils.replace(COMPACT_BASE_64_ENCODED_SKETCH_FOR_INTERSECTION, "=", "\\u003D") - + "'"; + + StringUtils.replace( + COMPACT_BASE_64_ENCODED_SKETCH_FOR_INTERSECTION, + "=", + 
"\\u003D" + ) + + "'"; testQuery( sql, @@ -282,38 +286,40 @@ public void testMetricsSumEstimateIntersect() ImmutableList.of( new LongSumAggregatorFactory("a0", "cnt"), new ArrayOfDoublesSketchAggregatorFactory( - "a1", - "tuplesketch_dim2", - null, - null, - null + "a1", + "tuplesketch_dim2", + null, + null, + null ) ) ) .postAggregators( ImmutableList.of( - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p1", - new FieldAccessPostAggregator("p0", "a1") - ), - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p5", - new ArrayOfDoublesSketchSetOpPostAggregator( - "p4", - "INTERSECT", - 128, - null, - ImmutableList.of( - new ExpressionPostAggregator( - "p2", - "complex_decode_base64('arrayOfDoublesSketch'," + expectedBase64Constant + ")", - null, - queryFramework().macroTable() - ), - new FieldAccessPostAggregator("p3", "a1") - ) - ) - ) + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p1", + new FieldAccessPostAggregator("p0", "a1") + ), + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p5", + new ArrayOfDoublesSketchSetOpPostAggregator( + "p4", + "INTERSECT", + 128, + null, + ImmutableList.of( + new ExpressionPostAggregator( + "p2", + "complex_decode_base64('arrayOfDoublesSketch'," + + expectedBase64Constant + + ")", + null, + queryFramework().macroTable() + ), + new FieldAccessPostAggregator("p3", "a1") + ) + ) + ) ) ) .context(QUERY_CONTEXT_DEFAULT) @@ -329,12 +335,12 @@ public void testNullInputs() cannotVectorize(); final String sql = "SELECT\n" - + " DS_TUPLE_DOUBLES(NULL),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(NULL),\n" - + " DS_TUPLE_DOUBLES_UNION(NULL, NULL),\n" - + " DS_TUPLE_DOUBLES_UNION(NULL, DS_TUPLE_DOUBLES(tuplesketch_dim2)),\n" - + " DS_TUPLE_DOUBLES_UNION(DS_TUPLE_DOUBLES(tuplesketch_dim2), NULL)\n" - + "FROM druid.foo"; + + " DS_TUPLE_DOUBLES(NULL),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(NULL),\n" + + " DS_TUPLE_DOUBLES_UNION(NULL, NULL),\n" + + " 
DS_TUPLE_DOUBLES_UNION(NULL, DS_TUPLE_DOUBLES(tuplesketch_dim2)),\n" + + " DS_TUPLE_DOUBLES_UNION(DS_TUPLE_DOUBLES(tuplesketch_dim2), NULL)\n" + + "FROM druid.foo"; final List expectedResults; @@ -345,7 +351,7 @@ public void testNullInputs() "\"AQEJAwQBzJP/////////fw==\"", "\"AQEJAwgBzJP/////////fwIAAAAAAAAAjFnadZuMrkg6WYAWZ8t1NgAAAAAAACBAAAAAAAAANkA=\"", "\"AQEJAwgBzJP/////////fwIAAAAAAAAAjFnadZuMrkg6WYAWZ8t1NgAAAAAAACBAAAAAAAAANkA=\"", - } + } ); testQuery( @@ -366,57 +372,57 @@ public void testNullInputs() .aggregators( ImmutableList.of( new ArrayOfDoublesSketchAggregatorFactory( - "a0", - "v0", - null, - null, - null + "a0", + "v0", + null, + null, + null ), new ArrayOfDoublesSketchAggregatorFactory( - "a1", - "tuplesketch_dim2", - null, - null, - null + "a1", + "tuplesketch_dim2", + null, + null, + null ) ) ) .postAggregators( ImmutableList.of( - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p1", - new ExpressionPostAggregator("p0", "null", null, queryFramework().macroTable()) - ), - new ArrayOfDoublesSketchSetOpPostAggregator( - "p4", - ArrayOfDoublesSketchOperations.Operation.UNION.name(), - null, - null, - ImmutableList.of( - new ExpressionPostAggregator("p2", "null", null, queryFramework().macroTable()), - new ExpressionPostAggregator("p3", "null", null, queryFramework().macroTable()) - ) - ), - new ArrayOfDoublesSketchSetOpPostAggregator( - "p7", - ArrayOfDoublesSketchOperations.Operation.UNION.name(), - null, - null, - ImmutableList.of( - new ExpressionPostAggregator("p5", "null", null, queryFramework().macroTable()), - new FieldAccessPostAggregator("p6", "a1") - ) - ), - new ArrayOfDoublesSketchSetOpPostAggregator( - "p10", - ArrayOfDoublesSketchOperations.Operation.UNION.name(), - null, - null, - ImmutableList.of( - new FieldAccessPostAggregator("p8", "a1"), - new ExpressionPostAggregator("p9", "null", null, queryFramework().macroTable()) - ) - ) + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p1", + new 
ExpressionPostAggregator("p0", "null", null, queryFramework().macroTable()) + ), + new ArrayOfDoublesSketchSetOpPostAggregator( + "p4", + ArrayOfDoublesSketchOperations.Operation.UNION.name(), + null, + null, + ImmutableList.of( + new ExpressionPostAggregator("p2", "null", null, queryFramework().macroTable()), + new ExpressionPostAggregator("p3", "null", null, queryFramework().macroTable()) + ) + ), + new ArrayOfDoublesSketchSetOpPostAggregator( + "p7", + ArrayOfDoublesSketchOperations.Operation.UNION.name(), + null, + null, + ImmutableList.of( + new ExpressionPostAggregator("p5", "null", null, queryFramework().macroTable()), + new FieldAccessPostAggregator("p6", "a1") + ) + ), + new ArrayOfDoublesSketchSetOpPostAggregator( + "p10", + ArrayOfDoublesSketchOperations.Operation.UNION.name(), + null, + null, + ImmutableList.of( + new FieldAccessPostAggregator("p8", "a1"), + new ExpressionPostAggregator("p9", "null", null, queryFramework().macroTable()) + ) + ) ) ) .context(QUERY_CONTEXT_DEFAULT) @@ -429,24 +435,30 @@ public void testNullInputs() @Test public void testArrayOfDoublesSketchIntersectOnScalarExpression() { - assertQueryIsUnplannable("SELECT DS_TUPLE_DOUBLES_INTERSECT(NULL, NULL) FROM foo", - "Possible error: DS_TUPLE_DOUBLES_INTERSECT can only be used on aggregates. " + - "It cannot be used directly on a column or on a scalar expression."); + assertQueryIsUnplannable( + "SELECT DS_TUPLE_DOUBLES_INTERSECT(NULL, NULL) FROM foo", + "DS_TUPLE_DOUBLES_INTERSECT can only be used on aggregates. " + + "It cannot be used directly on a column or on a scalar expression." + ); } @Test public void testArrayOfDoublesSketchNotOnScalarExpression() { - assertQueryIsUnplannable("SELECT DS_TUPLE_DOUBLES_NOT(NULL, NULL) FROM foo", - "Possible error: DS_TUPLE_DOUBLES_NOT can only be used on aggregates. 
" + - "It cannot be used directly on a column or on a scalar expression."); + assertQueryIsUnplannable( + "SELECT DS_TUPLE_DOUBLES_NOT(NULL, NULL) FROM foo", + "DS_TUPLE_DOUBLES_NOT can only be used on aggregates. " + + "It cannot be used directly on a column or on a scalar expression." + ); } @Test public void testArrayOfDoublesSketchUnionOnScalarExpression() { - assertQueryIsUnplannable("SELECT DS_TUPLE_DOUBLES_UNION(NULL, NULL) FROM foo", - "Possible error: DS_TUPLE_DOUBLES_UNION can only be used on aggregates. " + - "It cannot be used directly on a column or on a scalar expression."); + assertQueryIsUnplannable( + "SELECT DS_TUPLE_DOUBLES_UNION(NULL, NULL) FROM foo", + "DS_TUPLE_DOUBLES_UNION can only be used on aggregates. " + + "It cannot be used directly on a column or on a scalar expression." + ); } } diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java index 94c2532ca793..7d27103e0b92 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java +++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java @@ -32,6 +32,7 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; @@ -86,7 +87,7 @@ public String name() } @Override - public void validateContext(Map queryContext) throws ValidationException + public void validateContext(Map queryContext) { SqlEngines.validateNoSpecialContextKeys(queryContext, SYSTEM_CONTEXT_PARAMETERS); } @@ -250,18 +251,17 @@ private static void validateInsert( * queries, because we use these output 
names to generate columns in segments. They must be unique. */ private static void validateNoDuplicateAliases(final List> fieldMappings) - throws ValidationException { final Set aliasesSeen = new HashSet<>(); for (final Pair field : fieldMappings) { if (!aliasesSeen.add(field.right)) { - throw new ValidationException("Duplicate field in SELECT: [" + field.right + "]"); + throw InvalidSqlInput.exception("Duplicate field in SELECT: [%s]", field.right); } } } - private static void validateLimitAndOffset(final RelNode topRel, final boolean limitOk) throws ValidationException + private static void validateLimitAndOffset(final RelNode topRel, final boolean limitOk) { Sort sort = null; @@ -283,13 +283,13 @@ private static void validateLimitAndOffset(final RelNode topRel, final boolean l // The segment generator relies on shuffle statistics to determine segment intervals when PARTITIONED BY is not ALL, // and LIMIT/OFFSET prevent shuffle statistics from being generated. This is because they always send everything // to a single partition, so there are no shuffle statistics. - throw new ValidationException( + throw InvalidSqlInput.exception( "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"." ); } if (sort != null && sort.offset != null) { // Found an outer OFFSET that is not allowed. 
- throw new ValidationException("INSERT and REPLACE queries cannot have an OFFSET."); + throw InvalidSqlInput.exception("INSERT and REPLACE queries cannot have an OFFSET."); } } diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java index f0cd7318f644..d270963db462 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java +++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java @@ -23,8 +23,9 @@ import com.google.common.collect.ImmutableMap; import com.google.common.io.CountingOutputStream; import com.google.inject.Inject; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.druid.common.exception.SanitizableException; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.ErrorResponse; import org.apache.druid.guice.annotations.MSQ; import org.apache.druid.indexer.TaskState; import org.apache.druid.java.util.common.guava.Sequence; @@ -69,15 +70,15 @@ /** * Endpoint for SQL execution using MSQ tasks. - * + *

* Unlike the SQL endpoint in {@link SqlResource}, this endpoint returns task IDs instead of inline results. Queries * are executed asynchronously using MSQ tasks via the indexing service (Overlord + MM or Indexer). This endpoint * does not provide a way for users to get the status or results of a query. That must be done using Overlord APIs * for status and reports. - * + *

* One exception: EXPLAIN query results are returned inline by this endpoint, in the same way as {@link SqlResource} * would return them. - * + *

* This endpoint does not support system tables or INFORMATION_SCHEMA. Queries on those tables result in errors. */ @Path("/druid/v2/sql/task/") @@ -129,7 +130,7 @@ public Response doGetEnabled(@Context final HttpServletRequest request) /** * Post a query task. - * + *

* Execution uses {@link MSQTaskSqlEngine} to ship the query off to the Overlord as an indexing task using * {@link org.apache.druid.msq.indexing.MSQControllerTask}. The task ID is returned immediately to the caller, * and execution proceeds asynchronously. @@ -159,6 +160,13 @@ public Response doPost( return buildStandardResponse(sequence, sqlQuery, sqlQueryId, rowTransformer); } } + catch (DruidException e) { + stmt.reporter().failed(e); + return Response.status(e.getStatusCode()) + .type(MediaType.APPLICATION_JSON_TYPE) + .entity(new ErrorResponse(e)) + .build(); + } // Kitchen-sinking the errors since they are all unchecked. // Just copied from SqlResource. catch (QueryCapacityExceededException cap) { @@ -182,14 +190,6 @@ public Response doPost( throw (ForbiddenException) serverConfig.getErrorResponseTransformStrategy() .transformIfNeeded(e); // let ForbiddenExceptionMapper handle this } - catch (RelOptPlanner.CannotPlanException e) { - stmt.reporter().failed(e); - SqlPlanningException spe = new SqlPlanningException( - SqlPlanningException.PlanningError.UNSUPPORTED_SQL_ERROR, - e.getMessage() - ); - return buildNonOkResponse(BadQueryException.STATUS_CODE, spe, sqlQueryId); - } // Calcite throws a java.lang.AssertionError which is type Error not Exception. Using Throwable catches both. 
catch (Throwable e) { stmt.reporter().failed(e); diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java index b55de6c165c9..1769c3028a5b 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java @@ -25,6 +25,7 @@ import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import org.apache.druid.hll.HyperLogLogCollector; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Intervals; @@ -43,7 +44,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.column.ValueType; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.timeline.SegmentId; import org.apache.druid.utils.CompressionUtils; import org.hamcrest.CoreMatchers; @@ -92,7 +92,6 @@ public static Collection data() @Parameterized.Parameter(1) public Map context; - @Test public void testInsertOnFoo1() { @@ -541,11 +540,9 @@ public void testInsertOnFoo1WithMultiValueMeasureGroupBy() "INSERT INTO foo1 SELECT count(dim3) FROM foo WHERE dim3 IS NOT NULL GROUP BY 1 PARTITIONED BY ALL TIME") .setExpectedDataSource("foo1") .setQueryContext(context) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "Aggregate expression is illegal in GROUP BY clause")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlContains("Aggregate expression is illegal in GROUP BY clause") + ) .verifyPlanningErrors(); } @@ -747,11 +744,9 @@ public void 
testInsertWithClusteredByDescendingThrowsException() + "PARTITIONED BY DAY " + "CLUSTERED BY dim1 DESC" ) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "[`dim1` DESC] is invalid. CLUSTERED BY columns cannot be sorted in descending order.")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlIs("Invalid CLUSTERED BY clause [`dim1` DESC]: cannot sort in descending order.") + ) .verifyPlanningErrors(); } @@ -967,7 +962,7 @@ public void testInsertWrongTypeTimestamp() .setExpectedRowSignature(rowSignature) .setQueryContext(context) .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "Field \"__time\" must be of type TIMESTAMP")) )) @@ -977,14 +972,14 @@ public void testInsertWrongTypeTimestamp() @Test public void testIncorrectInsertQuery() { - testIngestQuery().setSql( - "insert into foo1 select __time, dim1 , count(*) as cnt from foo where dim1 is not null group by 1, 2 clustered by dim1") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause")) - )) - .verifyPlanningErrors(); + testIngestQuery() + .setSql( + "insert into foo1 select __time, dim1 , count(*) as cnt from foo where dim1 is not null group by 1, 2 clustered by dim1" + ) + .setExpectedValidationErrorMatcher(invalidSqlContains( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + )) + .verifyPlanningErrors(); } @@ -1032,11 +1027,9 @@ public void testInsertDuplicateColumnNames() + " )\n" + ") PARTITIONED by day") .setQueryContext(context) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "Duplicate field in SELECT: [namespace]")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlIs("Duplicate field in SELECT: [namespace]") + ) .verifyPlanningErrors(); } @@ -1097,11 +1090,11 @@ public void testInsertLimitWithPeriodGranularityThrowsException() + "FROM foo " + "LIMIT 50 " + "PARTITIONED BY MONTH") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlContains( + "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"" + ) + ) .setQueryContext(context) .verifyPlanningErrors(); } @@ -1115,11 +1108,9 @@ public void testInsertOffsetThrowsException() + "LIMIT 50 " + "OFFSET 10" + "PARTITIONED BY ALL TIME") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "INSERT and REPLACE queries cannot have an OFFSET")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlContains("INSERT and 
REPLACE queries cannot have an OFFSET") + ) .setQueryContext(context) .verifyPlanningErrors(); } diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java index 1dfd7742146e..500d2a68bee8 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java @@ -30,13 +30,10 @@ import org.apache.druid.msq.test.MSQTestTaskActionClient; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.timeline.DataSegment; import org.apache.druid.timeline.SegmentId; import org.apache.druid.timeline.partition.DimensionRangeShardSpec; -import org.hamcrest.CoreMatchers; import org.junit.Test; -import org.junit.internal.matchers.ThrowableMessageMatcher; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.mockito.ArgumentMatchers; @@ -327,17 +324,15 @@ public void testReplaceOnFoo1WithWhereExtern() throws IOException @Test public void testReplaceIncorrectSyntax() { - testIngestQuery().setSql("REPLACE INTO foo1 OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") - .setExpectedDataSource("foo1") - .setQueryContext(context) - .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "Missing time chunk information in OVERWRITE clause for REPLACE. 
Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.")) - ) - ) - .verifyPlanningErrors(); + testIngestQuery() + .setSql("REPLACE INTO foo1 OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") + .setExpectedDataSource("foo1") + .setQueryContext(context) + .setExpectedValidationErrorMatcher(invalidSqlContains( + "Missing time chunk information in OVERWRITE clause for REPLACE. " + + "Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." + )) + .verifyPlanningErrors(); } @Test @@ -581,10 +576,8 @@ public void testReplaceLimitWithPeriodGranularityThrowsException() + "LIMIT 50" + "PARTITIONED BY MONTH") .setQueryContext(context) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) + .setExpectedValidationErrorMatcher(invalidSqlContains( + "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"" )) .verifyPlanningErrors(); } @@ -599,10 +592,8 @@ public void testReplaceOffsetThrowsException() + "LIMIT 50 " + "OFFSET 10" + "PARTITIONED BY ALL TIME") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "INSERT and REPLACE queries cannot have an OFFSET")) + .setExpectedValidationErrorMatcher(invalidSqlContains( + "INSERT and REPLACE queries cannot have an OFFSET" )) .setQueryContext(context) .verifyPlanningErrors(); @@ -742,11 +733,9 @@ public void testReplaceWithClusteredByDescendingThrowsException() + "PARTITIONED BY ALL TIME " + "CLUSTERED BY m2, m1 DESC" ) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "[`m1` 
DESC] is invalid. CLUSTERED BY columns cannot be sorted in descending order.")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlIs("Invalid CLUSTERED BY clause [`m1` DESC]: cannot sort in descending order.") + ) .verifyPlanningErrors(); } diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java index b09e3aa01f11..0ad6e4d90bea 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java @@ -25,6 +25,8 @@ import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.JsonInputFormat; import org.apache.druid.data.input.impl.LocalInputSource; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.frame.util.DurableStorageUtils; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.ISE; @@ -64,7 +66,6 @@ import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.join.JoinType; import org.apache.druid.segment.virtual.ExpressionVirtualColumn; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.external.ExternalDataSource; import org.apache.druid.sql.calcite.filtration.Filtration; @@ -72,7 +73,6 @@ import org.apache.druid.sql.calcite.planner.ColumnMappings; import org.apache.druid.sql.calcite.planner.JoinAlgorithm; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.util.CalciteTests; import org.hamcrest.CoreMatchers; import org.junit.Test; @@ -95,7 +95,6 @@ @RunWith(Parameterized.class) public class 
MSQSelectTest extends MSQTestBase { - @Parameterized.Parameters(name = "{index}:with context {0}") public static Collection data() { @@ -1186,10 +1185,9 @@ public void testIncorrectSelectQuery() { testSelectQuery() .setSql("select a from ") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Encountered \"from \"")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlContains("Received an unexpected token [from ] (line [1], column [10]), acceptable options") + ) .setQueryContext(context) .verifyPlanningErrors(); } @@ -1201,11 +1199,7 @@ public void testSelectOnInformationSchemaSource() .setSql("SELECT * FROM INFORMATION_SCHEMA.SCHEMATA") .setQueryContext(context) .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "Cannot query table INFORMATION_SCHEMA.SCHEMATA with SQL engine 'msq-task'.")) - ) + invalidSqlIs("Cannot query table(s) [INFORMATION_SCHEMA.SCHEMATA] with SQL engine [msq-task]") ) .verifyPlanningErrors(); } @@ -1217,11 +1211,7 @@ public void testSelectOnSysSource() .setSql("SELECT * FROM sys.segments") .setQueryContext(context) .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "Cannot query table sys.segments with SQL engine 'msq-task'.")) - ) + invalidSqlIs("Cannot query table(s) [sys.segments] with SQL engine [msq-task]") ) .verifyPlanningErrors(); } @@ -1233,11 +1223,7 @@ public void testSelectOnSysSourceWithJoin() .setSql("select s.segment_id, s.num_rows, f.dim1 from sys.segments as s, foo as f") .setQueryContext(context) .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - 
ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "Cannot query table sys.segments with SQL engine 'msq-task'.")) - ) + invalidSqlIs("Cannot query table(s) [sys.segments] with SQL engine [msq-task]") ) .verifyPlanningErrors(); } @@ -1250,16 +1236,11 @@ public void testSelectOnSysSourceContainingWith() + "select segment_source.segment_id, segment_source.num_rows from segment_source") .setQueryContext(context) .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "Cannot query table sys.segments with SQL engine 'msq-task'.")) - ) + invalidSqlIs("Cannot query table(s) [sys.segments] with SQL engine [msq-task]") ) .verifyPlanningErrors(); } - @Test public void testSelectOnUserDefinedSourceContainingWith() { @@ -1644,8 +1625,13 @@ public void testTimeColumnAggregationFromExtern() throws IOException + "FROM kttm_data " + "GROUP BY 1") .setExpectedValidationErrorMatcher( - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "LATEST() aggregator depends on __time column")) + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "general") + .expectMessageIs( + "Query planning failed for unknown reason, our best guess is this " + + "[LATEST and EARLIEST aggregators implicitly depend on the __time column, " + + "but the table queried doesn't contain a __time column. 
" + + "Please use LATEST_BY or EARLIEST_BY and specify the column explicitly.]" + ) ) .setExpectedRowSignature(rowSignature) .verifyPlanningErrors(); @@ -1676,7 +1662,7 @@ public void testGroupByWithComplexColumnThrowsUnsupportedException() .setSql("select unique_dim1 from foo2 group by unique_dim1") .setQueryContext(context) .setExpectedExecutionErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(UnsupportedSQLQueryException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "SQL requires a group-by on a column of type COMPLEX that is unsupported")) )) diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/worker/shuffle/LocalIntermediaryDataManagerManualAddAndDeleteTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/worker/shuffle/LocalIntermediaryDataManagerManualAddAndDeleteTest.java index 75fed3e81dde..fabb1cfb1961 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/worker/shuffle/LocalIntermediaryDataManagerManualAddAndDeleteTest.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/worker/shuffle/LocalIntermediaryDataManagerManualAddAndDeleteTest.java @@ -24,6 +24,8 @@ import com.google.common.primitives.Ints; import org.apache.commons.io.FileUtils; import org.apache.druid.client.indexing.NoopOverlordClient; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.indexing.common.config.TaskConfig; import org.apache.druid.indexing.common.config.TaskConfigBuilder; import org.apache.druid.indexing.worker.config.WorkerConfig; @@ -36,13 +38,13 @@ import org.apache.druid.timeline.partition.BuildingShardSpec; import org.apache.druid.timeline.partition.ShardSpec; import org.apache.druid.timeline.partition.ShardSpecLookup; +import org.hamcrest.MatcherAssert; import org.joda.time.Interval; import org.junit.After; import org.junit.Assert; import org.junit.Before; 
import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.rules.TemporaryFolder; import java.io.File; @@ -56,9 +58,6 @@ public class LocalIntermediaryDataManagerManualAddAndDeleteTest @Rule public TemporaryFolder tempDir = new TemporaryFolder(); - @Rule - public ExpectedException expectedException = ExpectedException.none(); - private LocalIntermediaryDataManager intermediaryDataManager; private File intermediarySegmentsLocation; private File siblingLocation; @@ -93,11 +92,14 @@ public void testAddSegmentFailure() throws IOException DataSegment segment = newSegment(Intervals.of("2018/2019"), i); intermediaryDataManager.addSegment("supervisorTaskId", "subTaskId", segment, segmentFile); } - expectedException.expect(IllegalStateException.class); - expectedException.expectMessage("Can't find location to handle segment"); File segmentFile = generateSegmentDir("file_" + i); DataSegment segment = newSegment(Intervals.of("2018/2019"), 4); - intermediaryDataManager.addSegment("supervisorTaskId", "subTaskId", segment, segmentFile); + + IllegalStateException e = Assert.assertThrows( + IllegalStateException.class, + () -> intermediaryDataManager.addSegment("supervisorTaskId", "subTaskId", segment, segmentFile) + ); + Assert.assertEquals(StringUtils.format("Can't find location to handle segment[%s]", segment), e.getMessage()); } @Test @@ -140,7 +142,8 @@ public void deletePartitions() throws IOException for (int partitionId = 0; partitionId < 2; partitionId++) { for (int subTaskId = 0; subTaskId < 2; subTaskId++) { Assert.assertFalse( - intermediaryDataManager.findPartitionFile(supervisorTaskId, "subTaskId_" + subTaskId, interval, partitionId).isPresent() + intermediaryDataManager.findPartitionFile(supervisorTaskId, "subTaskId_" + subTaskId, interval, partitionId) + .isPresent() ); } } @@ -166,8 +169,6 @@ public void testAddRemoveAdd() throws IOException @Test public void testFailsWithCraftyFabricatedNamesForDelete() throws 
IOException { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("supervisorTaskId cannot start with the '.' character."); final String supervisorTaskId = "../" + siblingLocation.getName(); final String someFile = "sneaky-snake.txt"; File dataFile = new File(siblingLocation, someFile); @@ -178,7 +179,15 @@ public void testFailsWithCraftyFabricatedNamesForDelete() throws IOException ); Assert.assertTrue(new File(intermediarySegmentsLocation, supervisorTaskId).exists()); Assert.assertTrue(dataFile.exists()); - intermediaryDataManager.deletePartitions(supervisorTaskId); + MatcherAssert.assertThat( + Assert.assertThrows(DruidException.class, () -> intermediaryDataManager.deletePartitions(supervisorTaskId)), + DruidExceptionMatcher.invalidInput().expectMessageIs( + StringUtils.format( + "Invalid value for field [supervisorTaskId]: Value [%s] cannot start with '.'.", + supervisorTaskId + ) + ) + ); Assert.assertTrue(new File(intermediarySegmentsLocation, supervisorTaskId).exists()); Assert.assertTrue(dataFile.exists()); } @@ -186,8 +195,6 @@ public void testFailsWithCraftyFabricatedNamesForDelete() throws IOException @Test public void testFailsWithCraftyFabricatedNamesForFind() throws IOException { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("supervisorTaskId cannot start with the '.' 
character."); final String supervisorTaskId = "../" + siblingLocation.getName(); final Interval interval = Intervals.of("2018/2019"); final int partitionId = 0; @@ -211,13 +218,22 @@ public void testFailsWithCraftyFabricatedNamesForFind() throws IOException Assert.assertTrue( new File(intermediarySegmentsLocation, supervisorTaskId + "/" + someFilePath).exists()); - final Optional foundFile1 = intermediaryDataManager.findPartitionFile( - supervisorTaskId, - someFile, - interval, - partitionId + + MatcherAssert.assertThat( + Assert.assertThrows(DruidException.class, () -> + intermediaryDataManager.findPartitionFile( + supervisorTaskId, + someFile, + interval, + partitionId + )), + DruidExceptionMatcher.invalidInput().expectMessageIs( + StringUtils.format( + "Invalid value for field [supervisorTaskId]: Value [%s] cannot start with '.'.", + supervisorTaskId + ) + ) ); - Assert.assertFalse(foundFile1.isPresent()); } private File generateSegmentDir(String fileName) throws IOException diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java index 72e682c9fbf2..8d07f1b8d607 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java @@ -213,13 +213,15 @@ public void testJdbcPrepareStatementQuery() } } - @Test(expectedExceptions = AvaticaSqlException.class, expectedExceptionsMessageRegExp = ".* Parameter at position \\[0] is not bound") + @Test(expectedExceptions = AvaticaSqlException.class, expectedExceptionsMessageRegExp = ".* No value bound for parameter \\(position \\[1]\\)") public void testJdbcPrepareStatementQueryMissingParameters() throws SQLException { for (String url : connections) { try (Connection connection = DriverManager.getConnection(url, connectionProperties); PreparedStatement statement = 
connection.prepareStatement(QUERY_PARAMETERIZED); ResultSet resultSet = statement.executeQuery()) { + // This won't actually run as we expect the exception to be thrown before it gets here + throw new IllegalStateException(resultSet.toString()); } } } diff --git a/processing/src/main/java/org/apache/druid/common/exception/DruidException.java b/processing/src/main/java/org/apache/druid/common/exception/DruidException.java index 638653fc5cee..42a679b6bd2f 100644 --- a/processing/src/main/java/org/apache/druid/common/exception/DruidException.java +++ b/processing/src/main/java/org/apache/druid/common/exception/DruidException.java @@ -21,7 +21,11 @@ /** * A generic exception thrown by Druid. + * + * This class is deprecated and should not be used. {@link org.apache.druid.error.DruidException} should be used for + * any error that is intended to be delivered to the end user. */ +@Deprecated public class DruidException extends RuntimeException { public static final int HTTP_CODE_SERVER_ERROR = 500; diff --git a/processing/src/main/java/org/apache/druid/common/utils/IdUtils.java b/processing/src/main/java/org/apache/druid/common/utils/IdUtils.java index 2d3f33010162..88d4d0d413ba 100644 --- a/processing/src/main/java/org/apache/druid/common/utils/IdUtils.java +++ b/processing/src/main/java/org/apache/druid/common/utils/IdUtils.java @@ -21,10 +21,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import org.apache.druid.error.InvalidInput; import org.apache.druid.java.util.common.DateTimes; -import org.apache.druid.java.util.common.IAE; import org.joda.time.DateTime; import org.joda.time.Interval; @@ -43,23 +42,32 @@ public class IdUtils public static String validateId(String thingToValidate, String stringToValidate) { - Preconditions.checkArgument( - !Strings.isNullOrEmpty(stringToValidate), - "%s cannot be null or empty. 
Please provide a %s.", thingToValidate, thingToValidate - ); - Preconditions.checkArgument( - !stringToValidate.startsWith("."), - "%s cannot start with the '.' character.", thingToValidate - ); - Preconditions.checkArgument( - !stringToValidate.contains("/"), - "%s cannot contain the '/' character.", thingToValidate - ); + if (Strings.isNullOrEmpty(stringToValidate)) { + throw InvalidInput.exception("Invalid value for field [%s]: must not be null", thingToValidate); + } + if (stringToValidate.startsWith(".")) { + throw InvalidInput.exception( + "Invalid value for field [%s]: Value [%s] cannot start with '.'.", + thingToValidate, + stringToValidate + ); + } + if (stringToValidate.contains("/")) { + throw InvalidInput.exception( + "Invalid value for field [%s]: Value [%s] cannot contain '/'.", + thingToValidate, + stringToValidate + ); + } + Matcher m = INVALIDCHARS.matcher(stringToValidate); - Preconditions.checkArgument( - !m.matches(), - "%s cannot contain whitespace character except space.", thingToValidate - ); + if (m.matches()) { + throw InvalidInput.exception( + "Invalid value for field [%s]: Value [%s] contains illegal whitespace characters. Only space is allowed.", + thingToValidate, + stringToValidate + ); + } for (int i = 0; i < stringToValidate.length(); i++) { final char c = stringToValidate.charAt(i); @@ -68,7 +76,13 @@ public static String validateId(String thingToValidate, String stringToValidate) // znode paths. The first two ranges are control characters, the second two ranges correspond to surrogate // pairs. This means that characters outside the basic multilingual plane, such as emojis, are not allowed. 
😢 if (c > 0 && c < 31 || c > 127 && c < 159 || c > '\ud800' && c < '\uf8ff' || c > '\ufff0' && c < '\uffff') { - throw new IAE("%s cannot contain character #%d (at position %d).", thingToValidate, (int) c, i); + throw InvalidInput.exception( + "Invalid value for field [%s]: Value [%s] contains illegal UTF8 character [#%d] at position [%d]", + thingToValidate, + stringToValidate, + (int) c, + i + ); } } @@ -94,7 +108,12 @@ public static String newTaskId(String typeName, String dataSource, @Nullable Int return newTaskId(null, typeName, dataSource, interval); } - public static String newTaskId(@Nullable String idPrefix, String typeName, String dataSource, @Nullable Interval interval) + public static String newTaskId( + @Nullable String idPrefix, + String typeName, + String dataSource, + @Nullable Interval interval + ) { return newTaskId(idPrefix, getRandomId(), DateTimes.nowUtc(), typeName, dataSource, interval); } diff --git a/processing/src/main/java/org/apache/druid/data/input/impl/JsonNodeReader.java b/processing/src/main/java/org/apache/druid/data/input/impl/JsonNodeReader.java index a6ebb0a91136..b5a61f692929 100644 --- a/processing/src/main/java/org/apache/druid/data/input/impl/JsonNodeReader.java +++ b/processing/src/main/java/org/apache/druid/data/input/impl/JsonNodeReader.java @@ -56,7 +56,7 @@ *

* The input text can be: * 1. a JSON string of an object in a line or multiple lines(such as pretty-printed JSON text) - * 2. multiple JSON object strings concated by white space character(s) + * 2. multiple JSON object strings concatenated by white space character(s) *

* If an input string contains invalid JSON syntax, any valid JSON objects found prior to encountering the invalid * syntax will be successfully parsed, but parsing will not continue after the invalid syntax. diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java new file mode 100644 index 000000000000..6acedf55fdb0 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -0,0 +1,449 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +import com.fasterxml.jackson.annotation.JsonValue; +import com.google.common.base.Preconditions; +import org.apache.druid.java.util.common.StringUtils; + +import javax.annotation.concurrent.NotThreadSafe; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Represents an error condition exposed to the user and/or operator of Druid. Given that a DruidException is intended + * to be delivered to the end user, it should generally never be caught. DruidExceptions are generated at terminal + * points where the operation that was happening cannot make forward progress. 
As such, the only reason to catch a + * DruidException is if the code has some extra context that it wants to add to the message of the DruidException using + * {@link #prependAndBuild(String, Object...)}. If code wants to catch and handle an exception instead, it should not + * be using the DruidException. + *

+ * Said another way, when a developer builds a DruidException in the code, they should be confident that the exception + * will make its way back to the user. DruidException is always the answer to "how do I generate an error message and + * deliver it to the user"? + *

+ * At the time that DruidException was introduced, this type of "show this to the user please" exception was largely + * handled by created {@link org.apache.druid.java.util.common.RE}, {@link org.apache.druid.java.util.common.IAE}, or + * {@link org.apache.druid.java.util.common.ISE} objects. It is intended that DruidException replaces all usage of + * these exceptions where the intention is to deliver a message to the user, which we believe to be the vast majority + * of usages. In cases where those exceptions are with the intention of being caught and acted upon, they should + * no change should occur. + * + * Notes about exception messages + * + * Firstly, exception messages should always be written with the notions from the style conventions covered in + * {@code dev/style-conventions.md}. Whenever possible, we should also try to provide an action to take to resolve + * the issue. + * + * Secondly, given that the DruidException requires defining a target persona, exception messages should always be + * written with that target persona in mind. Reviewers should use the targetPersona as added input to help validate + * that an exception message in meaningful. + * + * For example, at the time that this exception was introduced, there is an exception that the router throws which is + * an {@link org.apache.druid.java.util.common.ISE} with the message {@code "No default server found!"}. This + * exception is thrown when the router is unable to find a broker to forward a request to. It is completely + * meaningless to an end-user trying to run a query (what's a default server? why does it need to be found?). If we + * were to convert the exception to a DruidException and keep the same message, we should mark it as targetting the + * DEVELOPER persona as that is the only persona who should actually be able to figure out what a default server is + * and why it is important. 
That said, does it makes sense for an exception that means "router cannot find a broker + * to forward the query to" to only be targetting the DEVELOPER? The answer to that is no, it's something that should + * really be made meaningful to a wider group. Some options could be + * + * USER persona: Cannot find a queryable server, contact your cluster administrator to validate that all services are + * operational + * + * OPERATOR persona: Router unable to find a broker, check that brokers are up and active + * + * The user-facing message doesn't talk about any Druid-specific concepts and just tries to relay a high-level + * understanding of what happened. The admin-facing message includes Druid notions in it as it expects that an Admin + * will understand the various node types of Druid. + * + * If we think about this error more, we will realize that it's fundamentally something wrong with the cluster setup, + * which is something that we would expect an operator to be in charge of. So, we would pick the OPERATOR persona + * message, which also allows us to include more specific information about what server was not found and provide a + * more meaningful action to take (check the health of your brokers). + * + * Description of fields of DruidException + * Every error consists of: + *

    + *
  • A target persona
  • + *
  • A categorization of the error
  • + *
  • An error code
  • + *
  • An error message
  • + *
  • A context (possibly empty)
  • + *
+ *

+ *

+ * The target persona indicates who the message is written for. This is important for 2 reasons + *

    + *
  1. It identifies why the developer is creating the exception and who they believe can take action on it. + * This context allows for code reviewers and other developers to evaluate the message with the persona in mind
  2. + *
  3. It can be used as a way to control which error messages should be routed where. For example, a user-targetted + * error message should be able to be exposed directly to the user, while an operator-targetted error message should + * perhaps be routed to the operators of the system instead of the end user firing a query.
  4. + *
+ *

+ * The category indicates what kind of failure occurred. This is leveraged to align response codes (e.g. HTTP response + * codes) for similar exception messages. + *

+ * The error code is a code that indicates a grouping of error messages. There is no forced structure around whether + * a specific error code can be reused for different problems or not. That is, an error code like "general" will get + * reused in many different places as it's the basic error code used whenever a DruidException is created in-line. But, + * we might decide that a specific type of error should be identified explicitly by its error code and should only mean + * one thing, in which case that error code might only exist on a single error. + *

+ * The error message is a message written targetting the target persona. It should have values interpolated into it + * in order to be as meaningful as possible for the target persona without leaking potentially sensitive information. + *

+ * The context is a place to add extra information about the error that is not necessarily interpolated into the + * error message. It's a way to carry extra information that might be useful to a developer, but not necessarily to + * the target persona. + * + * Notes for developers working with DruidException + *

+ * A DruidException can be built from one of 2 static methods: {@link #forPersona} or {@link #fromFailure(Failure)}. + * The only way to set a specific error code is to build a DruidException from a Failure, when built in-line using + * forPersona, it will always be an "general" error. + *

+ * Additionally, DruidException is not intended to be directly serialized. The intention is that something converts + * it into an {@link ErrorResponse} first using {@link ErrorResponse#ErrorResponse(DruidException)} and then that + * ErrorResponse is used for serialization. DruidException carries a {@link #toErrorResponse()} method because there + * are some code paths that directly serialize Exceptions and adjusting them was deemed out-of-scope for the PR that + * introduced DruidException. + */ +@NotThreadSafe +public class DruidException extends RuntimeException +{ + /** + * Starts building an "general" DruidException targetting the specific persona. + * + * @param persona the target persona of the exception message + * @return a builder that can be used to complete the creation of the DruidException + */ + public static PartialDruidExceptionBuilder forPersona(Persona persona) + { + return new PartialDruidExceptionBuilder("general", persona); + } + + /** + * Builds a DruidException using the provided Failure class. 
The errorCode is determined by the + * specific Failure class being used and the Failure class is responsible for setting all other + * required fields of the DruidException + * + * @param failure failure implementation to use to build the DruidException + * @return DruidException instance built from the Failure instance provided + */ + public static DruidException fromFailure(Failure failure) + { + return failure.makeException(new DruidExceptionBuilder(failure.getErrorCode())); + } + + private final Persona targetPersona; + private final Category category; + private final String errorCode; + protected final Map context = new LinkedHashMap<>(); + + private DruidException( + Throwable cause, + final String errorCode, + Persona targetPersona, + Category category, + final String message + ) + { + this(cause, errorCode, targetPersona, category, message, false); + } + + private DruidException( + Throwable throwable, + final String errorCode, + Persona targetPersona, + Category category, + String message, + boolean deserialized + ) + { + super(message, throwable, true, !deserialized); + this.errorCode = Preconditions.checkNotNull(errorCode, "errorCode"); + this.targetPersona = Preconditions.checkNotNull(targetPersona, "targetPersona"); + this.category = Preconditions.checkNotNull(category, "category"); + } + + public DruidException withContext(String key, Object value) + { + context.put(key, value == null ? 
null : value.toString()); + return this; + } + + public DruidException withContext(Map values) + { + this.context.putAll(values); + return this; + } + + public Persona getTargetPersona() + { + return targetPersona; + } + + public Category getCategory() + { + return category; + } + + public String getErrorCode() + { + return errorCode; + } + + public String getContextValue(String key) + { + return context.get(key); + } + + public Map getContext() + { + return context; + } + + public int getStatusCode() + { + return category.getExpectedStatus(); + } + + /** + * Returns this DruidException as an ErrorResponse. This method exists for compatibility with some older code + * paths that serialize out Exceptions directly using Jackson. Instead of serializing a DruidException + * directly, code should be structured to take the DruidException and build an ErrorResponse from it to be + * used to push across the wire. + *

+ * As such, this method should be deleted in some future world. Anyone wondering how to serialize and deserialize + * a DruidException should look at {@link ErrorResponse} and leverage that instead of this. + * + * @return an ErrorResponse + */ + @SuppressWarnings("unused") + @JsonValue + public ErrorResponse toErrorResponse() + { + return new ErrorResponse(this); + } + + /** + * Builds a new DruidException with a message that is the result of prepending the message passed as a parameter + * with the message already on the DruidException. + * + * @param msg Message to be prepended, can be a Java format string + * @param args Arguments to be passed to the message if it is a Java format string + * @return a new DruidException with prepended-message + */ + public DruidException prependAndBuild(String msg, Object... args) + { + return new DruidException( + this, + errorCode, + targetPersona, + category, + StringUtils.format("%s: %s", StringUtils.nonStrictFormat(msg, args), getMessage()) + ).withContext(context); + } + + /** + * The persona that the message on a DruidException is targetting + */ + public enum Persona + { + /** + * Represents the end-user, a persona who is issuing queries to the Druid Query APIs + */ + USER, + /** + * Represents an administrative user, a persona who is interacting with admin APIs and understands Druid query + * concepts without necessarily owning the infrastructure and operations of the cluster + */ + ADMIN, + /** + * Represents a persona who actively owns and operates the cluster. This persona is not assumed to understand + * Druid query concepts, but instead understand cluster operational concepts. + */ + OPERATOR, + /** + * Represents someone who has all of the context and knowledge to be actively diving into the Druid codebase. + * This persona exists as a catch-all for anything that is so deep and technically in the weeds that it is not + * possible to make a message that will make sense to a different persona. 
Generally speaking, there is a hope + * that only DEFENSIVE error messages will target this persona. + */ + DEVELOPER + } + + /** + * Category of error. The simplest way to describe this is that it exists as a classification of errors that + * enables us to identify the expected response code (e.g. HTTP status code) of a specific DruidException + */ + public enum Category + { + /** + * Means that the exception is being created defensively, because we want to validate something but expect that + * it should never actually be hit. Using this category is good to provide an indication to future reviewers and + * developers that the case being checked is not intended to actually be able to occur in the wild. + */ + DEFENSIVE(500), + /** + * Means that the input provided was malformed in some way. Generally speaking, it is hoped that errors of this + * category have messages written either targetting the USER or ADMIN personas as those are the general users + * of the APIs who could generate invalid inputs. + */ + INVALID_INPUT(400), + /** + * Means that the error is a problem with authorization. + */ + UNAUTHORIZED(401), + /** + * Means that some capacity limit was exceeded, this could be due to throttling or due to some system limit + */ + CAPACITY_EXCEEDED(429), + /** + * Means that the query was canceled for some reason + */ + CANCELED(500), + /** + * Indicates a server-side failure of some sort at runtime + */ + RUNTIME_FAILURE(500), + /** + * A timeout happened + */ + TIMEOUT(504), + /** + * Indicates some unsupported behavior was requested. + */ + UNSUPPORTED(501), + /** + * A catch-all for any time when we cannot come up with a meaningful categorization. 
This is hopefully only + * used when converting generic exceptions from frameworks and libraries that we do not control into DruidExcpetions + */ + UNCATEGORIZED(500); + + private final int expectedStatus; + + Category(int expectedStatus) + { + this.expectedStatus = expectedStatus; + } + + public int getExpectedStatus() + { + return expectedStatus; + } + } + + public static class PartialDruidExceptionBuilder + { + private String errorCode; + private Persona targetPersona; + + private PartialDruidExceptionBuilder(String errorCode, Persona targetPersona) + { + this.errorCode = errorCode; + this.targetPersona = targetPersona; + } + + public DruidExceptionBuilder ofCategory(Category category) + { + return new DruidExceptionBuilder(errorCode).forPersona(targetPersona).ofCategory(category); + } + } + + public static class DruidExceptionBuilder + { + private String errorCode; + private Persona targetPersona; + private Category category; + + private boolean deserialized = false; + + private DruidExceptionBuilder(String errorCode) + { + this.errorCode = errorCode; + } + + public DruidExceptionBuilder forPersona(Persona targetPersona) + { + this.targetPersona = targetPersona; + return this; + } + + public DruidExceptionBuilder ofCategory(Category category) + { + this.category = category; + return this; + } + + /** + * Exists for ErrorMessage to be able to indicate that the exception was deserialized and (therefore) + * should not carry any stack-trace as the stack-trace generated would be to the deserialization code rather than + * the actual error. + * + * @return the builder + */ + DruidExceptionBuilder wasDeserialized() + { + this.deserialized = true; + return this; + } + + public DruidException build(String formatMe, Object... vals) + { + return build(null, formatMe, vals); + } + + public DruidException build(Throwable cause, String formatMe, Object... 
vals) + { + return new DruidException( + cause, + errorCode, + targetPersona, + category, + StringUtils.nonStrictFormat(formatMe, vals), + deserialized + ); + } + } + + public abstract static class Failure + { + private final String errorCode; + + public Failure( + String errorCode + ) + { + this.errorCode = errorCode; + } + + public String getErrorCode() + { + return errorCode; + } + + protected abstract DruidException makeException(DruidExceptionBuilder bob); + } + +} diff --git a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java new file mode 100644 index 000000000000..7b571cca2719 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; +import org.apache.druid.query.QueryException; + +import javax.annotation.Nullable; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * A Response Object that represents an error to be returned over the wire. 
This object carries legacy bits to + * deal with compatibility issues of converging the error responses from {@link QueryException} + * with the intended going-forward error responses from {@link DruidException} + *

+ * The intent is that eventually {@link QueryException} is completely subsumed by + * {@link DruidException} in which case the legacy bits of this class can hopefully also be removed. + *

+ * The intended long-term schema of output is an object that looks like + *

+ * { + * "errorCode": `a code string`, + * "persona": USER | ADMIN | OPERATOR | DEVELOPER + * "category": DEFENSIVE | INVALID_INPUT | UNAUTHORIZED | CAPACITY_EXCEEDED | CANCELED | RUNTIME_FAILURE | TIMEOUT | UNSUPPORTED | UNCATEGORIZED + * "errorMessage": `a message for the intended audience` + * "context": `a map of extra context values that might be helpful` + * } + *

+ * In the interim, there are extra fields that also end up included so that the wire-schema can also be interpreted + * and handled by clients that are built assuming they are looking at QueryExceptions. These extra fields are + *

+ * { + * "error": `an error code from QueryException` | "druidException" + * "errorClass": `the error class, as used by QueryException` + * "host": `the host that the exception occurred on, as used by QueryException` + * } + *

+ * These 3 top-level fields are deprecated and will eventually disappear from API responses. The values can, instead, + * be pulled from the context object of an "legacyQueryException" errorCode object. The field names in the context + * object map as follows + * * "error" -> "legacyErrorCode" + * * "errorClass" -> "errorClass" + * * "host" -> "host" + */ +public class ErrorResponse +{ + @JsonCreator + public static ErrorResponse fromMap(Map map) + { + final DruidException.Failure failure; + + final Object legacyErrorType = map.get("error"); + if (!"druidException".equals(legacyErrorType)) { + // The non "druidException" errorCode field means that we are deserializing a legacy QueryException object rather + // than deserializing a DruidException. So, we make a QueryException, map it to a DruidException and build + // our response from that DruidException. This allows all code after us to only consider DruidException + // and helps aid the removal of QueryException. + failure = new QueryExceptionCompat( + new QueryException( + nullOrString(map.get("error")), + nullOrString(map.get("errorMessage")), + nullOrString(map.get("errorClass")), + nullOrString(map.get("host")) + ) + ); + } else { + failure = new DruidException.Failure(stringOrFailure(map, "errorCode")) + { + @Override + protected DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + final DruidException retVal = bob.forPersona(DruidException.Persona.valueOf(stringOrFailure(map, "persona"))) + .ofCategory(DruidException.Category.valueOf(stringOrFailure( + map, + "category" + ))) + .build(stringOrFailure(map, "errorMessage")); + + final Object context = map.get("context"); + if (context instanceof Map) { + //noinspection unchecked + retVal.withContext((Map) context); + } + + return retVal; + } + }; + } + return new ErrorResponse(DruidException.fromFailure(new DeserializedFailure(failure))); + } + + private final DruidException underlyingException; + + public ErrorResponse(DruidException 
underlyingException) + { + this.underlyingException = underlyingException; + } + + @JsonValue + public Map getAsMap() + { + final LinkedHashMap retVal = new LinkedHashMap<>(); + + // This if statement is a compatibility layer to help bridge the time while we are introducing the DruidException. + // In a future release, QueryException should be completely eliminated, at which point we should also be + // able to eliminate this compatibility layer. + if (QueryExceptionCompat.ERROR_CODE.equals(underlyingException.getErrorCode())) { + retVal.put("error", underlyingException.getContextValue("legacyErrorCode")); + retVal.put("errorClass", underlyingException.getContextValue("errorClass")); + retVal.put("host", underlyingException.getContextValue("host")); + } else { + retVal.put("error", "druidException"); + } + + retVal.put("errorCode", underlyingException.getErrorCode()); + retVal.put("persona", underlyingException.getTargetPersona().toString()); + retVal.put("category", underlyingException.getCategory().toString()); + retVal.put("errorMessage", underlyingException.getMessage()); + retVal.put("context", underlyingException.getContext()); + + return retVal; + } + + public DruidException getUnderlyingException() + { + return underlyingException; + } + + @Nullable + private static String nullOrString(Object o) + { + return o == null ? 
null : o.toString(); + } + + private static String stringOrFailure(Map map, String key) + { + final Object o = map.get(key); + if (o instanceof String) { + return (String) o; + } + + final DruidException problem = DruidException + .forPersona(DruidException.Persona.DEVELOPER) + .ofCategory(DruidException.Category.DEFENSIVE) + .build("Got an error response that had a non-String value [%s] for key [%s]", o, key); + + for (Map.Entry entry : map.entrySet()) { + final Object value = entry.getValue(); + if (value != null) { + problem.withContext(entry.getKey(), value.toString()); + } + } + + throw problem; + } + + private static class DeserializedFailure extends DruidException.Failure + { + private final DruidException.Failure delegate; + + public DeserializedFailure( + DruidException.Failure delegate + ) + { + super(delegate.getErrorCode()); + this.delegate = delegate; + } + + @Override + protected DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + // By setting wasDeserialized, we get the initial exception built with no stack-trace, we then create a new + // exception with the exact same values that will contain our current stack-trace and to be relevant inside + // of the current process. It's a little bit of a weird dance to create a new exception with the same stuff, + // it might be nice to have a DelegatingDruidException or something like that which looks like a DruidException + // but just delegates everything. That's something that can be explored another day though. 
+ bob.wasDeserialized(); + final DruidException cause = delegate.makeException(bob); + + return DruidException.fromFailure( + new DruidException.Failure(cause.getErrorCode()) + { + @Override + protected DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + return bob.forPersona(cause.getTargetPersona()) + .ofCategory(cause.getCategory()) + .build(cause, cause.getMessage()) + .withContext(cause.getContext()); + } + } + ); + } + } +} diff --git a/processing/src/main/java/org/apache/druid/error/InvalidInput.java b/processing/src/main/java/org/apache/druid/error/InvalidInput.java new file mode 100644 index 000000000000..ce50d4db3763 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/InvalidInput.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +public class InvalidInput extends DruidException.Failure +{ + public static DruidException exception(String msg, Object... args) + { + return exception(null, msg, args); + } + + public static DruidException exception(Throwable t, String msg, Object... 
args) + { + return DruidException.fromFailure(new InvalidInput(t, msg, args)); + } + + private final Throwable t; + private final String msg; + private final Object[] args; + + public InvalidInput( + Throwable t, + String msg, + Object... args + ) + { + super("invalidInput"); + this.t = t; + this.msg = msg; + this.args = args; + } + + + @Override + public DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + bob = bob.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.INVALID_INPUT); + + if (t == null) { + return bob.build(msg, args); + } else { + return bob.build(t, msg, args); + } + } +} diff --git a/processing/src/main/java/org/apache/druid/error/InvalidSqlInput.java b/processing/src/main/java/org/apache/druid/error/InvalidSqlInput.java new file mode 100644 index 000000000000..17a392962f9b --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/InvalidSqlInput.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +public class InvalidSqlInput extends InvalidInput +{ + public static DruidException exception(String msg, Object... 
args) + { + return exception(null, msg, args); + } + + public static DruidException exception(Throwable t, String msg, Object... args) + { + return DruidException.fromFailure(new InvalidSqlInput(t, msg, args)); + } + + public InvalidSqlInput( + Throwable t, + String msg, + Object... args + ) + { + super(t, msg, args); + } + + @Override + public DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + final DruidException retVal = super.makeException(bob); + retVal.withContext("sourceType", "sql"); + return retVal; + } +} diff --git a/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java b/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java new file mode 100644 index 000000000000..12e4905efae9 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +import org.apache.druid.query.QueryException; + +/** + * A {@link DruidException.Failure} that serves to cover conversions from {@link QueryException}. 
+ * + * When/if QueryException is completely eliminated from the code base, this compat layer should also be able to + * be removed. Additionally, it is the hope that nobody should actually be interacting with this class as it should + * be an implementation detail of {@link DruidException} and not really seen outside of that. + */ +public class QueryExceptionCompat extends DruidException.Failure +{ + public static final String ERROR_CODE = "legacyQueryException"; + + private final QueryException exception; + + public QueryExceptionCompat( + QueryException exception + ) + { + super(ERROR_CODE); + this.exception = exception; + } + + @Override + protected DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + return bob.forPersona(DruidException.Persona.OPERATOR) + .ofCategory(convertFailType(exception.getFailType())) + .build(exception.getMessage()) + .withContext("host", exception.getHost()) + .withContext("errorClass", exception.getErrorClass()) + .withContext("legacyErrorCode", exception.getErrorCode()); + } + + private DruidException.Category convertFailType(QueryException.FailType failType) + { + switch (failType) { + case USER_ERROR: + return DruidException.Category.INVALID_INPUT; + case UNAUTHORIZED: + return DruidException.Category.UNAUTHORIZED; + case CAPACITY_EXCEEDED: + return DruidException.Category.CAPACITY_EXCEEDED; + case QUERY_RUNTIME_FAILURE: + return DruidException.Category.RUNTIME_FAILURE; + case CANCELED: + return DruidException.Category.CANCELED; + case UNKNOWN: + return DruidException.Category.UNCATEGORIZED; + case UNSUPPORTED: + return DruidException.Category.UNSUPPORTED; + case TIMEOUT: + return DruidException.Category.TIMEOUT; + default: + return DruidException.Category.UNCATEGORIZED; + } + } +} diff --git a/processing/src/main/java/org/apache/druid/java/util/common/logger/Logger.java b/processing/src/main/java/org/apache/druid/java/util/common/logger/Logger.java index 3872c2cf7686..1639bf5378f8 100644 --- 
a/processing/src/main/java/org/apache/druid/java/util/common/logger/Logger.java +++ b/processing/src/main/java/org/apache/druid/java/util/common/logger/Logger.java @@ -33,6 +33,37 @@ import java.util.function.BiConsumer; import java.util.stream.Stream; +/** + * A Logger for usage inside of Druid. Provides a layer that allows for simple changes to the logging framework + * with minimal changes to the Druid code. + * + * Log levels are used as an indication of urgency around the behavior that is being logged. The intended generic + * rubric for when to use the different logging levels is as follows. + * + * DEBUG: something that a developer wants to look at while actively debugging, but should not be included by default. + * + * INFO: a message that is useful to have when trying to retro-actively understand what happened in a running system. + * There is often a fine line between INFO and DEBUG. We want information from INFO logs but do not want to spam log + * files either. One rubric to use to help determine if something should be INFO or DEBUG is how often we expect the + * line to be logged. If there is clarity that it will happen in a controlled manner such that it does not spam the + * logs, then INFO is fine. Additionally, it can be okay to log at INFO level even if there is a risk of spamming the + * log file in the case that the log line only happens in specific "error-oriented" situations, this is because such + * error-oriented situations are more likely to necessitate reading and understanding the logs to eliminate the error. + * Additionally, it is perfectly acceptable and reasonable to log an exception at INFO level. + * + * WARN: a message that indicates something bad has happened in the system that a human should potentially investigate. + * While it is bad and deserves investigation, it is of a nature that it should be able to wait until the next + * "business day" for investigation instead of needing immediate attention. 
+ * + * ERROR: a message that indicates that something bad has happened such that a human operator should take immediate + * intervention to triage and resolve the issue as it runs a risk to the smooth operations of the system. Logs at + * the ERROR level should generally be severe enough to warrant paging someone in the middle of the night. + * + * Even though this is the intended rubric, it is very difficult to ensure that, e.g. all ERROR log lines are pageable + * offenses. As such, it is questionable whether an operator should actually ALWAYS page on every ERROR log line, + * but as a directional target of when and how to log things, the above rubric should be used to evaluate if a log + * line is at the correct level. + */ public class Logger { @VisibleForTesting diff --git a/processing/src/main/java/org/apache/druid/segment/nested/NestedPathFinder.java b/processing/src/main/java/org/apache/druid/segment/nested/NestedPathFinder.java index b275199bb0e7..5d0596a6cc82 100644 --- a/processing/src/main/java/org/apache/druid/segment/nested/NestedPathFinder.java +++ b/processing/src/main/java/org/apache/druid/segment/nested/NestedPathFinder.java @@ -19,7 +19,8 @@ package org.apache.druid.segment.nested; -import org.apache.druid.java.util.common.IAE; +import org.apache.druid.error.InvalidInput; +import org.apache.druid.java.util.common.StringUtils; import javax.annotation.Nullable; import java.util.ArrayList; @@ -74,7 +75,7 @@ public static List parseJsonPath(@Nullable String path) List parts = new ArrayList<>(); if (!path.startsWith(JSON_PATH_ROOT)) { - badFormatJsonPath(path, "must start with '$'"); + badFormatJsonPath(path, "it must start with '$'"); } if (path.length() == 1) { @@ -97,7 +98,7 @@ public static List parseJsonPath(@Nullable String path) partMark = i + 1; } else if (current == '[' && arrayMark < 0 && quoteMark < 0) { if (dotMark == (i - 1) && dotMark != 0) { - badFormatJsonPath(path, "invalid position " + i + " for '[', must not follow '.' 
or must be contained with '"); + badFormatJsonPath(path, "found '[' at invalid position [%s], must not follow '.' or must be contained with '", i); } if (dotMark >= 0 && i > 1) { parts.add(new NestedPathField(getPathSubstring(path, partMark, i))); @@ -115,13 +116,13 @@ public static List parseJsonPath(@Nullable String path) partMark = i + 1; } catch (NumberFormatException ignored) { - badFormatJsonPath(path, "expected number for array specifier got " + maybeNumber + " instead. Use ' if this value was meant to be a field name"); + badFormatJsonPath(path, "array specifier [%s] should be a number, it was not. Use ' if this value was meant to be a field name", maybeNumber); } } else if (dotMark == -1 && arrayMark == -1) { badFormatJsonPath(path, "path parts must be separated with '.'"); } else if (current == '\'' && quoteMark < 0) { if (arrayMark != i - 1) { - badFormatJsonPath(path, "' must be immediately after '['"); + badFormatJsonPath(path, "single-quote (') must be immediately after '['"); } quoteMark = i; partMark = i + 1; @@ -130,7 +131,7 @@ public static List parseJsonPath(@Nullable String path) if (arrayMark >= 0) { continue; } - badFormatJsonPath(path, "closing ' must immediately precede ']'"); + badFormatJsonPath(path, "closing single-quote (') must immediately precede ']'"); } parts.add(new NestedPathField(getPathSubstring(path, partMark, i))); @@ -147,7 +148,7 @@ public static List parseJsonPath(@Nullable String path) // add the last element, this should never be an array because they close themselves if (partMark < path.length()) { if (quoteMark != -1) { - badFormatJsonPath(path, "unterminated '"); + badFormatJsonPath(path, "unterminated single-quote (')"); } if (arrayMark != -1) { badFormatJsonPath(path, "unterminated '['"); @@ -195,7 +196,7 @@ public static List parseJqPath(@Nullable String path) List parts = new ArrayList<>(); if (path.charAt(0) != '.') { - badFormat(path, "must start with '.'"); + badFormat(path, "it must start with '.'"); } int 
partMark = -1; // position to start the next substring to build the path part @@ -217,13 +218,13 @@ public static List parseJqPath(@Nullable String path) parts.add(new NestedPathField(getPathSubstring(path, partMark, i))); dotMark = -1; } else { - badFormat(path, "invalid position " + i + " for '?'"); + badFormat(path, "found '?' at invalid position [%s]", i); } } partMark = i + 1; } else if (current == '[' && arrayMark < 0 && quoteMark < 0) { if (dotMark == (i - 1) && dotMark != 0) { - badFormat(path, "invalid position " + i + " for '[', must not follow '.' or must be contained with '\"'"); + badFormat(path, "found '[' at invalid position [%s], must not follow '.' or must be contained with '\"'", i); } if (dotMark >= 0 && i > 1) { parts.add(new NestedPathField(getPathSubstring(path, partMark, i))); @@ -241,16 +242,16 @@ public static List parseJqPath(@Nullable String path) partMark = i + 1; } catch (NumberFormatException ignored) { - badFormat(path, "expected number for array specifier got " + maybeNumber + " instead. Use \"\" if this value was meant to be a field name"); + badFormat(path, "array specifier [%s] should be a number, it was not. Use \"\" if this value was meant to be a field name", maybeNumber); } } else if (dotMark == -1 && arrayMark == -1) { badFormat(path, "path parts must be separated with '.'"); } else if (current == '"' && quoteMark < 0) { if (partMark != i) { - badFormat(path, "invalid position " + i + " for '\"', must immediately follow '.' or '['"); + badFormat(path, "found '\"' at invalid position [%s], it must immediately follow '.' 
or '['", i); } if (arrayMark > 0 && arrayMark != i - 1) { - badFormat(path, "'\"' within '[' must be immediately after"); + badFormat(path, "'\"' within '[', must be immediately after"); } quoteMark = i; partMark = i + 1; @@ -295,14 +296,14 @@ private static String getPathSubstring(String path, int start, int end) return path.substring(start, end); } - private static void badFormat(String path, String message) + private static void badFormat(String path, String message, Object... args) { - throw new IAE("Bad format, '%s' is not a valid 'jq' path: %s", path, message); + throw InvalidInput.exception("jq path [%s] is invalid, %s", path, StringUtils.format(message, args)); } - private static void badFormatJsonPath(String path, String message) + private static void badFormatJsonPath(String path, String message, Object... args) { - throw new IAE("Bad format, '%s' is not a valid JSONPath path: %s", path, message); + throw InvalidInput.exception("JSONPath [%s] is invalid, %s", path, StringUtils.format(message, args)); } /** diff --git a/processing/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java b/processing/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java index 806c74e61617..b61ef5df4cd8 100644 --- a/processing/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java +++ b/processing/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java @@ -19,21 +19,17 @@ package org.apache.druid.common.utils; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.junit.Assert; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; public class IdUtilsTest { private static final String THINGO = "thingToValidate"; public static final String VALID_ID_CHARS = "alpha123..*~!@#&%^&*()-+ Россия\\ 한국 中国!"; - @Rule - public ExpectedException expectedException = ExpectedException.none(); - @Test public void 
testValidIdName() { @@ -43,89 +39,89 @@ public void testValidIdName() @Test public void testInvalidNull() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot be null or empty. Please provide a thingToValidate."); - IdUtils.validateId(THINGO, null); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: must not be null" + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, null)); } @Test public void testInvalidEmpty() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot be null or empty. Please provide a thingToValidate."); - IdUtils.validateId(THINGO, ""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: must not be null" + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "")); } @Test public void testInvalidSlashes() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain the '/' character."); - IdUtils.validateId(THINGO, "/paths/are/bad/since/we/make/files/from/stuff"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [/paths/are/bad/since/we/make/files/from/stuff] cannot contain '/'." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "/paths/are/bad/since/we/make/files/from/stuff")); } @Test public void testInvalidLeadingDot() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot start with the '.' character."); - IdUtils.validateId(THINGO, "./nice/try"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [./nice/try] cannot start with '.'." 
+ ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "./nice/try")); } @Test public void testInvalidSpacesRegexTabs() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "spaces\tare\tbetter\tthan\ttabs\twhich\tare\tillegal"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [spaces\tare\tbetter\tthan\ttabs\twhich\tare\tillegal] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "spaces\tare\tbetter\tthan\ttabs\twhich\tare\tillegal")); } @Test public void testInvalidSpacesRegexNewline() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "new\nline"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [new\nline] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "new\nline")); } @Test public void testInvalidSpacesRegexCarriageReturn() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "does\rexist\rby\ritself"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [does\rexist\rby\ritself] contains illegal whitespace characters. Only space is allowed." 
+ ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "does\rexist\rby\ritself")); } @Test public void testInvalidSpacesRegexLineTabulation() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "what\u000Bis line tabulation"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [what\u000Bis line tabulation] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "what\u000Bis line tabulation")); } @Test public void testInvalidSpacesRegexFormFeed() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "form\u000cfeed?"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [form\ffeed?] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "form\u000cfeed?")); } @Test public void testInvalidUnprintableChars() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain character #129 (at position 4)."); - IdUtils.validateId(THINGO, "form\u0081feed?"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [form\u0081feed?] 
contains illegal UTF8 character [#129] at position [4]" + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "form\u0081feed?")); } @Test public void testInvalidEmojis() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain character #55357 (at position 4)."); - IdUtils.validateId(THINGO, "form💯feed?"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [form\uD83D\uDCAFfeed?] contains illegal UTF8 character [#55357] at position [4]" + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "form💯feed?")); } @Test diff --git a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java new file mode 100644 index 000000000000..d3d4e057c5e0 --- /dev/null +++ b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.error; + +import org.apache.druid.matchers.DruidMatchers; +import org.hamcrest.Description; +import org.hamcrest.DiagnosingMatcher; +import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.hamcrest.core.AllOf; + +import java.util.ArrayList; + +public class DruidExceptionMatcher extends DiagnosingMatcher +{ + public static DruidExceptionMatcher invalidInput() + { + return new DruidExceptionMatcher( + DruidException.Persona.USER, + DruidException.Category.INVALID_INPUT, + "invalidInput" + ); + } + + public static DruidExceptionMatcher invalidSqlInput() + { + return invalidInput().expectContext("sourceType", "sql"); + } + + private final AllOf delegate; + private final ArrayList> matcherList; + + public DruidExceptionMatcher( + DruidException.Persona targetPersona, + DruidException.Category category, + String errorCode + ) + { + matcherList = new ArrayList<>(); + matcherList.add(DruidMatchers.fn("targetPersona", DruidException::getTargetPersona, Matchers.is(targetPersona))); + matcherList.add(DruidMatchers.fn("category", DruidException::getCategory, Matchers.is(category))); + matcherList.add(DruidMatchers.fn("errorCode", DruidException::getErrorCode, Matchers.is(errorCode))); + + delegate = new AllOf<>(matcherList); + } + + public DruidExceptionMatcher expectContext(String key, String value) + { + matcherList.add(DruidMatchers.fn("context", DruidException::getContext, Matchers.hasEntry(key, value))); + return this; + } + + public DruidExceptionMatcher expectMessageIs(String s) + { + return expectMessage(Matchers.equalTo(s)); + } + + public DruidExceptionMatcher expectMessageContains(String contains) + { + return expectMessage(Matchers.containsString(contains)); + } + + public DruidExceptionMatcher expectMessage(Matcher messageMatcher) + { + matcherList.add(DruidMatchers.fn("message", DruidException::getMessage, messageMatcher)); + return this; + } + + public DruidExceptionMatcher 
expectException(Matcher causeMatcher) + { + matcherList.add(DruidMatchers.fn("cause", DruidException::getCause, causeMatcher)); + return this; + } + + @Override + protected boolean matches(Object item, Description mismatchDescription) + { + return delegate.matches(item, mismatchDescription); + } + + @Override + public void describeTo(Description description) + { + delegate.describeTo(description); + } + + public void assertThrowsAndMatches(ThrowingSupplier fn) + { + boolean thrown = false; + try { + fn.get(); + } + catch (Throwable e) { + if (e instanceof DruidException) { + MatcherAssert.assertThat(e, this); + thrown = true; + } else { + throw new RuntimeException(e); + } + } + MatcherAssert.assertThat(thrown, Matchers.is(true)); + } + + public interface ThrowingSupplier + { + void get(); + } +} diff --git a/processing/src/test/java/org/apache/druid/error/ErrorResponseTest.java b/processing/src/test/java/org/apache/druid/error/ErrorResponseTest.java new file mode 100644 index 000000000000..2ddd39aa7dae --- /dev/null +++ b/processing/src/test/java/org/apache/druid/error/ErrorResponseTest.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.error; + +import com.google.common.collect.ImmutableMap; +import org.apache.druid.matchers.DruidMatchers; +import org.apache.druid.query.QueryTimeoutException; +import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.Test; + +import java.util.Map; + +public class ErrorResponseTest +{ + @Test + public void testSanity() + { + ErrorResponse response = new ErrorResponse(InvalidSqlInput.exception("bad sql!")); + + final Map asMap = response.getAsMap(); + MatcherAssert.assertThat( + asMap, + DruidMatchers.mapMatcher( + "error", "druidException", + "errorCode", "invalidInput", + "persona", "USER", + "category", "INVALID_INPUT", + "errorMessage", "bad sql!", + "context", ImmutableMap.of("sourceType", "sql") + ) + ); + + ErrorResponse recomposed = ErrorResponse.fromMap(asMap); + + MatcherAssert.assertThat( + recomposed.getUnderlyingException(), + DruidExceptionMatcher.invalidSqlInput().expectMessageIs("bad sql!") + ); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + @Test + public void testQueryExceptionCompat() + { + ErrorResponse response = new ErrorResponse( + DruidException.fromFailure(new QueryExceptionCompat(new QueryTimeoutException())) + ); + + final Map asMap = response.getAsMap(); + MatcherAssert.assertThat( + asMap, + DruidMatchers.mapMatcher( + "error", + "Query timeout", + + "errorCode", + "legacyQueryException", + + "persona", + "OPERATOR", + + "category", + "TIMEOUT", + + "errorMessage", + "Query did not complete within configured timeout period. You can increase query timeout or tune the performance of query." 
+ ) + ); + MatcherAssert.assertThat( + asMap, + (Matcher) Matchers.hasEntry( + Matchers.is("context"), + Matchers.allOf( + DruidMatchers.mapMatcher( + "errorClass", "org.apache.druid.query.QueryTimeoutException", + "legacyErrorCode", "Query timeout" + ), + Matchers.hasKey("host") + ) + ) + ); + + ErrorResponse recomposed = ErrorResponse.fromMap(asMap); + + MatcherAssert.assertThat( + recomposed.getUnderlyingException(), + new DruidExceptionMatcher(DruidException.Persona.OPERATOR, DruidException.Category.TIMEOUT, "legacyQueryException") + .expectMessageIs("Query did not complete within configured timeout period. You can increase query timeout or tune the performance of query.") + ); + } +} diff --git a/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java b/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java new file mode 100644 index 000000000000..de9b10bbefc5 --- /dev/null +++ b/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.matchers; + +import org.apache.druid.java.util.common.IAE; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; + +import java.util.ArrayList; +import java.util.Map; +import java.util.function.Function; + +public class DruidMatchers +{ + public static LambdaMatcher fn(String name, Function fn, Matcher matcher) + { + return new LambdaMatcher<>(name + ": ", fn, matcher); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + public static Matcher> mapMatcher(Object... keysAndValues) + { + if (keysAndValues.length % 2 == 1) { + throw new IAE("keysAndValues should be pairs, but had an odd length [%s]", keysAndValues.length); + } + ArrayList>> entryMatchers = new ArrayList<>(); + for (int i = 0; i < keysAndValues.length; i += 2) { + entryMatchers.add(Matchers.hasEntry((K) keysAndValues[i], (V) keysAndValues[i + 1])); + } + return Matchers.allOf((Iterable) entryMatchers); + } +} diff --git a/processing/src/test/java/org/apache/druid/matchers/LambdaMatcher.java b/processing/src/test/java/org/apache/druid/matchers/LambdaMatcher.java new file mode 100644 index 000000000000..3eb50466ee1a --- /dev/null +++ b/processing/src/test/java/org/apache/druid/matchers/LambdaMatcher.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.matchers; + +import org.hamcrest.Description; +import org.hamcrest.DiagnosingMatcher; +import org.hamcrest.Matcher; + +import java.util.function.Function; + +public class LambdaMatcher extends DiagnosingMatcher +{ + private final String name; + private final Function fn; + private final Matcher matcher; + + public LambdaMatcher( + String name, + Function fn, + Matcher matcher + ) + { + this.name = name; + this.fn = fn; + this.matcher = matcher; + } + + @Override + protected boolean matches(Object item, Description mismatchDescription) + { + final S result = fn.apply((T) item); + if (!matcher.matches(result)) { + matcher.describeMismatch(result, mismatchDescription); + return false; + } + return true; + } + + @Override + public void describeTo(Description description) + { + description.appendText(name); + matcher.describeTo(description); + } +} diff --git a/processing/src/test/java/org/apache/druid/segment/nested/NestedPathFinderTest.java b/processing/src/test/java/org/apache/druid/segment/nested/NestedPathFinderTest.java index 161a752ce1e0..b22131833c64 100644 --- a/processing/src/test/java/org/apache/druid/segment/nested/NestedPathFinderTest.java +++ b/processing/src/test/java/org/apache/druid/segment/nested/NestedPathFinderTest.java @@ -21,11 +21,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.apache.druid.java.util.common.IAE; +import org.apache.druid.error.DruidExceptionMatcher; import org.junit.Assert; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import java.util.List; import java.util.Map; @@ -41,9 +39,6 @@ public class NestedPathFinderTest "[also_sneaky]", ImmutableList.of(ImmutableMap.of("a", "x"), ImmutableMap.of("b", "y", "c", "z")) ); - @Rule - public ExpectedException expectedException = 
ExpectedException.none(); - @Test public void testParseJqPath() { @@ -188,7 +183,10 @@ public void testParseJqPath() Assert.assertEquals("f?o.o", pathParts.get(2).getPartIdentifier()); Assert.assertTrue(pathParts.get(3) instanceof NestedPathField); Assert.assertEquals(".b?.a.r.", pathParts.get(3).getPartIdentifier()); - Assert.assertEquals(".\"x.y.z]?[\\\"]][]\".\"13234.12[]][23\".\"f?o.o\".\".b?.a.r.\"", NestedPathFinder.toNormalizedJqPath(pathParts)); + Assert.assertEquals( + ".\"x.y.z]?[\\\"]][]\".\"13234.12[]][23\".\"f?o.o\".\".b?.a.r.\"", + NestedPathFinder.toNormalizedJqPath(pathParts) + ); } @Test @@ -334,85 +332,84 @@ public void testParseJsonPath() @Test public void testBadFormatMustStartWithDot() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, 'x.y' is not a valid 'jq' path: must start with '.'"); - NestedPathFinder.parseJqPath("x.y"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [x.y] is invalid, it must start with '.'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath("x.y")); } @Test public void testBadFormatNoDot() { - expectedException.expect(IAE.class); - expectedException.expectMessage(".\"x\"\"y\"' is not a valid 'jq' path: path parts must be separated with '.'"); - NestedPathFinder.parseJqPath(".\"x\"\"y\""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.\"x\"\"y\"] is invalid, path parts must be separated with '.'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".\"x\"\"y\"")); } @Test public void testBadFormatWithDot2() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '..\"x\"' is not a valid 'jq' path: path parts separated by '.' must not be empty"); - NestedPathFinder.parseJqPath("..\"x\""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [..\"x\"] is invalid, path parts separated by '.' 
must not be empty" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath("..\"x\"")); } @Test public void testBadFormatWithDot3() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x.[1]' is not a valid 'jq' path: invalid position 3 for '[', must not follow '.' or must be contained with '\"'"); - NestedPathFinder.parseJqPath(".x.[1]"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x.[1]] is invalid, found '[' at invalid position [3], must not follow '.' or must be contained with '\"'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x.[1]")); } @Test public void testBadFormatWithDot4() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[1].[2]' is not a valid 'jq' path: invalid position 6 for '[', must not follow '.' or must be contained with '\"'"); - NestedPathFinder.parseJqPath(".x[1].[2]"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[1].[2]] is invalid, found '[' at invalid position [6], must not follow '.' or must be contained with '\"'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[1].[2]")); } @Test public void testBadFormatNotANumber() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[.1]' is not a valid 'jq' path: expected number for array specifier got .1 instead. Use \"\" if this value was meant to be a field name"); - NestedPathFinder.parseJqPath(".x[.1]"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[.1]] is invalid, array specifier [.1] should be a number, it was not. 
Use \"\" if this value was meant to be a field name" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[.1]")); } @Test public void testBadFormatUnclosedArray() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[1' is not a valid 'jq' path: unterminated '['"); - NestedPathFinder.parseJqPath(".x[1"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[1] is invalid, unterminated '['" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[1")); } @Test public void testBadFormatUnclosedArray2() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[\"1\"' is not a valid 'jq' path: unterminated '['"); - NestedPathFinder.parseJqPath(".x[\"1\""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[\"1\"] is invalid, unterminated '['" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[\"1\"")); } @Test public void testBadFormatUnclosedQuote() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x.\"1' is not a valid 'jq' path: unterminated '\"'"); - NestedPathFinder.parseJqPath(".x.\"1"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x.\"1] is invalid, unterminated '\"'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x.\"1")); } @Test public void testBadFormatUnclosedQuote2() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[\"1]' is not a valid 'jq' path: unterminated '\"'"); - NestedPathFinder.parseJqPath(".x[\"1]"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[\"1]] is invalid, unterminated '\"'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[\"1]")); } - @Test public void testPathSplitter() { diff --git a/server/src/main/java/org/apache/druid/server/QueryResource.java b/server/src/main/java/org/apache/druid/server/QueryResource.java index 
f4a7ab3edb75..2db205ca0bed 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResource.java +++ b/server/src/main/java/org/apache/druid/server/QueryResource.java @@ -33,12 +33,10 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.inject.Inject; -import org.apache.druid.client.DirectDruidClient; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.annotations.Json; import org.apache.druid.guice.annotations.Self; import org.apache.druid.guice.annotations.Smile; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.query.BadJsonQueryException; import org.apache.druid.query.Query; @@ -46,7 +44,6 @@ import org.apache.druid.query.QueryException; import org.apache.druid.query.QueryInterruptedException; import org.apache.druid.query.QueryToolChest; -import org.apache.druid.query.TruncatedResponseContextException; import org.apache.druid.query.context.ResponseContext; import org.apache.druid.query.context.ResponseContext.Keys; import org.apache.druid.server.metrics.QueryCountStatsProvider; @@ -268,48 +265,6 @@ public interface QueryMetricCounter void incrementTimedOut(); } - public static void attachResponseContextToHttpResponse( - String queryId, - ResponseContext responseContext, - Response.ResponseBuilder responseBuilder, - ObjectMapper jsonMapper, ResponseContextConfig responseContextConfig, DruidNode selfNode - ) throws JsonProcessingException - { - transferEntityTag(responseContext, responseBuilder); - - DirectDruidClient.removeMagicResponseContextFields(responseContext); - - // Limit the response-context header, see https://github.com/apache/druid/issues/2331 - // Note that Response.ResponseBuilder.header(String key,Object value).build() calls value.toString() - // and encodes the string using ASCII, so 1 char is = 1 byte - final ResponseContext.SerializationResult serializationResult = 
responseContext.serializeWith( - jsonMapper, - responseContextConfig.getMaxResponseContextHeaderSize() - ); - - if (serializationResult.isTruncated()) { - final String logToPrint = StringUtils.format( - "Response Context truncated for id [%s]. Full context is [%s].", - queryId, - serializationResult.getFullResult() - ); - if (responseContextConfig.shouldFailOnTruncatedResponseContext()) { - log.error(logToPrint); - throw new QueryInterruptedException( - new TruncatedResponseContextException( - "Serialized response context exceeds the max size[%s]", - responseContextConfig.getMaxResponseContextHeaderSize() - ), - selfNode.getHostAndPortToUse() - ); - } else { - log.warn(logToPrint); - } - } - - responseBuilder.header(HEADER_RESPONSE_CONTEXT, serializationResult.getResult()); - } - private Query readQuery( final HttpServletRequest req, final InputStream in, diff --git a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java index ad268a78ef0f..074beb545b43 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java +++ b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java @@ -23,6 +23,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.io.CountingOutputStream; import org.apache.druid.client.DirectDruidClient; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.ErrorResponse; +import org.apache.druid.error.QueryExceptionCompat; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.RE; import org.apache.druid.java.util.common.StringUtils; @@ -33,6 +36,7 @@ import org.apache.druid.query.QueryInterruptedException; import org.apache.druid.query.TruncatedResponseContextException; import org.apache.druid.query.context.ResponseContext; +import org.apache.druid.server.security.AuthConfig; import org.apache.druid.server.security.ForbiddenException; import 
javax.annotation.Nullable; @@ -42,7 +46,6 @@ import javax.servlet.http.HttpServletResponse; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import javax.ws.rs.core.StreamingOutput; import java.io.Closeable; import java.io.IOException; import java.io.OutputStream; @@ -61,9 +64,9 @@ public abstract class QueryResultPusher private final MediaType contentType; private final Map extraHeaders; - private StreamingHttpResponseAccumulator accumulator = null; - private AsyncContext asyncContext = null; - private HttpServletResponse response = null; + private StreamingHttpResponseAccumulator accumulator; + private AsyncContext asyncContext; + private HttpServletResponse response; public QueryResultPusher( HttpServletRequest request, @@ -149,6 +152,14 @@ public Response push() accumulator.close(); resultsWriter.recordSuccess(accumulator.getNumBytesSent()); } + catch (DruidException e) { + // Less than ideal. But, if we return the result as JSON, this is + // the only way for the security filter to know that, yes, it is OK + // to show the user this error even if we didn't get to the step where + // we did a security check. + request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); + return handleDruidException(resultsWriter, e); + } catch (QueryException e) { return handleQueryException(resultsWriter, e); } @@ -159,7 +170,7 @@ public Response push() // returning results before a ForbiddenException gets thrown, that means that we've already leaked stuff // that should not have been leaked. I.e. it means, we haven't validated the authorization early enough. 
if (response != null && response.isCommitted()) { - log.error(re, "Got a forbidden exception for query[%s] after the response was already committed.", queryId); + log.error(re, "Got a forbidden exception for query [%s] after the response was already committed.", queryId); } throw re; } @@ -174,17 +185,17 @@ public Response push() accumulator.close(); } catch (IOException e) { - log.warn(e, "Suppressing exception closing accumulator for query[%s]", queryId); + log.warn(e, "Suppressing exception closing accumulator for query [%s]", queryId); } } if (resultsWriter == null) { - log.warn("resultsWriter was null for query[%s], work was maybe done in start() that shouldn't be.", queryId); + log.warn("resultsWriter was null for query [%s], work was maybe done in start() that shouldn't be.", queryId); } else { try { resultsWriter.close(); } catch (IOException e) { - log.warn(e, "Suppressing exception closing accumulator for query[%s]", queryId); + log.warn(e, "Suppressing exception closing accumulator for query [%s]", queryId); } } if (asyncContext != null) { @@ -197,58 +208,48 @@ public Response push() @Nullable private Response handleQueryException(ResultsWriter resultsWriter, QueryException e) { - if (accumulator != null && accumulator.isInitialized()) { - // We already started sending a response when we got the error message. In this case we just give up - // and hope that the partial stream generates a meaningful failure message for our client. We could consider - // also throwing the exception body into the response to make it easier for the client to choke if it manages - // to parse a meaningful object out, but that's potentially an API change so we leave that as an exercise for - // the future. 
+ return handleDruidException(resultsWriter, DruidException.fromFailure(new QueryExceptionCompat(e))); + } + private Response handleDruidException(ResultsWriter resultsWriter, DruidException e) + { + if (resultsWriter != null) { resultsWriter.recordFailure(e); - - // This case is always a failure because the error happened mid-stream of sending results back. Therefore, - // we do not believe that the response stream was actually usable counter.incrementFailed(); - return null; + + if (accumulator != null && accumulator.isInitialized()) { + // We already started sending a response when we got the error message. In this case we just give up + // and hope that the partial stream generates a meaningful failure message for our client. We could consider + // also throwing the exception body into the response to make it easier for the client to choke if it manages + // to parse a meaningful object out, but that's potentially an API change so we leave that as an exercise for + // the future. + return null; + } } - final QueryException.FailType failType = e.getFailType(); - switch (failType) { - case USER_ERROR: + switch (e.getCategory()) { + case INVALID_INPUT: case UNAUTHORIZED: - case QUERY_RUNTIME_FAILURE: + case RUNTIME_FAILURE: case CANCELED: counter.incrementInterrupted(); break; case CAPACITY_EXCEEDED: case UNSUPPORTED: + case UNCATEGORIZED: + case DEFENSIVE: counter.incrementFailed(); break; case TIMEOUT: counter.incrementTimedOut(); break; - case UNKNOWN: - log.warn( - e, - "Unknown errorCode[%s], support needs to be added for error handling.", - e.getErrorCode() - ); - counter.incrementFailed(); } - resultsWriter.recordFailure(e); - - final int responseStatus = failType.getExpectedStatus(); - if (response == null) { - // No response object yet, so assume we haven't started the async context and is safe to return Response final Response.ResponseBuilder bob = Response - .status(responseStatus) + .status(e.getStatusCode()) .type(contentType) - 
.entity((StreamingOutput) output -> { - writeException(e, output); - output.close(); - }); + .entity(new ErrorResponse(e)); bob.header(QueryResource.QUERY_ID_RESPONSE_HEADER, queryId); for (Map.Entry entry : extraHeaders.entrySet()) { @@ -261,7 +262,7 @@ private Response handleQueryException(ResultsWriter resultsWriter, QueryExceptio QueryResource.NO_STACK_LOGGER.warn(e, "Response was committed without the accumulator writing anything!?"); } - response.setStatus(responseStatus); + response.setStatus(e.getStatusCode()); response.setHeader("Content-Type", contentType.toString()); try (ServletOutputStream out = response.getOutputStream()) { writeException(e, out); @@ -269,7 +270,7 @@ private Response handleQueryException(ResultsWriter resultsWriter, QueryExceptio catch (IOException ioException) { log.warn( ioException, - "Suppressing IOException thrown sending error response for query[%s]", + "Suppressing IOException thrown sending error response for query [%s]", queryId ); } diff --git a/server/src/main/java/org/apache/druid/server/security/AllowOptionsResourceFilter.java b/server/src/main/java/org/apache/druid/server/security/AllowOptionsResourceFilter.java index 46fe78a0a470..776f5b6df4d3 100644 --- a/server/src/main/java/org/apache/druid/server/security/AllowOptionsResourceFilter.java +++ b/server/src/main/java/org/apache/druid/server/security/AllowOptionsResourceFilter.java @@ -44,7 +44,6 @@ public AllowOptionsResourceFilter( @Override public void init(FilterConfig filterConfig) { - } @Override @@ -78,6 +77,5 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha @Override public void destroy() { - } } diff --git a/server/src/main/java/org/apache/druid/server/security/PreResponseAuthorizationCheckFilter.java b/server/src/main/java/org/apache/druid/server/security/PreResponseAuthorizationCheckFilter.java index 97d3e05a5f33..454d8566f29f 100644 --- 
a/server/src/main/java/org/apache/druid/server/security/PreResponseAuthorizationCheckFilter.java +++ b/server/src/main/java/org/apache/druid/server/security/PreResponseAuthorizationCheckFilter.java @@ -100,7 +100,7 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo if (authInfoChecked != null && !authInfoChecked && response.getStatus() != HttpServletResponse.SC_FORBIDDEN) { handleAuthorizationCheckError( - "Request's authorization check failed but status code was not 403.", + "Request's authorization check failed but status code was not 403", request, response ); diff --git a/server/src/test/java/org/apache/druid/segment/indexing/DataSchemaTest.java b/server/src/test/java/org/apache/druid/segment/indexing/DataSchemaTest.java index ffc7934fafe0..78294fca0c4b 100644 --- a/server/src/test/java/org/apache/druid/segment/indexing/DataSchemaTest.java +++ b/server/src/test/java/org/apache/druid/segment/indexing/DataSchemaTest.java @@ -32,8 +32,10 @@ import org.apache.druid.data.input.impl.JSONParseSpec; import org.apache.druid.data.input.impl.StringInputRowParser; import org.apache.druid.data.input.impl.TimestampSpec; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; +import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.DurationGranularity; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.jackson.JacksonUtils; @@ -48,6 +50,7 @@ import org.apache.druid.segment.transform.TransformSpec; import org.apache.druid.testing.InitializedNullHandlingTest; import org.hamcrest.CoreMatchers; +import org.hamcrest.MatcherAssert; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; @@ -60,7 +63,6 @@ import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; -import java.util.List; 
import java.util.Map; import java.util.Set; @@ -413,22 +415,24 @@ public void testEmptyDatasource() ), JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT ); - expectedException.expect(CoreMatchers.instanceOf(IllegalArgumentException.class)); - expectedException.expectMessage( - "dataSource cannot be null or empty. Please provide a dataSource." - ); - - DataSchema schema = new DataSchema( - "", - parser, - new AggregatorFactory[]{ - new DoubleSumAggregatorFactory("metric1", "col1"), - new DoubleSumAggregatorFactory("metric2", "col2"), - }, - new ArbitraryGranularitySpec(Granularities.DAY, ImmutableList.of(Intervals.of("2014/2015"))), - null, - jsonMapper - ); + DruidExceptionMatcher + .invalidInput() + .expectMessageIs("Invalid value for field [dataSource]: must not be null") + .assertThrowsAndMatches( + () -> new DataSchema( + "", + parser, + new AggregatorFactory[]{ + new DoubleSumAggregatorFactory("metric1", "col1"), + new DoubleSumAggregatorFactory("metric2", "col2"), + }, + new ArbitraryGranularitySpec( + Granularities.DAY, + ImmutableList.of(Intervals.of("2014/2015")) + ), + null, + jsonMapper + )); } @@ -442,27 +446,21 @@ public void testInvalidWhitespaceDatasource() ); for (Map.Entry entry : invalidCharToDataSourceName.entrySet()) { - testInvalidWhitespaceDatasourceHelper(entry.getValue(), entry.getKey()); - } - } - - private void testInvalidWhitespaceDatasourceHelper(String dataSource, String invalidChar) - { - String testFailMsg = "dataSource contain invalid whitespace character: " + invalidChar; - try { - DataSchema schema = new DataSchema( - dataSource, - Collections.emptyMap(), - null, - null, - null, - jsonMapper + String dataSource = entry.getValue(); + final String msg = StringUtils.format( + "Invalid value for field [dataSource]: Value [%s] contains illegal whitespace characters. 
Only space is allowed.", + dataSource + ); + DruidExceptionMatcher.invalidInput().expectMessageIs(msg).assertThrowsAndMatches( + () -> new DataSchema( + dataSource, + Collections.emptyMap(), + null, + null, + null, + jsonMapper + ) ); - Assert.fail(testFailMsg); - } - catch (IllegalArgumentException errorMsg) { - String expectedMsg = "dataSource cannot contain whitespace character except space."; - Assert.assertEquals(testFailMsg, expectedMsg, errorMsg.getMessage()); } } @@ -524,10 +522,22 @@ public void testSerde() throws Exception public void testSerializeWithInvalidDataSourceName() throws Exception { // Escape backslashes to insert a tab character in the datasource name. - List datasources = ImmutableList.of("", "../invalid", "\tname", "name\t invalid"); - for (String datasource : datasources) { + Map datasourceToErrorMsg = ImmutableMap.of( + "", + "Invalid value for field [dataSource]: must not be null", + + "../invalid", + "Invalid value for field [dataSource]: Value [../invalid] cannot start with '.'.", + + "\tname", + "Invalid value for field [dataSource]: Value [\tname] contains illegal whitespace characters. Only space is allowed.", + + "name\t invalid", + "Invalid value for field [dataSource]: Value [name\t invalid] contains illegal whitespace characters. Only space is allowed." 
+ ); + for (Map.Entry entry : datasourceToErrorMsg.entrySet()) { String jsonStr = "{" - + "\"dataSource\":\"" + StringEscapeUtils.escapeJson(datasource) + "\"," + + "\"dataSource\":\"" + StringEscapeUtils.escapeJson(entry.getKey()) + "\"," + "\"parser\":{" + "\"type\":\"string\"," + "\"parseSpec\":{" @@ -552,10 +562,16 @@ public void testSerializeWithInvalidDataSourceName() throws Exception ); } catch (ValueInstantiationException e) { - Assert.assertEquals(IllegalArgumentException.class, e.getCause().getClass()); + MatcherAssert.assertThat( + entry.getKey(), + e.getCause(), + DruidExceptionMatcher.invalidInput().expectMessageIs( + entry.getValue() + ) + ); continue; } - Assert.fail("Serialization of datasource " + datasource + " should have failed."); + Assert.fail("Serialization of datasource " + entry.getKey() + " should have failed."); } } diff --git a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java index 34c0a44a5cf9..c7c96ecd3b12 100644 --- a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java +++ b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java @@ -30,6 +30,9 @@ import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Injector; import com.google.inject.Key; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; +import org.apache.druid.error.ErrorResponse; import org.apache.druid.guice.GuiceInjectors; import org.apache.druid.guice.annotations.Smile; import org.apache.druid.jackson.DefaultObjectMapper; @@ -76,6 +79,7 @@ import org.apache.druid.server.security.ForbiddenException; import org.apache.druid.server.security.Resource; import org.apache.http.HttpStatus; +import org.hamcrest.MatcherAssert; import org.joda.time.Interval; import org.junit.Assert; import org.junit.Before; @@ -87,16 +91,18 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; 
import javax.ws.rs.core.Response.Status; -import javax.ws.rs.core.StreamingOutput; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -296,6 +302,79 @@ public void testGoodQueryWithQueryConfigOverrideDefault() throws IOException ); } + @Test + public void testGoodQueryThrowsDruidExceptionFromLifecycleExecute() throws IOException + { + String overrideConfigKey = "priority"; + String overrideConfigValue = "678"; + DefaultQueryConfig overrideConfig = new DefaultQueryConfig(ImmutableMap.of(overrideConfigKey, overrideConfigValue)); + queryResource = new QueryResource( + new QueryLifecycleFactory( + WAREHOUSE, + new QuerySegmentWalker() + { + @Override + public QueryRunner getQueryRunnerForIntervals( + Query query, + Iterable intervals + ) + { + throw DruidException.forPersona(DruidException.Persona.OPERATOR) + .ofCategory(DruidException.Category.RUNTIME_FAILURE) + .build("failing for coverage!"); + } + + @Override + public QueryRunner getQueryRunnerForSegments( + Query query, + Iterable specs + ) + { + throw new UnsupportedOperationException(); + } + }, + new DefaultGenericQueryMetricsFactory(), + new NoopServiceEmitter(), + testRequestLogger, + new AuthConfig(), + AuthTestUtils.TEST_AUTHORIZER_MAPPER, + Suppliers.ofInstance(overrideConfig) + ), + jsonMapper, + smileMapper, + queryScheduler, + new AuthConfig(), + null, + ResponseContextConfig.newConfig(true), + DRUID_NODE + ); + + expectPermissiveHappyPathAuth(); + + final Response response = expectSynchronousRequestFlow(SIMPLE_TIMESERIES_QUERY); + 
Assert.assertEquals(Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus()); + + final ErrorResponse entity = (ErrorResponse) response.getEntity(); + MatcherAssert.assertThat( + entity.getUnderlyingException(), + new DruidExceptionMatcher(DruidException.Persona.OPERATOR, DruidException.Category.RUNTIME_FAILURE, "general") + .expectMessageIs("failing for coverage!") + ); + + Assert.assertEquals(1, testRequestLogger.getNativeQuerylogs().size()); + Assert.assertNotNull(testRequestLogger.getNativeQuerylogs().get(0).getQuery()); + Assert.assertNotNull(testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext()); + Assert.assertTrue(testRequestLogger.getNativeQuerylogs() + .get(0) + .getQuery() + .getContext() + .containsKey(overrideConfigKey)); + Assert.assertEquals( + overrideConfigValue, + testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext().get(overrideConfigKey) + ); + } + @Test public void testGoodQueryWithQueryConfigDoesNotOverrideQueryContext() throws IOException { @@ -642,11 +721,23 @@ public QueryRunner getQueryRunnerForSegments(Query query, Iterable> back2 = new ArrayList<>(); + createScheduledQueryResource(laningScheduler, Collections.emptyList(), ImmutableList.of(waitTwoScheduled)); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); - assertAsyncResponseAndCountdownOrBlockForever( + )); + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> Assert.assertEquals(Status.OK.getStatusCode(), response.getStatus()) - ); + )); waitTwoScheduled.await(); - assertSynchronousResponseAndCountdownOrBlockForever( + back2.add(eventuallyaAssertSynchronousResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> { Assert.assertEquals(QueryCapacityExceededException.STATUS_CODE, response.getStatus()); 
QueryCapacityExceededException ex; + final ErrorResponse entity = (ErrorResponse) response.getEntity(); + MatcherAssert.assertThat( + entity.getUnderlyingException(), + new DruidExceptionMatcher( + DruidException.Persona.OPERATOR, + DruidException.Category.CAPACITY_EXCEEDED, + "legacyQueryException" + ) + .expectMessageIs( + "Too many concurrent queries, total query capacity of 2 exceeded. Please try your query again later.") + ); + try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ((StreamingOutput) response.getEntity()).write(baos); + jsonMapper.writeValue(baos, entity); + + // Here we are converting to a QueryCapacityExceededException. This is just to validate legacy stuff. + // When we delete the QueryException class, we can just rely on validating the DruidException instead ex = jsonMapper.readValue(baos.toByteArray(), QueryCapacityExceededException.class); } catch (IOException e) { @@ -917,17 +1021,19 @@ public void testTooManyQuery() throws InterruptedException Assert.assertEquals(QueryCapacityExceededException.makeTotalErrorMessage(2), ex.getMessage()); Assert.assertEquals(QueryException.QUERY_CAPACITY_EXCEEDED_ERROR_CODE, ex.getErrorCode()); } - ); - waitAllFinished.await(); + )); + + for (Future theFuture : back2) { + Assert.assertTrue(theFuture.get()); + } } @Test(timeout = 10_000L) - public void testTooManyQueryInLane() throws InterruptedException + public void testTooManyQueryInLane() throws InterruptedException, ExecutionException { expectPermissiveHappyPathAuth(); final CountDownLatch waitTwoStarted = new CountDownLatch(2); final CountDownLatch waitOneScheduled = new CountDownLatch(1); - final CountDownLatch waitAllFinished = new CountDownLatch(3); final QueryScheduler scheduler = new QueryScheduler( 40, ManualQueryPrioritizationStrategy.INSTANCE, @@ -935,23 +1041,39 @@ public void testTooManyQueryInLane() throws InterruptedException new ServerConfig() ); + ArrayList> back2 = new ArrayList<>(); + 
createScheduledQueryResource(scheduler, ImmutableList.of(waitTwoStarted), ImmutableList.of(waitOneScheduled)); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY_LOW_PRIORITY, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); + )); waitOneScheduled.await(); - assertSynchronousResponseAndCountdownOrBlockForever( + back2.add(eventuallyaAssertSynchronousResponse( SIMPLE_TIMESERIES_QUERY_LOW_PRIORITY, - waitAllFinished, response -> { Assert.assertEquals(QueryCapacityExceededException.STATUS_CODE, response.getStatus()); QueryCapacityExceededException ex; + + final ErrorResponse entity = (ErrorResponse) response.getEntity(); + MatcherAssert.assertThat( + entity.getUnderlyingException(), + new DruidExceptionMatcher( + DruidException.Persona.OPERATOR, + DruidException.Category.CAPACITY_EXCEEDED, + "legacyQueryException" + ) + .expectMessageIs( + "Too many concurrent queries for lane 'low', query capacity of 1 exceeded. Please try your query again later.") + ); + try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ((StreamingOutput) response.getEntity()).write(baos); + jsonMapper.writeValue(baos, entity); + + // Here we are converting to a QueryCapacityExceededException. This is just to validate legacy stuff. 
+ // When we delete the QueryException class, we can just rely on validating the DruidException instead ex = jsonMapper.readValue(baos.toByteArray(), QueryCapacityExceededException.class); } catch (IOException e) { @@ -964,24 +1086,24 @@ public void testTooManyQueryInLane() throws InterruptedException Assert.assertEquals(QueryException.QUERY_CAPACITY_EXCEEDED_ERROR_CODE, ex.getErrorCode()); } - ); + )); waitTwoStarted.await(); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); + )); - waitAllFinished.await(); + for (Future theFuture : back2) { + Assert.assertTrue(theFuture.get()); + } } @Test(timeout = 10_000L) - public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws InterruptedException + public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws InterruptedException, ExecutionException { expectPermissiveHappyPathAuth(); final CountDownLatch waitTwoStarted = new CountDownLatch(2); final CountDownLatch waitOneScheduled = new CountDownLatch(1); - final CountDownLatch waitAllFinished = new CountDownLatch(3); final QueryScheduler scheduler = new QueryScheduler( 40, new ThresholdBasedQueryPrioritizationStrategy(null, "P90D", null, null), @@ -989,23 +1111,38 @@ public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws Interru new ServerConfig() ); + ArrayList> back2 = new ArrayList<>(); createScheduledQueryResource(scheduler, ImmutableList.of(waitTwoStarted), ImmutableList.of(waitOneScheduled)); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); + )); waitOneScheduled.await(); - assertSynchronousResponseAndCountdownOrBlockForever( + 
back2.add(eventuallyaAssertSynchronousResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> { Assert.assertEquals(QueryCapacityExceededException.STATUS_CODE, response.getStatus()); QueryCapacityExceededException ex; + + final ErrorResponse entity = (ErrorResponse) response.getEntity(); + MatcherAssert.assertThat( + entity.getUnderlyingException(), + new DruidExceptionMatcher( + DruidException.Persona.OPERATOR, + DruidException.Category.CAPACITY_EXCEEDED, + "legacyQueryException" + ) + .expectMessageIs( + "Too many concurrent queries for lane 'low', query capacity of 1 exceeded. Please try your query again later.") + ); + try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ((StreamingOutput) response.getEntity()).write(baos); + jsonMapper.writeValue(baos, entity); + + // Here we are converting to a QueryCapacityExceededException. This is just to validate legacy stuff. + // When we delete the QueryException class, we can just rely on validating the DruidException instead ex = jsonMapper.readValue(baos.toByteArray(), QueryCapacityExceededException.class); } catch (IOException e) { @@ -1017,15 +1154,16 @@ public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws Interru ); Assert.assertEquals(QueryException.QUERY_CAPACITY_EXCEEDED_ERROR_CODE, ex.getErrorCode()); } - ); + )); waitTwoStarted.await(); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY_SMALLISH_INTERVAL, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); + )); - waitAllFinished.await(); + for (Future theFuture : back2) { + Assert.assertTrue(theFuture.get()); + } } private void createScheduledQueryResource( @@ -1090,20 +1228,19 @@ public QueryRunner getQueryRunnerForSegments(Query query, Iterable eventuallyAssertAsyncResponse( String query, - CountDownLatch done, Consumer asserts ) { - Executors.newSingleThreadExecutor().submit(() -> { + 
return Executors.newSingleThreadExecutor().submit(() -> { try { asserts.accept(expectAsyncRequestFlow(query, testServletRequest.mimic())); } catch (IOException e) { throw new RuntimeException(e); } - done.countDown(); + return true; }); } @@ -1139,7 +1276,8 @@ private MockHttpServletResponse expectAsyncRequestFlow( @Nonnull private MockHttpServletResponse expectAsyncRequestFlow( MockHttpServletRequest req, - byte[] queryBytes, QueryResource queryResource + byte[] queryBytes, + QueryResource queryResource ) throws IOException { final MockHttpServletResponse response = MockHttpServletResponse.forRequest(req); @@ -1152,13 +1290,12 @@ private MockHttpServletResponse expectAsyncRequestFlow( return response; } - private void assertSynchronousResponseAndCountdownOrBlockForever( + private Future eventuallyaAssertSynchronousResponse( String query, - CountDownLatch done, Consumer asserts ) { - Executors.newSingleThreadExecutor().submit(() -> { + return Executors.newSingleThreadExecutor().submit(() -> { try { asserts.accept( expectSynchronousRequestFlow( @@ -1171,10 +1308,19 @@ private void assertSynchronousResponseAndCountdownOrBlockForever( catch (IOException e) { throw new RuntimeException(e); } - done.countDown(); + return true; }); } + private Response expectSynchronousRequestFlow(String simpleTimeseriesQuery) throws IOException + { + return expectSynchronousRequestFlow( + testServletRequest, + simpleTimeseriesQuery.getBytes(StandardCharsets.UTF_8), + queryResource + ); + } + private Response expectSynchronousRequestFlow( MockHttpServletRequest req, byte[] bytes, diff --git a/server/src/test/java/org/apache/druid/server/security/AuthValidatorTest.java b/server/src/test/java/org/apache/druid/server/security/AuthValidatorTest.java index 3edfec1b5d0f..c8f78fb15a52 100644 --- a/server/src/test/java/org/apache/druid/server/security/AuthValidatorTest.java +++ b/server/src/test/java/org/apache/druid/server/security/AuthValidatorTest.java @@ -19,6 +19,7 @@ package 
org.apache.druid.server.security; +import org.apache.druid.error.DruidExceptionMatcher; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -40,41 +41,41 @@ public void setUp() @Test public void testAuthorizerNameWithEmptyIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot be null or empty."); - target.validateAuthorizerName(""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: must not be null" + ).assertThrowsAndMatches(() -> target.validateAuthorizerName("")); } @Test public void testAuthorizerNameWithNullIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot be null or empty."); - target.validateAuthorizerName(null); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: must not be null" + ).assertThrowsAndMatches(() -> target.validateAuthorizerName(null)); } @Test public void testAuthorizerNameStartsWithDotIsInValid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot start with the '.' character."); - target.validateAuthorizerName(".test"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: Value [.test] cannot start with '.'." + ).assertThrowsAndMatches(() -> target.validateAuthorizerName(".test")); } @Test public void testAuthorizerNameWithSlashIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot contain the '/' character."); - target.validateAuthorizerName("tes/t"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: Value [tes/t] cannot contain '/'." 
+ ).assertThrowsAndMatches(() -> target.validateAuthorizerName("tes/t")); } @Test public void testAuthorizerNameWithWhitespaceIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot contain whitespace character except space."); - target.validateAuthorizerName("tes\tt"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: Value [tes\tt] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> target.validateAuthorizerName("tes\tt")); } @Test @@ -92,8 +93,8 @@ public void testAuthenticatorNameWithAllowedCharactersIsValid() @Test public void testAuthenticatorNameWithWhitespaceIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authenticatorName cannot contain whitespace character except space."); - target.validateAuthenticatorName("tes\tt"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authenticatorName]: Value [tes\tt] contains illegal whitespace characters. Only space is allowed." 
+ ).assertThrowsAndMatches(() -> target.validateAuthenticatorName("tes\tt")); } } diff --git a/sql/pom.xml b/sql/pom.xml index eb2c1bf7a39c..d032a64f5e2f 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -229,6 +229,11 @@ test-jar test + + org.hamcrest + hamcrest-all + test + org.hamcrest hamcrest-core diff --git a/sql/src/main/codegen/includes/common.ftl b/sql/src/main/codegen/includes/common.ftl index 2eccdbc2a5a5..8de677647bef 100644 --- a/sql/src/main/codegen/includes/common.ftl +++ b/sql/src/main/codegen/includes/common.ftl @@ -65,7 +65,6 @@ org.apache.druid.java.util.common.Pair PartitionGranularity e = Expression(ExprContext.ACCEPT_SUB_QUERY) { granularity = DruidSqlParserUtils.convertSqlNodeToGranularityThrowingParseExceptions(e); - DruidSqlParserUtils.throwIfUnsupportedGranularityInPartitionedBy(granularity); unparseString = e.toString(); } ) diff --git a/sql/src/main/codegen/includes/insert.ftl b/sql/src/main/codegen/includes/insert.ftl index c0e04bc77245..a0482dbf8a6d 100644 --- a/sql/src/main/codegen/includes/insert.ftl +++ b/sql/src/main/codegen/includes/insert.ftl @@ -38,7 +38,9 @@ SqlNode DruidSqlInsertEof() : ] { if (clusteredBy != null && partitionedBy.lhs == null) { - throw new ParseException("CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause"); + throw org.apache.druid.sql.calcite.parser.DruidSqlParserUtils.problemParsing( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + ); } } // EOF is also present in SqlStmtEof but EOF is a special case and a single EOF can be consumed multiple times. 
diff --git a/sql/src/main/codegen/includes/replace.ftl b/sql/src/main/codegen/includes/replace.ftl index ed8dbb10eed2..5d47c9195816 100644 --- a/sql/src/main/codegen/includes/replace.ftl +++ b/sql/src/main/codegen/includes/replace.ftl @@ -58,7 +58,9 @@ SqlNode DruidSqlReplaceEof() : ] { if (clusteredBy != null && partitionedBy.lhs == null) { - throw new ParseException("CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause"); + throw org.apache.druid.sql.calcite.parser.DruidSqlParserUtils.problemParsing( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + ); } } // EOF is also present in SqlStmtEof but EOF is a special case and a single EOF can be consumed multiple times. diff --git a/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java b/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java index 99d2fa17a618..bfa95c5d5562 100644 --- a/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java @@ -19,8 +19,6 @@ package org.apache.druid.sql; -import org.apache.calcite.sql.parser.SqlParseException; -import org.apache.calcite.tools.ValidationException; import org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.query.QueryContexts; import org.apache.druid.server.security.Access; @@ -130,17 +128,7 @@ protected void validate(final DruidPlanner planner) plannerContext = planner.getPlannerContext(); plannerContext.setAuthenticationResult(queryPlus.authResult()); plannerContext.setParameters(queryPlus.parameters()); - try { - planner.validate(); - } - // We can't collapse catch clauses since SqlPlanningException has - // type-sensitive constructors. 
- catch (SqlParseException e) { - throw new SqlPlanningException(e); - } - catch (ValidationException e) { - throw new SqlPlanningException(e); - } + planner.validate(); } /** diff --git a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java index 62830063d21c..0bcf0f684caf 100644 --- a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java @@ -20,7 +20,9 @@ package org.apache.druid.sql; import com.google.common.annotations.VisibleForTesting; -import org.apache.calcite.tools.ValidationException; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; @@ -225,11 +227,22 @@ public ResultSet plan() reporter.planningTimeNanos(System.nanoTime() - planningStartNanos); return resultSet; } + catch (RelOptPlanner.CannotPlanException e) { + // Not sure if this is even thrown here. 
+ throw DruidException.forPersona(DruidException.Persona.DEVELOPER) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build(e, "Problem planning SQL query"); + } catch (RuntimeException e) { state = State.FAILED; reporter.failed(e); throw e; } + catch (AssertionError e) { + state = State.FAILED; + reporter.failed(e); + throw InvalidSqlInput.exception(e, "Calcite assertion violated: [%s]", e.getMessage()); + } } /** @@ -239,12 +252,7 @@ public ResultSet plan() @VisibleForTesting protected PlannerResult createPlan(DruidPlanner planner) { - try { - return planner.plan(); - } - catch (ValidationException e) { - throw new SqlPlanningException(e); - } + return planner.plan(); } /** diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/AvgSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/AvgSqlAggregator.java index e7e252cf8e38..8d84ab30760f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/AvgSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/AvgSqlAggregator.java @@ -32,8 +32,8 @@ import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.post.ArithmeticPostAggregator; import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator; +import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; -import org.apache.druid.segment.column.ValueType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.aggregation.Aggregations; import org.apache.druid.sql.calcite.aggregation.SqlAggregator; @@ -95,12 +95,12 @@ public Aggregation toDruidAggregation( final DruidExpression arg = Iterables.getOnlyElement(arguments); final ExprMacroTable macroTable = plannerContext.getExprMacroTable(); - final ValueType sumType; + final ColumnType sumType; // Use 64-bit sum regardless of the type of the AVG aggregator. 
if (SqlTypeName.INT_TYPES.contains(aggregateCall.getType().getSqlTypeName())) { - sumType = ValueType.LONG; + sumType = ColumnType.LONG; } else { - sumType = ValueType.DOUBLE; + sumType = ColumnType.DOUBLE; } final String fieldName; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java index 6efc8846e914..0137689a8512 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java @@ -35,8 +35,8 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.util.Optionality; -import org.apache.druid.java.util.common.IAE; -import org.apache.druid.java.util.common.ISE; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.any.DoubleAnyAggregatorFactory; import org.apache.druid.query.aggregation.any.FloatAnyAggregatorFactory; @@ -60,7 +60,6 @@ import org.apache.druid.sql.calcite.expression.Expressions; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry; import javax.annotation.Nullable; @@ -78,7 +77,13 @@ enum AggregatorType { EARLIEST { @Override - AggregatorFactory createAggregatorFactory(String name, String fieldName, String timeColumn, ColumnType type, int maxStringBytes) + AggregatorFactory createAggregatorFactory( + String name, + String fieldName, + String timeColumn, + ColumnType type, + int maxStringBytes + ) { switch (type.getType()) { 
case LONG: @@ -91,14 +96,20 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringFirstAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw new UnsupportedSQLQueryException("EARLIEST aggregator is not supported for '%s' type", type); + throw SimpleSqlAggregator.badTypeException(fieldName, "EARLIEST", type); } } }, LATEST { @Override - AggregatorFactory createAggregatorFactory(String name, String fieldName, String timeColumn, ColumnType type, int maxStringBytes) + AggregatorFactory createAggregatorFactory( + String name, + String fieldName, + String timeColumn, + ColumnType type, + int maxStringBytes + ) { switch (type.getType()) { case LONG: @@ -111,14 +122,20 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringLastAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw new UnsupportedSQLQueryException("LATEST aggregator is not supported for '%s' type", type); + throw SimpleSqlAggregator.badTypeException(fieldName, "LATEST", type); } } }, ANY_VALUE { @Override - AggregatorFactory createAggregatorFactory(String name, String fieldName, String timeColumn, ColumnType type, int maxStringBytes) + AggregatorFactory createAggregatorFactory( + String name, + String fieldName, + String timeColumn, + ColumnType type, + int maxStringBytes + ) { switch (type.getType()) { case LONG: @@ -130,7 +147,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case STRING: return new StringAnyAggregatorFactory(name, fieldName, maxStringBytes); default: - throw new UnsupportedSQLQueryException("ANY aggregation is not supported for '%s' type", type); + throw SimpleSqlAggregator.badTypeException(fieldName, "ANY", type); } } }; @@ -188,21 +205,30 @@ public Aggregation toDruidAggregation( final String aggregatorName = finalizeAggregations ? 
Calcites.makePrefixedName(name, "a") : name; final ColumnType outputType = Calcites.getColumnTypeForRelDataType(aggregateCall.getType()); if (outputType == null) { - throw new ISE( - "Cannot translate output sqlTypeName[%s] to Druid type for aggregator[%s]", - aggregateCall.getType().getSqlTypeName(), - aggregateCall.getName() - ); + throw DruidException.forPersona(DruidException.Persona.ADMIN) + .ofCategory(DruidException.Category.DEFENSIVE) + .build( + "Cannot convert output SQL type[%s] to a Druid type for function [%s]", + aggregateCall.getName(), + aggregateCall.getType().getSqlTypeName() + ); } final String fieldName = getColumnName(plannerContext, virtualColumnRegistry, args.get(0), rexNodes.get(0)); if (!rowSignature.contains(ColumnHolder.TIME_COLUMN_NAME) && (aggregatorType == AggregatorType.LATEST || aggregatorType == AggregatorType.EARLIEST)) { - plannerContext.setPlanningError("%s() aggregator depends on __time column, the underlying datasource " - + "or extern function you are querying doesn't contain __time column, " - + "Please use %s_BY() and specify the time column you want to use", - aggregatorType.name(), - aggregatorType.name() + // This code is being run as part of the exploratory volcano planner, currently, the definition of these + // aggregators does not tell Calcite that they depend on a __time column being in existence, instead we are + // allowing the volcano planner to explore paths that put projections which eliminate the time column in between + // the table scan and the aggregation and then relying on this check to tell Calcite that the plan is bogus. + // In some future, it would be good to make the aggregator definition capable of telling Calcite that it depends + // on a __time column to be in existence. Or perhaps we should just kill these aggregators and have everything + // move to the _BY aggregators that require an explicit definition. 
Either way, for now, we set this potential + // error and let the volcano planner continue exploring + plannerContext.setPlanningError( + "LATEST and EARLIEST aggregators implicitly depend on the __time column, but the " + + "table queried doesn't contain a __time column. Please use LATEST_BY or EARLIEST_BY " + + "and specify the column explicitly." ); return null; } @@ -218,7 +244,11 @@ public Aggregation toDruidAggregation( maxStringBytes = RexLiteral.intValue(rexNodes.get(1)); } catch (AssertionError ae) { - plannerContext.setPlanningError("The second argument '%s' to function '%s' is not a number", rexNodes.get(1), aggregateCall.getName()); + plannerContext.setPlanningError( + "The second argument '%s' to function '%s' is not a number", + rexNodes.get(1), + aggregateCall.getName() + ); return null; } theAggFactory = aggregatorType.createAggregatorFactory( @@ -230,11 +260,10 @@ public Aggregation toDruidAggregation( ); break; default: - throw new IAE( - "aggregation[%s], Invalid number of arguments[%,d] to [%s] operator", - aggregatorName, - args.size(), - aggregatorType.name() + throw InvalidSqlInput.exception( + "Function [%s] expects 1 or 2 arguments but found [%s]", + aggregateCall.getName(), + args.size() ); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java index e27b006778e0..4f29b276a544 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java @@ -28,10 +28,8 @@ import org.apache.druid.query.aggregation.FloatMaxAggregatorFactory; import org.apache.druid.query.aggregation.LongMaxAggregatorFactory; import org.apache.druid.segment.column.ColumnType; -import org.apache.druid.segment.column.ValueType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import 
org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; public class MaxSqlAggregator extends SimpleSqlAggregator { @@ -53,17 +51,17 @@ Aggregation getAggregation( if (valueType == null) { return null; } - return Aggregation.create(createMaxAggregatorFactory(valueType.getType(), name, fieldName, macroTable)); + return Aggregation.create(createMaxAggregatorFactory(valueType, name, fieldName, macroTable)); } private static AggregatorFactory createMaxAggregatorFactory( - final ValueType aggregationType, + final ColumnType aggregationType, final String name, final String fieldName, final ExprMacroTable macroTable ) { - switch (aggregationType) { + switch (aggregationType.getType()) { case LONG: return new LongMaxAggregatorFactory(name, fieldName, null, macroTable); case FLOAT: @@ -71,7 +69,9 @@ private static AggregatorFactory createMaxAggregatorFactory( case DOUBLE: return new DoubleMaxAggregatorFactory(name, fieldName, null, macroTable); default: - throw new UnsupportedSQLQueryException("Max aggregation is not supported for '%s' type", aggregationType); + // This error refers to the Druid type. But, we're in SQL validation. + // It should refer to the SQL type. 
+ throw SimpleSqlAggregator.badTypeException(fieldName, "MAX", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java index b009ead1fe95..93b87d376b52 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java @@ -30,7 +30,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; public class MinSqlAggregator extends SimpleSqlAggregator { @@ -67,7 +66,7 @@ private static AggregatorFactory createMinAggregatorFactory( case DOUBLE: return new DoubleMinAggregatorFactory(name, fieldName, null, macroTable); default: - throw new UnsupportedSQLQueryException("MIN aggregator is not supported for '%s' type", aggregationType); + throw SimpleSqlAggregator.badTypeException(fieldName, "MIN", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SimpleSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SimpleSqlAggregator.java index 643a99394fec..3ac68483611a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SimpleSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SimpleSqlAggregator.java @@ -23,7 +23,10 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.rel.core.Project; import org.apache.calcite.rex.RexBuilder; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.math.expr.ExprMacroTable; +import org.apache.druid.segment.column.ColumnType; import 
org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.aggregation.Aggregations; @@ -45,6 +48,11 @@ */ public abstract class SimpleSqlAggregator implements SqlAggregator { + public static DruidException badTypeException(String columnName, String agg, ColumnType type) + { + return InvalidSqlInput.exception("Aggregation [%s] does not support type [%s], column [%s]", agg, type, columnName); + } + @Nullable @Override public Aggregation toDruidAggregation( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java index 53d7fc4cf7ae..4411ebd33cdc 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java @@ -27,6 +27,7 @@ import org.apache.calcite.rex.RexLiteral; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlAggFunction; +import org.apache.calcite.sql.SqlCallBinding; import org.apache.calcite.sql.SqlFunctionCategory; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlOperatorBinding; @@ -51,7 +52,6 @@ import org.apache.druid.sql.calcite.expression.Expressions; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry; import org.apache.druid.sql.calcite.table.RowSignatures; @@ -197,7 +197,16 @@ public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); if (type instanceof RowSignatures.ComplexSqlType) { - throw new UnsupportedSQLQueryException("Cannot use STRING_AGG on complex inputs %s", type); + 
String columnName = ""; + if (sqlOperatorBinding instanceof SqlCallBinding) { + columnName = ((SqlCallBinding) sqlOperatorBinding).getCall().operand(0).toString(); + } + + throw SimpleSqlAggregator.badTypeException( + columnName, + "STRING_AGG", + ((RowSignatures.ComplexSqlType) type).getColumnType() + ); } return Calcites.createSqlTypeWithNullability( sqlOperatorBinding.getTypeFactory(), diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java index f4dcad3ed598..148c4dd0d0c6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java @@ -37,10 +37,8 @@ import org.apache.druid.query.aggregation.FloatSumAggregatorFactory; import org.apache.druid.query.aggregation.LongSumAggregatorFactory; import org.apache.druid.segment.column.ColumnType; -import org.apache.druid.segment.column.ValueType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; public class SumSqlAggregator extends SimpleSqlAggregator { @@ -70,17 +68,17 @@ Aggregation getAggregation( if (valueType == null) { return null; } - return Aggregation.create(createSumAggregatorFactory(valueType.getType(), name, fieldName, macroTable)); + return Aggregation.create(createSumAggregatorFactory(valueType, name, fieldName, macroTable)); } static AggregatorFactory createSumAggregatorFactory( - final ValueType aggregationType, + final ColumnType aggregationType, final String name, final String fieldName, final ExprMacroTable macroTable ) { - switch (aggregationType) { + switch (aggregationType.getType()) { case LONG: return new LongSumAggregatorFactory(name, fieldName, null, macroTable); case FLOAT: @@ -88,7 +86,7 
@@ static AggregatorFactory createSumAggregatorFactory( case DOUBLE: return new DoubleSumAggregatorFactory(name, fieldName, null, macroTable); default: - throw new UnsupportedSQLQueryException("Sum aggregation is not supported for '%s' type", aggregationType); + throw SimpleSqlAggregator.badTypeException(fieldName, "SUM", aggregationType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java index 76640bcf809b..c5503f7eb85f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java @@ -40,6 +40,8 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeTransforms; import org.apache.calcite.sql2rel.SqlRexConvertlet; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.Expr; @@ -56,10 +58,10 @@ import org.apache.druid.sql.calcite.expression.SqlOperatorConversion; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.planner.convertlet.DruidConvertletFactory; import org.apache.druid.sql.calcite.table.RowSignatures; +import javax.annotation.Nonnull; import javax.annotation.Nullable; import java.util.Collections; import java.util.List; @@ -195,17 +197,7 @@ public DruidExpression toDruidExpression( } // pre-normalize path so that the same expressions with different jq syntax are collapsed final String path = (String) pathExpr.eval(InputBindings.nilBindings()).value(); - 
final List parts; - try { - parts = NestedPathFinder.parseJsonPath(path); - } - catch (IllegalArgumentException iae) { - throw new UnsupportedSQLQueryException( - "Cannot use [%s]: [%s]", - call.getOperator().getName(), - iae.getMessage() - ); - } + final List parts = extractNestedPathParts(call, path); final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> "json_query(" + args.get(0).getExpression() + ",'" + jsonPath + "')"; @@ -232,7 +224,6 @@ public DruidExpression toDruidExpression( } } - /** * The {@link org.apache.calcite.sql2rel.StandardConvertletTable} converts json_value(.. RETURNING type) into * cast(json_value_any(..), type). @@ -386,17 +377,9 @@ public DruidExpression toDruidExpression( } // pre-normalize path so that the same expressions with different jq syntax are collapsed final String path = (String) pathExpr.eval(InputBindings.nilBindings()).value(); - final List parts; - try { - parts = NestedPathFinder.parseJsonPath(path); - } - catch (IllegalArgumentException iae) { - throw new UnsupportedSQLQueryException( - "Cannot use [%s]: [%s]", - call.getOperator().getName(), - iae.getMessage() - ); - } + + final List parts = extractNestedPathParts(call, path); + final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> "json_value(" + args.get(0).getExpression() + ",'" + jsonPath + "', '" + druidType.asTypeString() + "')"; @@ -521,7 +504,7 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw new UnsupportedSQLQueryException( + throw InvalidSqlInput.exception( "Cannot use [%s]: [%s]", call.getOperator().getName(), iae.getMessage() @@ -684,17 +667,7 @@ public DruidExpression toDruidExpression( } // pre-normalize path so that the same expressions with different jq syntax are collapsed final String path = (String) 
pathExpr.eval(InputBindings.nilBindings()).value(); - final List parts; - try { - parts = NestedPathFinder.parseJsonPath(path); - } - catch (IllegalArgumentException iae) { - throw new UnsupportedSQLQueryException( - "Cannot use [%s]: [%s]", - call.getOperator().getName(), - iae.getMessage() - ); - } + final List parts = extractNestedPathParts(call, path); final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> "json_value(" + args.get(0).getExpression() + ",'" + jsonPath + "')"; @@ -897,4 +870,19 @@ public DruidExpression toDruidExpression( ); } } + + @Nonnull + private static List extractNestedPathParts(RexCall call, String path) + { + try { + return NestedPathFinder.parseJsonPath(path); + } + catch (IllegalArgumentException iae) { + final String name = call.getOperator().getName(); + throw DruidException + .forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.INVALID_INPUT) + .build(iae, "Error when processing path [%s], operator [%s] is not useable", path, name); + } + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java index 9009237b780a..5f11c6f836a6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java @@ -33,7 +33,8 @@ import org.apache.calcite.sql.SqlOrderBy; import org.apache.calcite.sql.SqlTimestampLiteral; import org.apache.calcite.tools.ValidationException; -import org.apache.druid.java.util.common.IAE; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.java.util.common.granularity.GranularityType; @@ -65,7 +66,6 @@ public 
class DruidSqlParserUtils { - private static final Logger log = new Logger(DruidSqlParserUtils.class); public static final String ALL = "all"; @@ -78,6 +78,9 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql try { return convertSqlNodeToGranularity(sqlNode); } + catch (DruidException e) { + throw e; + } catch (Exception e) { log.debug(e, StringUtils.format("Unable to convert %s to a valid granularity.", sqlNode.toString())); throw new ParseException(e.getMessage()); @@ -88,7 +91,7 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql * This method is used to extract the granularity from a SqlNode representing following function calls: * 1. FLOOR(__time TO TimeUnit) * 2. TIME_FLOOR(__time, 'PT1H') - * + *

* Validation on the sqlNode is contingent to following conditions: * 1. sqlNode is an instance of SqlCall * 2. Operator is either one of TIME_FLOOR or FLOOR @@ -96,7 +99,7 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql * 4. First operand is a SimpleIdentifier representing __time * 5. If operator is TIME_FLOOR, the second argument is a literal, and can be converted to the Granularity class * 6. If operator is FLOOR, the second argument is a TimeUnit, and can be mapped using {@link TimeUnits} - * + *

* Since it is to be used primarily while parsing the SqlNode, it is wrapped in {@code convertSqlNodeToGranularityThrowingParseExceptions} * * @param sqlNode SqlNode representing a call to a function @@ -105,16 +108,8 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql */ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws ParseException { - - final String genericParseFailedMessageFormatString = "Encountered %s after PARTITIONED BY. " - + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or %s function"; - if (!(sqlNode instanceof SqlCall)) { - throw new ParseException(StringUtils.format( - genericParseFailedMessageFormatString, - sqlNode.toString(), - TimeFloorOperatorConversion.SQL_FUNCTION_NAME - )); + throw makeInvalidPartitionByException(sqlNode); } SqlCall sqlCall = (SqlCall) sqlNode; @@ -163,7 +158,9 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa catch (IllegalArgumentException e) { throw new ParseException(StringUtils.format("%s is an invalid period string", granularitySqlNode.toString())); } - return new PeriodGranularity(period, null, null); + final PeriodGranularity retVal = new PeriodGranularity(period, null, null); + validateSupportedGranularityForPartitionedBy(sqlNode, retVal); + return retVal; } else if ("FLOOR".equalsIgnoreCase(operatorName)) { // If the floor function is of form FLOOR(__time TO DAY) SqlNode granularitySqlNode = operandList.get(1); @@ -184,15 +181,22 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa granularityIntervalQualifier.timeUnitRange.toString() ) ); - return new PeriodGranularity(period, null, null); + final PeriodGranularity retVal = new PeriodGranularity(period, null, null); + validateSupportedGranularityForPartitionedBy(sqlNode, retVal); + return retVal; } // Shouldn't reach here - throw new ParseException(StringUtils.format( - genericParseFailedMessageFormatString, - sqlNode.toString(), 
- TimeFloorOperatorConversion.SQL_FUNCTION_NAME - )); + throw makeInvalidPartitionByException(sqlNode); + } + + private static DruidException makeInvalidPartitionByException(SqlNode sqlNode) + { + return InvalidSqlInput.exception( + "Invalid granularity [%s] after PARTITIONED BY. " + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR() or TIME_FLOOR()", + sqlNode + ); } /** @@ -200,7 +204,7 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa * be used in creating an ingestion spec. If the sqlNode is an SqlLiteral of {@link #ALL}, returns a singleton list of * "ALL". Otherwise, it converts and optimizes the query using {@link MoveTimeFiltersToIntervals} into a list of * intervals which contain all valid values of time as per the query. - * + *

* The following validations are performed * 1. Only __time column and timestamp literals are present in the query * 2. The interval after optimization is not empty @@ -208,8 +212,8 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa * 4. The intervals after adjusting for timezone are aligned with the granularity parameter * * @param replaceTimeQuery Sql node representing the query - * @param granularity granularity of the query for validation - * @param dateTimeZone timezone + * @param granularity granularity of the query for validation + * @param dateTimeZone timezone * @return List of string representation of intervals * @throws ValidationException if the SqlNode cannot be converted to a list of intervals */ @@ -217,7 +221,7 @@ public static List validateQueryAndConvertToIntervals( SqlNode replaceTimeQuery, Granularity granularity, DateTimeZone dateTimeZone - ) throws ValidationException + ) { if (replaceTimeQuery instanceof SqlLiteral && ALL.equalsIgnoreCase(((SqlLiteral) replaceTimeQuery).toValue())) { return ImmutableList.of(ALL); @@ -230,19 +234,31 @@ public static List validateQueryAndConvertToIntervals( List intervals = filtration.getIntervals(); if (filtration.getDimFilter() != null) { - throw new ValidationException("Only " + ColumnHolder.TIME_COLUMN_NAME + " column is supported in OVERWRITE WHERE clause"); + throw InvalidSqlInput.exception( + "OVERWRITE WHERE clause only supports filtering on the __time column, got [%s]", + filtration.getDimFilter() + ); } if (intervals.isEmpty()) { - throw new ValidationException("Intervals for replace are empty"); + throw InvalidSqlInput.exception( + "The OVERWRITE WHERE clause [%s] produced no time intervals, are the bounds overly restrictive?", + dimFilter, + intervals + ); } for (Interval interval : intervals) { DateTime intervalStart = interval.getStart(); DateTime intervalEnd = interval.getEnd(); - if (!granularity.bucketStart(intervalStart).equals(intervalStart) || 
!granularity.bucketStart(intervalEnd).equals(intervalEnd)) { - throw new ValidationException("OVERWRITE WHERE clause contains an interval " + intervals + - " which is not aligned with PARTITIONED BY granularity " + granularity); + if (!granularity.bucketStart(intervalStart).equals(intervalStart) + || !granularity.bucketStart(intervalEnd).equals(intervalEnd)) { + throw InvalidSqlInput.exception( + "OVERWRITE WHERE clause identified interval [%s]" + + " which is not aligned with PARTITIONED BY granularity [%s]", + interval, + granularity + ); } } return intervals @@ -254,13 +270,12 @@ public static List validateQueryAndConvertToIntervals( /** * Extracts and converts the information in the CLUSTERED BY clause to a new SqlOrderBy node. * - * @param query sql query + * @param query sql query * @param clusteredByList List of clustered by columns * @return SqlOrderBy node containing the clusteredByList information * @throws ValidationException if any of the clustered by columns contain DESCENDING order. */ public static SqlOrderBy convertClusterByToOrderBy(SqlNode query, SqlNodeList clusteredByList) - throws ValidationException { validateClusteredByColumns(clusteredByList); // If we have a CLUSTERED BY clause, extract the information in that CLUSTERED BY and create a new @@ -290,10 +305,9 @@ public static SqlOrderBy convertClusterByToOrderBy(SqlNode query, SqlNodeList cl /** * Validates the clustered by columns to ensure that it does not contain DESCENDING order columns. * - * @param clusteredByNodes List of SqlNodes representing columns to be clustered by. - * @throws ValidationException if any of the clustered by columns contain DESCENDING order. + * @param clusteredByNodes List of SqlNodes representing columns to be clustered by. 
*/ - public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes) throws ValidationException + public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes) { if (clusteredByNodes == null) { return; @@ -301,10 +315,9 @@ public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes for (final SqlNode clusteredByNode : clusteredByNodes.getList()) { if (clusteredByNode.isA(ImmutableSet.of(SqlKind.DESCENDING))) { - throw new ValidationException( - StringUtils.format("[%s] is invalid." - + " CLUSTERED BY columns cannot be sorted in descending order.", clusteredByNode.toString() - ) + throw InvalidSqlInput.exception( + "Invalid CLUSTERED BY clause [%s]: cannot sort in descending order.", + clusteredByNode ); } } @@ -316,111 +329,121 @@ public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes * are AND, OR, NOT, >, <, >=, <= and BETWEEN operators in the sql query. * * @param replaceTimeQuery Sql node representing the query - * @param dateTimeZone timezone + * @param dateTimeZone timezone * @return Dimfilter for the query * @throws ValidationException if the SqlNode cannot be converted a Dimfilter */ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTimeZone dateTimeZone) - throws ValidationException { if (!(replaceTimeQuery instanceof SqlBasicCall)) { - log.error("Expected SqlBasicCall during parsing, but found " + replaceTimeQuery.getClass().getName()); - throw new ValidationException("Invalid OVERWRITE WHERE clause"); + throw InvalidSqlInput.exception( + "Invalid OVERWRITE WHERE clause [%s]: expected clause including AND, OR, NOT, >, <, >=, <= OR BETWEEN operators", + replaceTimeQuery + ); } - String columnName; - SqlBasicCall sqlBasicCall = (SqlBasicCall) replaceTimeQuery; - List operandList = sqlBasicCall.getOperandList(); - switch (sqlBasicCall.getOperator().getKind()) { - case AND: - List dimFilters = new ArrayList<>(); - for (SqlNode sqlNode : 
sqlBasicCall.getOperandList()) { - dimFilters.add(convertQueryToDimFilter(sqlNode, dateTimeZone)); - } - return new AndDimFilter(dimFilters); - case OR: - dimFilters = new ArrayList<>(); - for (SqlNode sqlNode : sqlBasicCall.getOperandList()) { - dimFilters.add(convertQueryToDimFilter(sqlNode, dateTimeZone)); - } - return new OrDimFilter(dimFilters); - case NOT: - return new NotDimFilter(convertQueryToDimFilter(sqlBasicCall.getOperandList().get(0), dateTimeZone)); - case GREATER_THAN_OR_EQUAL: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - null, - false, - null, - null, - null, - StringComparators.NUMERIC - ); - case LESS_THAN_OR_EQUAL: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - null, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - null, - false, - null, - null, - StringComparators.NUMERIC - ); - case GREATER_THAN: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - null, - true, - null, - null, - null, - StringComparators.NUMERIC - ); - case LESS_THAN: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - null, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - null, - true, - null, - null, - StringComparators.NUMERIC - ); - case BETWEEN: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - parseTimeStampWithTimeZone(operandList.get(2), dateTimeZone), - false, - false, - null, - null, - StringComparators.NUMERIC - ); - default: - throw new ValidationException("Unsupported operation in OVERWRITE WHERE clause: " + sqlBasicCall.getOperator().getName()); + + try { + String columnName; + SqlBasicCall sqlBasicCall = 
(SqlBasicCall) replaceTimeQuery; + List operandList = sqlBasicCall.getOperandList(); + switch (sqlBasicCall.getOperator().getKind()) { + case AND: + List dimFilters = new ArrayList<>(); + for (SqlNode sqlNode : sqlBasicCall.getOperandList()) { + dimFilters.add(convertQueryToDimFilter(sqlNode, dateTimeZone)); + } + return new AndDimFilter(dimFilters); + case OR: + dimFilters = new ArrayList<>(); + for (SqlNode sqlNode : sqlBasicCall.getOperandList()) { + dimFilters.add(convertQueryToDimFilter(sqlNode, dateTimeZone)); + } + return new OrDimFilter(dimFilters); + case NOT: + return new NotDimFilter(convertQueryToDimFilter(sqlBasicCall.getOperandList().get(0), dateTimeZone)); + case GREATER_THAN_OR_EQUAL: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + null, + false, + null, + null, + null, + StringComparators.NUMERIC + ); + case LESS_THAN_OR_EQUAL: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + null, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + null, + false, + null, + null, + StringComparators.NUMERIC + ); + case GREATER_THAN: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + null, + true, + null, + null, + null, + StringComparators.NUMERIC + ); + case LESS_THAN: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + null, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + null, + true, + null, + null, + StringComparators.NUMERIC + ); + case BETWEEN: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + parseTimeStampWithTimeZone(operandList.get(2), dateTimeZone), + false, + false, + null, + null, + 
StringComparators.NUMERIC + ); + default: + throw InvalidSqlInput.exception( + "Unsupported operation [%s] in OVERWRITE WHERE clause.", + sqlBasicCall.getOperator().getName() + ); + } + } + catch (DruidException e) { + throw e.prependAndBuild("Invalid OVERWRITE WHERE clause [%s]", replaceTimeQuery); } } /** * Converts a {@link SqlNode} identifier into a string representation * - * @param sqlNode the sql node + * @param sqlNode the SQL node * @return string representing the column name - * @throws ValidationException if the sql node is not an SqlIdentifier + * @throws DruidException if the SQL node is not an SqlIdentifier */ - public static String parseColumnName(SqlNode sqlNode) throws ValidationException + public static String parseColumnName(SqlNode sqlNode) { if (!(sqlNode instanceof SqlIdentifier)) { - throw new ValidationException("Expressions must be of the form __time TIMESTAMP"); + throw InvalidSqlInput.exception("Cannot parse column name from SQL expression [%s]", sqlNode); } return ((SqlIdentifier) sqlNode).getSimple(); } @@ -428,15 +451,15 @@ public static String parseColumnName(SqlNode sqlNode) throws ValidationException /** * Converts a {@link SqlNode} into a timestamp, taking into account the timezone * - * @param sqlNode the sql node + * @param sqlNode the SQL node * @param timeZone timezone * @return the timestamp string as milliseconds from epoch - * @throws ValidationException if the sql node is not a SqlTimestampLiteral + * @throws DruidException if the SQL node is not a SqlTimestampLiteral */ - public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) throws ValidationException + private static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) { if (!(sqlNode instanceof SqlTimestampLiteral)) { - throw new ValidationException("Expressions must be of the form __time TIMESTAMP"); + throw InvalidSqlInput.exception("Cannot get a timestamp from sql expression [%s]", sqlNode); } Timestamp 
sqlTimestamp = Timestamp.valueOf(((SqlTimestampLiteral) sqlNode).toFormattedString()); @@ -444,17 +467,12 @@ public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone ti return String.valueOf(zonedTimestamp.toInstant().toEpochMilli()); } - /** - * Throws an IAE with appropriate message if the granularity supplied is not present in - * {@link org.apache.druid.java.util.common.granularity.Granularities}. It also filters out NONE as it is not a valid - * granularity that can be supplied in PARTITIONED BY - */ - public static void throwIfUnsupportedGranularityInPartitionedBy(Granularity granularity) + public static void validateSupportedGranularityForPartitionedBy(SqlNode originalNode, Granularity granularity) { if (!GranularityType.isStandard(granularity)) { - throw new IAE( - "The granularity specified in PARTITIONED BY is not supported. " - + "Please use an equivalent of these granularities: %s.", + throw InvalidSqlInput.exception( + "The granularity specified in PARTITIONED BY [%s] is not supported. Valid options: [%s]", + originalNode == null ? 
granularity : originalNode, Arrays.stream(GranularityType.values()) .filter(granularityType -> !granularityType.equals(GranularityType.NONE)) .map(Enum::name) @@ -463,4 +481,9 @@ public static void throwIfUnsupportedGranularityInPartitionedBy(Granularity gran ); } } + + public static DruidException problemParsing(String message) + { + return InvalidSqlInput.exception(message); + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index 59c4cca2851c..fc6c08a04033 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -20,25 +20,33 @@ package org.apache.druid.sql.calcite.planner; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Joiner; import com.google.common.base.Preconditions; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.runtime.CalciteContextException; import org.apache.calcite.schema.SchemaPlus; import org.apache.calcite.sql.SqlExplain; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.parser.SqlParseException; +import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.calcite.tools.FrameworkConfig; import org.apache.calcite.tools.ValidationException; -import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.query.QueryContext; import org.apache.druid.server.security.Access; import org.apache.druid.server.security.Resource; import org.apache.druid.server.security.ResourceAction; import org.apache.druid.sql.calcite.parser.DruidSqlInsert; import org.apache.druid.sql.calcite.parser.DruidSqlReplace; +import org.apache.druid.sql.calcite.parser.ParseException; +import 
org.apache.druid.sql.calcite.parser.Token; import org.apache.druid.sql.calcite.run.SqlEngine; import org.joda.time.DateTimeZone; import java.io.Closeable; +import java.util.ArrayList; import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -56,6 +64,10 @@ */ public class DruidPlanner implements Closeable { + + public static final Joiner SPACE_JOINER = Joiner.on(" "); + public static final Joiner COMMA_JOINER = Joiner.on(", "); + public enum State { START, VALIDATED, PREPARED, PLANNED @@ -119,7 +131,7 @@ public AuthResult( * @return set of {@link Resource} corresponding to any Druid datasources * or views which are taking part in the query. */ - public void validate() throws SqlParseException, ValidationException + public void validate() { Preconditions.checkState(state == State.START); @@ -129,22 +141,21 @@ public void validate() throws SqlParseException, ValidationException // Parse the query string. String sql = plannerContext.getSql(); hook.captureSql(sql); - SqlNode root = planner.parse(sql); - handler = createHandler(root); - + SqlNode root; try { - handler.validate(); - plannerContext.setResourceActions(handler.resourceActions()); - plannerContext.setExplainAttributes(handler.explainAttributes()); + root = planner.parse(sql); } - catch (RuntimeException e) { - throw new ValidationException(e); + catch (SqlParseException e1) { + throw translateException(e1); } - + handler = createHandler(root); + handler.validate(); + plannerContext.setResourceActions(handler.resourceActions()); + plannerContext.setExplainAttributes(handler.explainAttributes()); state = State.VALIDATED; } - private SqlStatementHandler createHandler(final SqlNode node) throws ValidationException + private SqlStatementHandler createHandler(final SqlNode node) { SqlNode query = node; SqlExplain explain = null; @@ -165,17 +176,17 @@ private SqlStatementHandler createHandler(final SqlNode node) throws ValidationE if (query.isA(SqlKind.QUERY)) { return new 
QueryHandler.SelectHandler(handlerContext, query, explain); } - throw new ValidationException(StringUtils.format("Cannot execute [%s].", node.getKind())); + throw InvalidSqlInput.exception("Unsupported SQL statement [%s]", node.getKind()); } /** * Prepare a SQL query for execution, including some initial parsing and * validation and any dynamic parameter type resolution, to support prepared * statements via JDBC. - * + *

* Prepare reuses the validation done in {@link #validate()} which must be * called first. - * + *

* A query can be prepared on a data source without having permissions on * that data source. This odd state of affairs is necessary because * {@link org.apache.druid.sql.calcite.view.DruidViewMacro} prepares @@ -193,11 +204,10 @@ public PrepareResult prepare() * Authorizes the statement. Done within the planner to enforce the authorization * step within the planner's state machine. * - * @param authorizer a function from resource actions to a {@link Access} result. + * @param authorizer a function from resource actions to a {@link Access} result. * @param extraActions set of additional resource actions beyond those inferred - * from the query itself. Specifically, the set of context keys to - * authorize. - * + * from the query itself. Specifically, the set of context keys to + * authorize. * @return the return value from the authorizer */ public AuthResult authorize( @@ -227,7 +237,7 @@ public AuthResult authorize( *

* Planning reuses the validation done in {@code validate()} which must be called first. */ - public PlannerResult plan() throws ValidationException + public PlannerResult plan() { Preconditions.checkState(state == State.VALIDATED || state == State.PREPARED); Preconditions.checkState(authorized); @@ -307,4 +317,186 @@ public PlannerHook hook() return hook; } } + + public static DruidException translateException(Exception e) + { + try { + throw e; + } + catch (DruidException inner) { + return inner; + } + catch (ValidationException inner) { + return parseValidationMessage(inner); + } + catch (SqlParseException inner) { + final Throwable cause = inner.getCause(); + if (cause instanceof DruidException) { + return (DruidException) cause; + } + + if (cause instanceof ParseException) { + ParseException parseException = (ParseException) cause; + final SqlParserPos failurePosition = inner.getPos(); + final String theUnexpectedToken = getUnexpectedTokenString(parseException); + + final String[] tokenDictionary = inner.getTokenImages(); + final int[][] expectedTokenSequences = inner.getExpectedTokenSequences(); + final ArrayList expectedTokens = new ArrayList<>(expectedTokenSequences.length); + for (int[] expectedTokenSequence : expectedTokenSequences) { + String[] strings = new String[expectedTokenSequence.length]; + for (int i = 0; i < expectedTokenSequence.length; ++i) { + strings[i] = tokenDictionary[expectedTokenSequence[i]]; + } + expectedTokens.add(SPACE_JOINER.join(strings)); + } + + return InvalidSqlInput + .exception( + inner, + "Received an unexpected token [%s] (line [%s], column [%s]), acceptable options: [%s]", + theUnexpectedToken, + failurePosition.getLineNum(), + failurePosition.getColumnNum(), + COMMA_JOINER.join(expectedTokens) + ) + .withContext("line", failurePosition.getLineNum()) + .withContext("column", failurePosition.getColumnNum()) + .withContext("endLine", failurePosition.getEndLineNum()) + .withContext("endColumn", 
failurePosition.getEndColumnNum()) + .withContext("token", theUnexpectedToken) + .withContext("expected", expectedTokens); + + } + + return DruidException.forPersona(DruidException.Persona.DEVELOPER) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build( + inner, + "Unable to parse the SQL, unrecognized error from calcite: [%s]", + inner.getMessage() + ); + } + catch (RelOptPlanner.CannotPlanException inner) { + return DruidException.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.INVALID_INPUT) + .build(inner, inner.getMessage()); + } + catch (Exception inner) { + // Anything else. Should not get here. Anything else should already have + // been translated to a DruidException unless it is an unexpected exception. + return DruidException.forPersona(DruidException.Persona.ADMIN) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build(inner, inner.getMessage()); + } + } + + private static DruidException parseValidationMessage(Exception e) + { + if (e.getCause() instanceof DruidException) { + return (DruidException) e.getCause(); + } + + Throwable maybeContextException = e; + CalciteContextException contextException = null; + while (maybeContextException != null) { + if (maybeContextException instanceof CalciteContextException) { + contextException = (CalciteContextException) maybeContextException; + break; + } + maybeContextException = maybeContextException.getCause(); + } + + if (contextException != null) { + return InvalidSqlInput + .exception( + e, + "%s (line [%s], column [%s])", + // the CalciteContextException .getMessage() assumes cause is non-null, so this should be fine + contextException.getCause().getMessage(), + contextException.getPosLine(), + contextException.getPosColumn() + ) + .withContext("line", String.valueOf(contextException.getPosLine())) + .withContext("column", String.valueOf(contextException.getPosColumn())) + .withContext("endLine", String.valueOf(contextException.getEndPosLine())) + 
.withContext("endColumn", String.valueOf(contextException.getEndPosColumn())); + } else { + return DruidException.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build(e, "Uncategorized calcite error message: [%s]", e.getMessage()); + } + } + + /** + * Grabs the unexpected token string. This code is borrowed with minimal adjustments from + * {@link ParseException#getMessage()}. It is possible that if that code changes, we need to also + * change this code to match it. + * + * @param parseException the parse exception to extract from + * @return the String representation of the unexpected token string + */ + private static String getUnexpectedTokenString(ParseException parseException) + { + int maxSize = 0; + for (int[] ints : parseException.expectedTokenSequences) { + if (maxSize < ints.length) { + maxSize = ints.length; + } + } + + StringBuilder bob = new StringBuilder(); + Token tok = parseException.currentToken.next; + for (int i = 0; i < maxSize; i++) { + if (i != 0) { + bob.append(" "); + } + if (tok.kind == 0) { + bob.append(""); + break; + } + char ch; + for (int i1 = 0; i1 < tok.image.length(); i1++) { + switch (tok.image.charAt(i1)) { + case 0: + continue; + case '\b': + bob.append("\\b"); + continue; + case '\t': + bob.append("\\t"); + continue; + case '\n': + bob.append("\\n"); + continue; + case '\f': + bob.append("\\f"); + continue; + case '\r': + bob.append("\\r"); + continue; + case '\"': + bob.append("\\\""); + continue; + case '\'': + bob.append("\\\'"); + continue; + case '\\': + bob.append("\\\\"); + continue; + default: + if ((ch = tok.image.charAt(i1)) < 0x20 || ch > 0x7e) { + String s = "0000" + Integer.toString(ch, 16); + bob.append("\\u").append(s.substring(s.length() - 4, s.length())); + } else { + bob.append(ch); + } + continue; + } + } + tok = tok.next; + } + return bob.toString(); + } + } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java 
b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java index 6b9e42bafd01..844d9896ae8d 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java @@ -23,6 +23,7 @@ import org.apache.calcite.rex.RexExecutor; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.math.expr.Expr; import org.apache.druid.math.expr.ExprEval; @@ -87,7 +88,7 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw new UnsupportedSQLQueryException("Illegal DATE constant: %s", constExp); + throw InvalidSqlInput.exception("Illegal DATE constant [%s]", constExp); } literal = rexBuilder.makeDateLiteral( @@ -101,7 +102,7 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw new UnsupportedSQLQueryException("Illegal TIMESTAMP constant: %s", constExp); + throw InvalidSqlInput.exception("Illegal TIMESTAMP constant [%s]", constExp); } literal = Calcites.jodaToCalciteTimestampLiteral( @@ -125,12 +126,12 @@ public void reduce( // the query can execute. double exprResultDouble = exprResult.asDouble(); if (Double.isNaN(exprResultDouble) || Double.isInfinite(exprResultDouble)) { - String expression = druidExpression.getExpression(); - throw new UnsupportedSQLQueryException("'%s' evaluates to '%s' that is not supported in SQL. 
You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') or VARCHAR ('CAST(%s as VARCHAR)') or change the expression itself", - expression, - Double.toString(exprResultDouble), - expression, - expression); + throw InvalidSqlInput.exception( + "Expression [%s] evaluates to an unsupported value [%s], expected something that" + + " can be a Double. Consider casting with 'CAST( AS BIGINT)'", + druidExpression.getExpression(), + exprResultDouble + ); } bigDecimal = BigDecimal.valueOf(exprResult.asDouble()); } @@ -160,12 +161,14 @@ public void reduce( if (doubleVal == null) { resultAsBigDecimalList.add(null); } else if (Double.isNaN(doubleVal.doubleValue()) || Double.isInfinite(doubleVal.doubleValue())) { - String expression = druidExpression.getExpression(); - throw new UnsupportedSQLQueryException( - "'%s' contains an element that evaluates to '%s' which is not supported in SQL. You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself", - expression, - Double.toString(doubleVal.doubleValue()) - ); + throw InvalidSqlInput.exception( + "Expression [%s] was expected to generate values that are all Doubles," + + " but entry at index[%d] was not: [%s]." 
+ + " Consider Casting values to ensure a consistent type.", + druidExpression.getExpression(), + resultAsBigDecimalList.size(), + doubleVal + ); } else { resultAsBigDecimalList.add(BigDecimal.valueOf(doubleVal.doubleValue())); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index 459784eb3bb9..52b4efcfeb07 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -20,7 +20,6 @@ package org.apache.druid.sql.calcite.planner; import com.fasterxml.jackson.core.JsonProcessingException; -import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Iterables; import org.apache.calcite.jdbc.CalciteSchema; import org.apache.calcite.rel.RelRoot; @@ -36,7 +35,8 @@ import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; import org.apache.druid.common.utils.IdUtils; -import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; @@ -55,12 +55,6 @@ public abstract class IngestHandler extends QueryHandler { private static final Pattern UNNAMED_COLUMN_PATTERN = Pattern.compile("^EXPR\\$\\d+$", Pattern.CASE_INSENSITIVE); - @VisibleForTesting - public static final String UNNAMED_INGESTION_COLUMN_ERROR = - "Cannot ingest expressions that do not have an alias " - + "or columns with names like EXPR$[digit].\n" - + "E.g. 
if you are ingesting \"func(X)\", then you can rewrite it as " - + "\"func(X) as myColumn\""; protected final Granularity ingestionGranularity; protected String targetDatasource; @@ -77,7 +71,7 @@ public abstract class IngestHandler extends QueryHandler handlerContext.hook().captureInsert(ingestNode); } - protected static SqlNode convertQuery(DruidSqlIngest sqlNode) throws ValidationException + protected static SqlNode convertQuery(DruidSqlIngest sqlNode) { SqlNode query = sqlNode.getSource(); @@ -86,12 +80,10 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) throws ValidationE SqlOrderBy sqlOrderBy = (SqlOrderBy) query; SqlNodeList orderByList = sqlOrderBy.orderList; if (!(orderByList == null || orderByList.equals(SqlNodeList.EMPTY))) { - String opName = sqlNode.getOperator().getName(); - throw new ValidationException(StringUtils.format( - "Cannot have ORDER BY on %s %s statement, use CLUSTERED BY instead.", - "INSERT".equals(opName) ? "an" : "a", - opName - )); + throw InvalidSqlInput.exception( + "Cannot use an ORDER BY clause on a Query of type [%s], use CLUSTERED BY instead", + sqlNode.getOperator().getName() + ); } } if (sqlNode.getClusteredBy() != null) { @@ -99,7 +91,7 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) throws ValidationE } if (!query.isA(SqlKind.QUERY)) { - throw new ValidationException(StringUtils.format("Cannot execute [%s].", query.getKind())); + throw InvalidSqlInput.exception("Unexpected SQL statement type [%s], expected it to be a QUERY", query.getKind()); } return query; } @@ -112,13 +104,13 @@ protected String operationName() protected abstract DruidSqlIngest ingestNode(); @Override - public void validate() throws ValidationException + public void validate() { if (ingestNode().getPartitionedBy() == null) { - throw new ValidationException(StringUtils.format( - "%s statements must specify PARTITIONED BY clause explicitly", + throw InvalidSqlInput.exception( + "Operation [%s] requires a PARTITIONED BY to be 
explicitly defined, but none was found.", operationName() - )); + ); } try { PlannerContext plannerContext = handlerContext.plannerContext(); @@ -130,18 +122,16 @@ public void validate() throws ValidationException } } catch (JsonProcessingException e) { - throw new ValidationException("Unable to serialize partition granularity."); + throw InvalidSqlInput.exception(e, "Invalid partition granularity [%s]", ingestionGranularity); } super.validate(); // Check if CTX_SQL_OUTER_LIMIT is specified and fail the query if it is. CTX_SQL_OUTER_LIMIT being provided causes // the number of rows inserted to be limited which is likely to be confusing and unintended. if (handlerContext.queryContextMap().get(PlannerContext.CTX_SQL_OUTER_LIMIT) != null) { - throw new ValidationException( - StringUtils.format( - "%s cannot be provided with %s.", - PlannerContext.CTX_SQL_OUTER_LIMIT, - operationName() - ) + throw InvalidSqlInput.exception( + "Context parameter [%s] cannot be provided on operator [%s]", + PlannerContext.CTX_SQL_OUTER_LIMIT, + operationName() ); } targetDatasource = validateAndGetDataSourceForIngest(); @@ -154,22 +144,27 @@ protected RelDataType returnedRowType() final RelDataTypeFactory typeFactory = rootQueryRel.rel.getCluster().getTypeFactory(); return handlerContext.engine().resultTypeForInsert( typeFactory, - rootQueryRel.validatedRowType); + rootQueryRel.validatedRowType + ); } /** * Extract target datasource from a {@link SqlInsert}, and also validate that the ingestion is of a form we support. * Expects the target datasource to be either an unqualified name, or a name qualified by the default schema. 
*/ - private String validateAndGetDataSourceForIngest() throws ValidationException + private String validateAndGetDataSourceForIngest() { final SqlInsert insert = ingestNode(); if (insert.isUpsert()) { - throw new ValidationException("UPSERT is not supported."); + throw InvalidSqlInput.exception("UPSERT is not supported."); } if (insert.getTargetColumnList() != null) { - throw new ValidationException(operationName() + " with a target column list is not supported."); + throw InvalidSqlInput.exception( + "Operation [%s] cannot be run with a target column list, given [%s (%s)]", + operationName(), + insert.getTargetTable(), insert.getTargetColumnList() + ); } final SqlIdentifier tableIdentifier = (SqlIdentifier) insert.getTargetTable(); @@ -177,7 +172,9 @@ private String validateAndGetDataSourceForIngest() throws ValidationException if (tableIdentifier.names.isEmpty()) { // I don't think this can happen, but include a branch for it just in case. - throw new ValidationException(operationName() + " requires a target table."); + throw DruidException.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.DEFENSIVE) + .build("Operation [%s] requires a target table", operationName()); } else if (tableIdentifier.names.size() == 1) { // Unqualified name. 
dataSource = Iterables.getOnlyElement(tableIdentifier.names); @@ -189,22 +186,15 @@ private String validateAndGetDataSourceForIngest() throws ValidationException if (tableIdentifier.names.size() == 2 && defaultSchemaName.equals(tableIdentifier.names.get(0))) { dataSource = tableIdentifier.names.get(1); } else { - throw new ValidationException( - StringUtils.format( - "Cannot %s into %s because it is not a Druid datasource.", - operationName(), - tableIdentifier - ) + throw InvalidSqlInput.exception( + "Table [%s] does not support operation [%s] because it is not a Druid datasource", + tableIdentifier, + operationName() ); } } - try { - IdUtils.validateId(operationName() + " dataSource", dataSource); - } - catch (IllegalArgumentException e) { - throw new ValidationException(e.getMessage()); - } + IdUtils.validateId("table", dataSource); return dataSource; } @@ -222,15 +212,20 @@ protected QueryMaker buildQueryMaker(final RelRoot rootQueryRel) throws Validati return handlerContext.engine().buildQueryMakerForInsert( targetDatasource, rootQueryRel, - handlerContext.plannerContext()); + handlerContext.plannerContext() + ); } - private void validateColumnsForIngestion(RelRoot rootQueryRel) throws ValidationException + private void validateColumnsForIngestion(RelRoot rootQueryRel) { // Check that there are no unnamed columns in the insert. for (Pair field : rootQueryRel.fields) { if (UNNAMED_COLUMN_PATTERN.matcher(field.right).matches()) { - throw new ValidationException(UNNAMED_INGESTION_COLUMN_ERROR); + throw InvalidSqlInput.exception( + "Insertion requires columns to be named, but at least one of the columns was unnamed. This is usually " + + "the result of applying a function without having an AS clause, please ensure that all function calls" + + "are named with an AS clause as in \"func(X) as myColumn\"." 
+ ); } } } @@ -246,13 +241,14 @@ public InsertHandler( SqlStatementHandler.HandlerContext handlerContext, DruidSqlInsert sqlNode, SqlExplain explain - ) throws ValidationException + ) { super( handlerContext, sqlNode, convertQuery(sqlNode), - explain); + explain + ); this.sqlNode = sqlNode; } @@ -263,12 +259,12 @@ protected DruidSqlIngest ingestNode() } @Override - public void validate() throws ValidationException + public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_INSERT)) { - throw new ValidationException(StringUtils.format( - "Cannot execute INSERT with SQL engine '%s'.", - handlerContext.engine().name()) + throw InvalidSqlInput.exception( + "INSERT operations are not supported by requested SQL engine [%s], consider using MSQ.", + handlerContext.engine().name() ); } super.validate(); @@ -299,7 +295,7 @@ public ReplaceHandler( SqlStatementHandler.HandlerContext handlerContext, DruidSqlReplace sqlNode, SqlExplain explain - ) throws ValidationException + ) { super( handlerContext, @@ -317,24 +313,27 @@ protected DruidSqlIngest ingestNode() } @Override - public void validate() throws ValidationException + public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_REPLACE)) { - throw new ValidationException(StringUtils.format( - "Cannot execute REPLACE with SQL engine '%s'.", - handlerContext.engine().name()) + throw InvalidSqlInput.exception( + "REPLACE operations are not supported by the requested SQL engine [%s]. Consider using MSQ.", + handlerContext.engine().name() ); } SqlNode replaceTimeQuery = sqlNode.getReplaceTimeQuery(); if (replaceTimeQuery == null) { - throw new ValidationException("Missing time chunk information in OVERWRITE clause for REPLACE. Use " - + "OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table."); + throw InvalidSqlInput.exception( + "Missing time chunk information in OVERWRITE clause for REPLACE. 
Use " + + "OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." + ); } replaceIntervals = DruidSqlParserUtils.validateQueryAndConvertToIntervals( replaceTimeQuery, ingestionGranularity, - handlerContext.timeZone()); + handlerContext.timeZone() + ); super.validate(); if (replaceIntervals != null) { handlerContext.queryContextMap().put( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java index 4395da5fe441..691c33567a89 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java @@ -30,13 +30,11 @@ import org.apache.calcite.plan.Context; import org.apache.calcite.plan.ConventionTraitDef; import org.apache.calcite.rel.RelCollationTraitDef; -import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.sql.parser.SqlParser; import org.apache.calcite.sql.validate.SqlConformance; import org.apache.calcite.sql2rel.SqlToRelConverter; import org.apache.calcite.tools.FrameworkConfig; import org.apache.calcite.tools.Frameworks; -import org.apache.calcite.tools.ValidationException; import org.apache.druid.guice.annotations.Json; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.segment.join.JoinableFactoryWrapper; @@ -126,12 +124,7 @@ public DruidPlanner createPlannerForTesting(final SqlEngine engine, final String final DruidPlanner thePlanner = createPlanner(engine, sql, queryContext, null); thePlanner.getPlannerContext() .setAuthenticationResult(NoopEscalator.getInstance().createEscalatedAuthenticationResult()); - try { - thePlanner.validate(); - } - catch (SqlParseException | ValidationException e) { - throw new RuntimeException(e); - } + thePlanner.validate(); thePlanner.authorize(ra -> Access.OK, ImmutableSet.of()); return thePlanner; } diff --git 
a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index 45b4390d0ba9..c11d600e2622 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -54,10 +54,10 @@ import org.apache.calcite.sql.validate.SqlValidator; import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; -import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequences; -import org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.query.Query; import org.apache.druid.server.QueryResponse; @@ -106,10 +106,15 @@ public QueryHandler(SqlStatementHandler.HandlerContext handlerContext, SqlNode s } @Override - public void validate() throws ValidationException + public void validate() { CalcitePlanner planner = handlerContext.planner(); - validatedQueryNode = planner.validate(rewriteParameters()); + try { + validatedQueryNode = planner.validate(rewriteParameters()); + } + catch (ValidationException e) { + throw DruidPlanner.translateException(e); + } final SqlValidator validator = planner.getValidator(); SqlResourceCollectorShuttle resourceCollectorShuttle = new SqlResourceCollectorShuttle( @@ -183,7 +188,7 @@ private static RelDataType getExplainStructType(RelDataTypeFactory typeFactory) } @Override - public PlannerResult plan() throws ValidationException + public PlannerResult plan() { prepare(); final Set bindableTables = getBindableTables(rootQueryRel.rel); @@ -196,15 +201,12 @@ public PlannerResult plan() throws ValidationException // Consider BINDABLE convention when necessary. 
Used for metadata tables. if (!handlerContext.plannerContext().featureAvailable(EngineFeature.ALLOW_BINDABLE_PLAN)) { - throw new ValidationException( - StringUtils.format( - "Cannot query table%s %s with SQL engine '%s'.", - bindableTables.size() != 1 ? "s" : "", - bindableTables.stream() - .map(table -> Joiner.on(".").join(table.getQualifiedName())) - .collect(Collectors.joining(", ")), - handlerContext.engine().name() - ) + throw InvalidSqlInput.exception( + "Cannot query table(s) [%s] with SQL engine [%s]", + bindableTables.stream() + .map(table -> Joiner.on(".").join(table.getQualifiedName())) + .collect(Collectors.joining(", ")), + handlerContext.engine().name() ); } @@ -214,20 +216,30 @@ public PlannerResult plan() throws ValidationException return planForDruid(); } } - catch (Exception e) { - Throwable cannotPlanException = Throwables.getCauseOfType(e, RelOptPlanner.CannotPlanException.class); - if (null == cannotPlanException) { - // Not a CannotPlanException, rethrow without logging. + catch (RelOptPlanner.CannotPlanException e) { + throw buildSQLPlanningError(e); + } + catch (RuntimeException e) { + if (e instanceof DruidException) { throw e; } - Logger logger = log; - if (!handlerContext.queryContext().isDebug()) { - logger = log.noStackTrace(); + // Calcite throws a Runtime exception as the result of an IllegalTargetException + // as the result of invoking a method dynamically, when that method throws an + // exception. Unwrap the exception if this exception is from Calcite. 
+ RelOptPlanner.CannotPlanException cpe = Throwables.getCauseOfType(e, RelOptPlanner.CannotPlanException.class); + if (cpe != null) { + throw buildSQLPlanningError(cpe); + } + DruidException de = Throwables.getCauseOfType(e, DruidException.class); + if (de != null) { + throw de; } - String errorMessage = buildSQLPlanningErrorMessage(cannotPlanException); - logger.warn(e, errorMessage); - throw new UnsupportedSQLQueryException(errorMessage); + throw DruidPlanner.translateException(e); + } + catch (Exception e) { + // Not sure what this is. Should it have been translated sooner? + throw DruidPlanner.translateException(e); } } @@ -274,10 +286,10 @@ public void visit(RelNode node, int ordinal, RelNode parent) * things that are not directly translatable to native Druid queries such * as system tables and just a general purpose (but definitely not optimized) * fall-back. - * + *

* See {@link #planWithDruidConvention} which will handle things which are * directly translatable to native Druid queries. - * + *

* The bindable path handles parameter substitution of any values not * bound by the earlier steps. */ @@ -313,43 +325,43 @@ private PlannerResult planWithBindableConvention() } else { final BindableRel theRel = bindableRel; final DataContext dataContext = plannerContext.createDataContext( - planner.getTypeFactory(), - plannerContext.getParameters() + planner.getTypeFactory(), + plannerContext.getParameters() ); final Supplier> resultsSupplier = () -> { final Enumerable enumerable = theRel.bind(dataContext); final Enumerator enumerator = enumerable.enumerator(); return QueryResponse.withEmptyContext( Sequences.withBaggage(new BaseSequence<>( - new BaseSequence.IteratorMaker>() - { - @Override - public QueryHandler.EnumeratorIterator make() + new BaseSequence.IteratorMaker>() { - return new QueryHandler.EnumeratorIterator<>(new Iterator() + @Override + public QueryHandler.EnumeratorIterator make() { - @Override - public boolean hasNext() - { - return enumerator.moveNext(); - } - - @Override - public Object[] next() + return new QueryHandler.EnumeratorIterator<>(new Iterator() { - return (Object[]) enumerator.current(); - } - }); - } - - @Override - public void cleanup(QueryHandler.EnumeratorIterator iterFromMake) - { + @Override + public boolean hasNext() + { + return enumerator.moveNext(); + } + + @Override + public Object[] next() + { + return (Object[]) enumerator.current(); + } + }); + } + + @Override + public void cleanup(QueryHandler.EnumeratorIterator iterFromMake) + { + } } - } - ), enumerator::close) - ); + ), enumerator::close) + ); }; return new PlannerResult(resultsSupplier, rootQueryRel.validatedRowType); } @@ -562,12 +574,11 @@ protected PlannerResult planWithDruidConvention() throws ValidationException * This method wraps the root with a {@link LogicalSort} that applies a limit (no ordering change). 
If the outer rel * is already a {@link Sort}, we can merge our outerLimit into it, similar to what is going on in * {@link org.apache.druid.sql.calcite.rule.SortCollapseRule}. - * + *

* The {@link PlannerContext#CTX_SQL_OUTER_LIMIT} flag that controls this wrapping is meant for internal use only by * the web console, allowing it to apply a limit to queries without rewriting the original SQL. * * @param root root node - * * @return root node wrapped with a limiting logical sort if a limit is specified in the query context. */ @Nullable @@ -611,23 +622,28 @@ private RelRoot possiblyWrapRootWithOuterLimitFromContext(RelRoot root) protected abstract QueryMaker buildQueryMaker(RelRoot rootQueryRel) throws ValidationException; - private String buildSQLPlanningErrorMessage(Throwable exception) + private DruidException buildSQLPlanningError(RelOptPlanner.CannotPlanException exception) { String errorMessage = handlerContext.plannerContext().getPlanningError(); if (null == errorMessage && exception instanceof UnsupportedSQLQueryException) { errorMessage = exception.getMessage(); } - if (null == errorMessage) { - errorMessage = "Please check Broker logs for additional details."; + if (errorMessage == null) { + throw DruidException.forPersona(DruidException.Persona.OPERATOR) + .ofCategory(DruidException.Category.UNSUPPORTED) + .build(exception, "Unhandled Query Planning Failure, see broker logs for details"); } else { // Planning errors are more like hints: it isn't guaranteed that the planning error is actually what went wrong. - errorMessage = "Possible error: " + errorMessage; + // For this reason, we consider these as targetting a more expert persona, i.e. the admin instead of the actual + // user. + throw DruidException.forPersona(DruidException.Persona.ADMIN) + .ofCategory(DruidException.Category.INVALID_INPUT) + .build( + exception, + "Query planning failed for unknown reason, our best guess is this [%s]", + errorMessage + ); } - // Finally, add the query itself to error message that user will get. - return StringUtils.format( - "Query not supported. 
%s SQL was: %s", errorMessage, - handlerContext.plannerContext().getSql() - ); } public static class SelectHandler extends QueryHandler @@ -635,19 +651,17 @@ public static class SelectHandler extends QueryHandler public SelectHandler( HandlerContext handlerContext, SqlNode sqlNode, - SqlExplain explain) + SqlExplain explain + ) { super(handlerContext, sqlNode, explain); } @Override - public void validate() throws ValidationException + public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_SELECT)) { - throw new ValidationException(StringUtils.format( - "Cannot execute SELECT with SQL engine '%s'.", - handlerContext.engine().name()) - ); + throw InvalidSqlInput.exception("Cannot execute SELECT with SQL engine [%s]", handlerContext.engine().name()); } super.validate(); } @@ -673,7 +687,8 @@ protected QueryMaker buildQueryMaker(final RelRoot rootQueryRel) throws Validati { return handlerContext.engine().buildQueryMakerForSelect( rootQueryRel, - handlerContext.plannerContext()); + handlerContext.plannerContext() + ); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java index cd9b1c2d2138..4fc27d3af403 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java @@ -43,9 +43,8 @@ import org.apache.calcite.rex.RexNode; import org.apache.calcite.rex.RexShuttle; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.sql.SqlPlanningException; -import org.apache.druid.sql.SqlPlanningException.PlanningError; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; /** * Traverse {@link RelNode} tree and replaces all {@link RexDynamicParam} with {@link 
org.apache.calcite.rex.RexLiteral} @@ -201,10 +200,7 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa if (plannerContext.getParameters().size() > dynamicParam.getIndex()) { TypedValue param = plannerContext.getParameters().get(dynamicParam.getIndex()); if (param == null) { - throw new SqlPlanningException( - PlanningError.VALIDATION_ERROR, - StringUtils.format("Parameter at position [%s] is not bound", dynamicParam.getIndex()) - ); + throw unbound(dynamicParam); } if (param.value == null) { return builder.makeNullLiteral(typeFactory.createSqlType(SqlTypeName.NULL)); @@ -216,12 +212,14 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa true ); } else { - throw new SqlPlanningException( - PlanningError.VALIDATION_ERROR, - StringUtils.format("Parameter at position [%s] is not bound", dynamicParam.getIndex()) - ); + throw unbound(dynamicParam); } } return node; } + + private static DruidException unbound(RexDynamicParam dynamicParam) + { + return InvalidSqlInput.exception("No value bound for parameter (position [%s])", dynamicParam.getIndex() + 1); + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java index a0c9fcd72854..6619f48704e4 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java @@ -28,7 +28,8 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.util.SqlShuttle; import org.apache.calcite.util.TimestampString; -import org.apache.druid.java.util.common.IAE; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import java.util.ArrayList; import java.util.Arrays; @@ -38,7 +39,7 @@ * Replaces all {@link SqlDynamicParam} encountered in an {@link SqlNode} tree * with a 
{@link SqlLiteral} if a value binding exists for the parameter, if * possible. This is used in tandem with {@link RelParameterizerShuttle}. - * + *

* It is preferable that all parameters are placed here to pick up as many * optimizations as possible, but the facilities to convert jdbc types to * {@link SqlLiteral} are a bit less rich here than exist for converting a @@ -46,7 +47,7 @@ * {@link org.apache.calcite.rex.RexLiteral}, which is why * {@link SqlParameterizerShuttle} and {@link RelParameterizerShuttle} * both exist. - * + *

* As it turns out, most parameters will be replaced in this shuttle. * The one exception are DATE types expressed as integers. For reasons * known only to Calcite, the {@code RexBuilder.clean()} method, used by @@ -69,11 +70,11 @@ public SqlParameterizerShuttle(PlannerContext plannerContext) public SqlNode visit(SqlDynamicParam param) { if (plannerContext.getParameters().size() <= param.getIndex()) { - throw new IAE("Parameter at position [%s] is not bound", param.getIndex()); + throw unbound(param); } TypedValue paramBinding = plannerContext.getParameters().get(param.getIndex()); if (paramBinding == null) { - throw new IAE("Parameter at position [%s] is not bound", param.getIndex()); + throw unbound(param); } if (paramBinding.value == null) { return SqlLiteral.createNull(param.getParserPosition()); @@ -91,7 +92,7 @@ public SqlNode visit(SqlDynamicParam param) } if (typeName == SqlTypeName.ARRAY) { - return createArrayLiteral(paramBinding.value); + return createArrayLiteral(paramBinding.value, param.getIndex()); } try { // This throws ClassCastException for a DATE parameter given as @@ -105,6 +106,11 @@ public SqlNode visit(SqlDynamicParam param) } } + private static DruidException unbound(SqlDynamicParam param) + { + return InvalidSqlInput.exception("No value bound for parameter (position [%s])", param.getIndex() + 1); + } + /** * Convert an ARRAY parameter to the equivalent of the ARRAY[a, b, ...] * syntax. This is not well-supported in the present version of Calcite, @@ -112,7 +118,7 @@ public SqlNode visit(SqlDynamicParam param) * structure. Supports a limited set of member types. Does not attempt * to enforce that all elements have the same type. 
*/ - private SqlNode createArrayLiteral(Object value) + private SqlNode createArrayLiteral(Object value, int posn) { List list; if (value instanceof List) { @@ -121,9 +127,10 @@ private SqlNode createArrayLiteral(Object value) list = Arrays.asList((Object[]) value); } List args = new ArrayList<>(list.size()); - for (Object element : list) { + for (int i = 0, listSize = list.size(); i < listSize; i++) { + Object element = list.get(i); if (element == null) { - throw new IAE("An array parameter cannot contain null values"); + throw InvalidSqlInput.exception("parameter [%d] is an array, with an illegal null at index [%d]", posn + 1, i); } SqlNode node; if (element instanceof String) { @@ -135,9 +142,11 @@ private SqlNode createArrayLiteral(Object value) } else if (element instanceof Boolean) { node = SqlLiteral.createBoolean((Boolean) value, SqlParserPos.ZERO); } else { - throw new IAE( - "An array parameter does not allow values of type %s", - value.getClass().getSimpleName() + throw InvalidSqlInput.exception( + "parameter [%d] is an array, with an illegal value of type [%s] at index [%d]", + posn + 1, + element.getClass(), + i ); } args.add(node); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java index 4cb263c52202..267feae1f0f3 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java @@ -21,7 +21,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.calcite.schema.SchemaPlus; -import org.apache.calcite.tools.ValidationException; import org.apache.druid.query.QueryContext; import org.apache.druid.server.security.ResourceAction; import org.apache.druid.sql.calcite.run.SqlEngine; @@ -36,11 +35,11 @@ */ public interface SqlStatementHandler { - void validate() throws ValidationException; + void validate(); Set
resourceActions(); void prepare(); PrepareResult prepareResult(); - PlannerResult plan() throws ValidationException; + PlannerResult plan(); ExplainAttributes explainAttributes(); /** diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java index 5ab29ab13b1d..4ea14c3a7414 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java @@ -38,6 +38,7 @@ import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlKind; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.StringUtils; @@ -53,7 +54,6 @@ import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.table.RowSignatures; import javax.annotation.Nullable; @@ -360,7 +360,10 @@ public static JoinType toDruidJoinType(JoinRelType calciteJoinType) case INNER: return JoinType.INNER; default: - throw new UnsupportedSQLQueryException("Cannot handle joinType '%s'", calciteJoinType); + throw InvalidSqlInput.exception( + "Cannot handle joinType [%s]", + calciteJoinType + ); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java index 1f71a9212cbb..d0fcacf5df26 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java @@ -1430,12 +1430,14 @@ private ScanQuery toScanQuery() } if 
(!plannerContext.featureAvailable(EngineFeature.SCAN_ORDER_BY_NON_TIME) && !orderByColumns.isEmpty()) { - if (orderByColumns.size() > 1 || orderByColumns.stream() - .anyMatch(orderBy -> !orderBy.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME))) { - // Cannot handle this ordering. - // Scan cannot ORDER BY non-time columns. + if (orderByColumns.size() > 1 || !ColumnHolder.TIME_COLUMN_NAME.equals(orderByColumns.get(0).getColumnName())) { + // We cannot handle this ordering, but we encounter this ordering as part of the exploration of the volcano + // planner, which means that the query that we are looking at right now might only be doing this as one of the + // potential branches of exploration rather than being a semantic requirement of the query itself. So, it is + // not safe to send an error message telling the end-user exactly what is happening; instead we need to set the + // planning error and hope. plannerContext.setPlanningError( - "SQL query requires order by non-time column %s, which is not supported.", + "SQL query requires order by non-time column [%s], which is not supported.", orderByColumns ); return null; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java index b94a6ee4ac89..ea71dfd90986 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java @@ -25,11 +25,11 @@ import org.apache.calcite.plan.RelOptRuleCall; import org.apache.calcite.rel.logical.LogicalValues; import org.apache.calcite.rex.RexLiteral; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.query.InlineDataSource; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; -import
org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.rel.DruidQueryRel; import org.apache.druid.sql.calcite.table.RowSignatures; @@ -42,7 +42,7 @@ * This rule is used when the query directly reads in-memory tuples. For example, given a query of * `SELECT 1 + 1`, the query planner will create {@link LogicalValues} that contains one tuple, * which in turn containing one column of value 2. - * + *

* The query planner can sometimes reduce a regular query to a query that reads in-memory tuples. * For example, `SELECT count(*) FROM foo WHERE 1 = 0` is reduced to `SELECT 0`. This rule will * be used for this case as well. @@ -126,14 +126,18 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont return Calcites.calciteDateTimeLiteralToJoda(literal, plannerContext.getTimeZone()).getMillis(); case NULL: if (!literal.isNull()) { - throw new UnsupportedSQLQueryException("Query has a non-null constant but is of NULL type."); + throw InvalidSqlInput.exception("Expected a NULL literal, but got non-null constant [%s]", literal); } return null; case TIMESTAMP_WITH_LOCAL_TIME_ZONE: case TIME: case TIME_WITH_LOCAL_TIME_ZONE: default: - throw new UnsupportedSQLQueryException("%s type is not supported", literal.getType().getSqlTypeName()); + throw InvalidSqlInput.exception( + "Cannot handle literal [%s] of unsupported type [%s].", + literal, + literal.getType().getSqlTypeName() + ); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java index 3d952accda3e..f5d9056246b3 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java @@ -25,10 +25,9 @@ import org.apache.calcite.rel.RelRoot; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; -import org.apache.calcite.tools.ValidationException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.guice.LazySingleton; import org.apache.druid.java.util.common.IAE; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.query.groupby.GroupByQuery; import org.apache.druid.query.timeboundary.TimeBoundaryQuery; import org.apache.druid.server.QueryLifecycleFactory; @@ -77,7 +76,7 @@ public String name() } 
@Override - public void validateContext(Map queryContext) throws ValidationException + public void validateContext(Map queryContext) { SqlEngines.validateNoSpecialContextKeys(queryContext, SYSTEM_CONTEXT_PARAMETERS); validateJoinAlgorithm(queryContext); @@ -146,18 +145,12 @@ public QueryMaker buildQueryMakerForInsert( * Validates that {@link PlannerContext#CTX_SQL_JOIN_ALGORITHM} is {@link JoinAlgorithm#BROADCAST}. This is the * only join algorithm supported by native queries. */ - private static void validateJoinAlgorithm(final Map queryContext) throws ValidationException + private static void validateJoinAlgorithm(final Map queryContext) { final JoinAlgorithm joinAlgorithm = PlannerContext.getJoinAlgorithm(queryContext); if (joinAlgorithm != JoinAlgorithm.BROADCAST) { - throw new ValidationException( - StringUtils.format( - "Join algorithm [%s] is not supported by engine [%s]", - joinAlgorithm, - NAME - ) - ); + throw InvalidSqlInput.exception("Join algorithm [%s] is not supported by engine [%s]", joinAlgorithm, NAME); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngine.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngine.java index 22c8545dd67e..678ded23e9da 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngine.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngine.java @@ -46,7 +46,7 @@ public interface SqlEngine * Validates a provided query context. Returns quietly if the context is OK; throws {@link ValidationException} * if the context has a problem. */ - void validateContext(Map queryContext) throws ValidationException; + void validateContext(Map queryContext); /** * SQL row type that would be emitted by the {@link QueryMaker} from {@link #buildQueryMakerForSelect}. 
diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java index 30dd7926bd20..cc7bef80f712 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java @@ -20,7 +20,7 @@ package org.apache.druid.sql.calcite.run; import org.apache.calcite.tools.ValidationException; -import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.error.InvalidInput; import java.util.Map; import java.util.Set; @@ -35,17 +35,14 @@ public class SqlEngines * * This is a helper function used by {@link SqlEngine#validateContext} implementations. */ - public static void validateNoSpecialContextKeys(final Map queryContext, final Set specialContextKeys) - throws ValidationException + public static void validateNoSpecialContextKeys( + final Map queryContext, + final Set specialContextKeys + ) { for (String contextParameterName : queryContext.keySet()) { if (specialContextKeys.contains(contextParameterName)) { - throw new ValidationException( - StringUtils.format( - "Cannot execute query with context parameter [%s]", - contextParameterName - ) - ); + throw InvalidInput.exception("Query context parameter [%s] is not allowed", contextParameterName); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java b/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java index 13e2c268d06b..32abe56ee8d6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java @@ -220,6 +220,11 @@ protected void generateTypeString(StringBuilder sb, boolean withDetail) sb.append(columnType.asTypeString()); } + public ColumnType getColumnType() + { + return columnType; + } + public String getComplexTypeName() { return columnType.getComplexTypeName(); diff --git 
a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java index dad391e6bf04..4adea5d8d84e 100644 --- a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java +++ b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java @@ -22,13 +22,11 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; import com.google.inject.Inject; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.druid.common.exception.SanitizableException; import org.apache.druid.guice.annotations.NativeQuery; import org.apache.druid.guice.annotations.Self; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; -import org.apache.druid.query.QueryInterruptedException; import org.apache.druid.server.DruidNode; import org.apache.druid.server.QueryResource; import org.apache.druid.server.QueryResponse; @@ -43,7 +41,6 @@ import org.apache.druid.sql.HttpStatement; import org.apache.druid.sql.SqlLifecycleManager; import org.apache.druid.sql.SqlLifecycleManager.Cancelable; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.SqlRowTransformer; import org.apache.druid.sql.SqlStatementFactory; @@ -102,7 +99,6 @@ public class SqlResource this.serverConfig = Preconditions.checkNotNull(serverConfig, "serverConfig"); this.responseContextConfig = responseContextConfig; this.selfNode = selfNode; - } @POST @@ -177,25 +173,21 @@ private static class SqlResourceQueryMetricCounter implements QueryResource.Quer @Override public void incrementSuccess() { - } @Override public void incrementFailed() { - } @Override public void incrementInterrupted() { - } @Override public void incrementTimedOut() { - } } @@ -214,7 +206,6 @@ private SqlResourceQueryResultPusher makePusher(HttpServletRequest req, HttpStat private class SqlResourceQueryResultPusher extends QueryResultPusher { - private final String sqlQueryId; 
private final HttpStatement stmt; private final SqlQuery sqlQuery; @@ -229,9 +220,9 @@ public SqlResourceQueryResultPusher( { super( req, - SqlResource.this.jsonMapper, - SqlResource.this.responseContextConfig, - SqlResource.this.selfNode, + jsonMapper, + responseContextConfig, + selfNode, SqlResource.QUERY_METRIC_COUNTER, sqlQueryId, MediaType.APPLICATION_JSON_TYPE, @@ -254,28 +245,9 @@ public ResultsWriter start() @Nullable public Response.ResponseBuilder start() { - try { - thePlan = stmt.plan(); - queryResponse = thePlan.run(); - return null; - } - catch (RelOptPlanner.CannotPlanException e) { - throw new SqlPlanningException( - SqlPlanningException.PlanningError.UNSUPPORTED_SQL_ERROR, - e.getMessage() - ); - } - // There is a claim that Calcite sometimes throws a java.lang.AssertionError, but we do not have a test that can - // reproduce it checked into the code (the best we have is something that uses mocks to throw an Error, which is - // dubious at best). We keep this just in case, but it might be best to remove it and see where the - // AssertionErrors are coming from and do something to ensure that they don't actually make it out of Calcite - catch (AssertionError e) { - log.warn(e, "AssertionError killed query: %s", sqlQuery); - - // We wrap the exception here so that we get the sanitization. java.lang.AssertionError apparently - // doesn't implement org.apache.druid.common.exception.SanitizableException. 
- throw new QueryInterruptedException(e); - } + thePlan = stmt.plan(); + queryResponse = thePlan.run(); + return null; } @Override @@ -368,6 +340,5 @@ public void writeException(Exception ex, OutputStream out) throws IOException } out.write(jsonMapper.writeValueAsBytes(ex)); } - } } diff --git a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java index 4d96a2ec908e..c9a100e6a051 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java @@ -25,6 +25,8 @@ import com.google.common.util.concurrent.MoreExecutors; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.concurrent.Execs; import org.apache.druid.java.util.common.guava.LazySequence; @@ -47,7 +49,6 @@ import org.apache.druid.server.security.AuthenticationResult; import org.apache.druid.server.security.ForbiddenException; import org.apache.druid.sql.DirectStatement.ResultSet; -import org.apache.druid.sql.SqlPlanningException.PlanningError; import org.apache.druid.sql.calcite.planner.CalciteRulesManager; import org.apache.druid.sql.calcite.planner.CatalogResolver; import org.apache.druid.sql.calcite.planner.DruidOperatorTable; @@ -60,11 +61,13 @@ import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker; import org.apache.druid.sql.http.SqlQuery; import org.easymock.EasyMock; +import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -85,12 +88,12 @@ public class SqlStatementTest { 
private static QueryRunnerFactoryConglomerate conglomerate; + private static SpecificSegmentsQuerySegmentWalker walker; private static Closer resourceCloser; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); + @ClassRule + public static TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); - private SpecificSegmentsQuerySegmentWalker walker = null; private TestRequestLogger testRequestLogger; private ListeningExecutorService executorService; private SqlStatementFactory sqlStatementFactory; @@ -98,21 +101,11 @@ public class SqlStatementTest ImmutableMap.of("DEFAULT_KEY", "DEFAULT_VALUE")); @BeforeClass - public static void setUpClass() + public static void setUpClass() throws Exception { resourceCloser = Closer.create(); conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser); - } - - @AfterClass - public static void tearDownClass() throws IOException - { - resourceCloser.close(); - } - @Before - public void setUp() throws Exception - { final QueryScheduler scheduler = new QueryScheduler( 5, ManualQueryPrioritizationStrategy.INSTANCE, @@ -125,15 +118,25 @@ public Sequence run(Query query, Sequence resultSequence) { return super.run( query, - new LazySequence(() -> { - return resultSequence; - }) + new LazySequence(() -> resultSequence) ); } }; - executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s")); walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder(), scheduler); + resourceCloser.register(walker); + } + + @AfterClass + public static void tearDownClass() throws IOException + { + resourceCloser.close(); + } + + @Before + public void setUp() + { + executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s")); final PlannerConfig plannerConfig = PlannerConfig.builder().serializeComplexValues(false).build(); final DruidSchemaCatalog rootSchema = 
CalciteTests.createMockRootSchema( @@ -178,8 +181,6 @@ public Sequence run(Query query, Sequence resultSequence) @After public void tearDown() throws Exception { - walker.close(); - walker = null; executorService.shutdownNow(); executorService.awaitTermination(2, TimeUnit.SECONDS); } @@ -222,7 +223,8 @@ public void testDirectHappyPath() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); ResultSet resultSet = stmt.plan(); assertTrue(resultSet.runnable()); @@ -243,7 +245,8 @@ public void testDirectPlanTwice() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); stmt.plan(); try { @@ -260,7 +263,8 @@ public void testDirectExecTwice() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); ResultSet resultSet = stmt.plan(); resultSet.run(); @@ -278,15 +282,20 @@ public void testDirectSyntaxError() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); try { stmt.execute(); fail(); } - catch (SqlPlanningException e) { - // Expected - assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), e.getErrorCode()); + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Received an unexpected token [AS ]") + ); } } @@ -295,15 +304,20 @@ public void 
testDirectValidationError() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.bogus", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); try { stmt.execute(); fail(); } - catch (SqlPlanningException e) { - // Expected - assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Object 'bogus' not found within 'druid'") + ); } } @@ -312,7 +326,8 @@ public void testDirectPermissionError() { SqlQueryPlus sqlReq = queryPlus( "select count(*) from forbiddenDatasource", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); try { stmt.execute(); @@ -336,7 +351,7 @@ private SqlQuery makeQuery(String sql) false, null, null - ); + ); } @Test @@ -345,7 +360,7 @@ public void testHttpHappyPath() HttpStatement stmt = sqlStatementFactory.httpStatement( makeQuery("SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo"), request(true) - ); + ); List results = stmt.execute().getResults().toList(); assertEquals(1, results.size()); assertEquals(6L, results.get(0)[0]); @@ -358,14 +373,18 @@ public void testHttpSyntaxError() HttpStatement stmt = sqlStatementFactory.httpStatement( makeQuery("SELECT COUNT(*) AS cnt, 'foo' AS"), request(true) - ); + ); try { stmt.execute(); fail(); } - catch (SqlPlanningException e) { - // Expected - assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), e.getErrorCode()); + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Received an unexpected token [AS ]") + ); } } @@ -375,14 +394,18 @@ public void testHttpValidationError() HttpStatement stmt = sqlStatementFactory.httpStatement( 
makeQuery("SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.bogus"), request(true) - ); + ); try { stmt.execute(); fail(); } - catch (SqlPlanningException e) { - // Expected - assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Object 'bogus' not found within 'druid'") + ); } } @@ -392,7 +415,7 @@ public void testHttpPermissionError() HttpStatement stmt = sqlStatementFactory.httpStatement( makeQuery("select count(*) from forbiddenDatasource"), request(false) - ); + ); try { stmt.execute(); fail(); @@ -410,7 +433,8 @@ public void testPreparedHappyPath() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); PreparedStatement stmt = sqlStatementFactory.preparedStatement(sqlReq); PrepareResult prepareResult = stmt.prepare(); @@ -440,15 +464,20 @@ public void testPrepareSyntaxError() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); PreparedStatement stmt = sqlStatementFactory.preparedStatement(sqlReq); try { stmt.prepare(); fail(); } - catch (SqlPlanningException e) { - // Expected - assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), e.getErrorCode()); + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Received an unexpected token [AS ]") + ); } } @@ -457,15 +486,20 @@ public void testPrepareValidationError() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.bogus", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); PreparedStatement stmt = sqlStatementFactory.preparedStatement(sqlReq); try { stmt.prepare(); fail(); } - 
catch (SqlPlanningException e) { - // Expected - assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Object 'bogus' not found within 'druid'") + ); } } @@ -474,7 +508,8 @@ public void testPreparePermissionError() { SqlQueryPlus sqlReq = queryPlus( "select count(*) from forbiddenDatasource", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); PreparedStatement stmt = sqlStatementFactory.preparedStatement(sqlReq); try { stmt.prepare(); diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java index cd036d04c3ac..88a237bd42d1 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java @@ -45,6 +45,7 @@ import org.apache.druid.initialization.CoreInjectorBuilder; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.RE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.concurrent.Execs; import org.apache.druid.java.util.common.guava.Yielder; @@ -96,6 +97,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -123,6 +125,7 @@ import java.util.Map; import java.util.Properties; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.ScheduledExecutorService; @@ -144,21 +147,27 @@ public class DruidAvaticaHandlerTest extends CalciteTestBase // This must match the number of Connection objects created in 
testTooManyStatements() AVATICA_CONFIG.maxConnections = CONNECTION_LIMIT; AVATICA_CONFIG.maxStatementsPerConnection = STATEMENT_LIMIT; + System.setProperty("user.timezone", "UTC"); } private static final String DUMMY_SQL_QUERY_ID = "dummy"; + @ClassRule + public static TemporaryFolder temporaryFolder = new TemporaryFolder(); + private static QueryRunnerFactoryConglomerate conglomerate; + private static SpecificSegmentsQuerySegmentWalker walker; private static Closer resourceCloser; private final boolean nullNumeric = !NullHandling.replaceWithDefault(); @BeforeClass - public static void setUpClass() + public static void setUpClass() throws Exception { resourceCloser = Closer.create(); conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser); - System.setProperty("user.timezone", "UTC"); + walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder()); + resourceCloser.register(walker); } @AfterClass @@ -167,16 +176,12 @@ public static void tearDownClass() throws IOException resourceCloser.close(); } - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); private final PlannerConfig plannerConfig = new PlannerConfig(); private final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable(); private final ExprMacroTable macroTable = CalciteTests.createExprMacroTable(); - private SpecificSegmentsQuerySegmentWalker walker; private ServerWrapper server; private Connection client; private Connection clientNoTrailingSlash; @@ -249,60 +254,66 @@ protected String getJdbcUrlTail() protected AbstractAvaticaHandler getAvaticaHandler(final DruidMeta druidMeta) { return new DruidAvaticaJsonHandler( - druidMeta, - new DruidNode("dummy", "dummy", false, 1, null, true, false), - new AvaticaMonitor() + druidMeta, + new DruidNode("dummy", "dummy", false, 1, null, true, false), + new AvaticaMonitor() ); } @Before public void setUp() throws 
Exception { - walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder()); final DruidSchemaCatalog rootSchema = makeRootSchema(); testRequestLogger = new TestRequestLogger(); injector = new CoreInjectorBuilder(new StartupInjectorBuilder().build()) - .addModule(binder -> { - binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test"); - binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0); - binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(-1); - binder.bind(AuthenticatorMapper.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_MAPPER); - binder.bind(AuthorizerMapper.class).toInstance(CalciteTests.TEST_AUTHORIZER_MAPPER); - binder.bind(Escalator.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_ESCALATOR); - binder.bind(RequestLogger.class).toInstance(testRequestLogger); - binder.bind(DruidSchemaCatalog.class).toInstance(rootSchema); - for (NamedSchema schema : rootSchema.getNamedSchemas().values()) { - Multibinder.newSetBinder(binder, NamedSchema.class).addBinding().toInstance(schema); + .addModule( + binder -> { + binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test"); + binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0); + binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(-1); + binder.bind(AuthenticatorMapper.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_MAPPER); + binder.bind(AuthorizerMapper.class).toInstance(CalciteTests.TEST_AUTHORIZER_MAPPER); + binder.bind(Escalator.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_ESCALATOR); + binder.bind(RequestLogger.class).toInstance(testRequestLogger); + binder.bind(DruidSchemaCatalog.class).toInstance(rootSchema); + for (NamedSchema schema : rootSchema.getNamedSchemas().values()) { + Multibinder.newSetBinder(binder, NamedSchema.class).addBinding().toInstance(schema); + } + binder.bind(QueryLifecycleFactory.class) + .toInstance(CalciteTests.createMockQueryLifecycleFactory(walker, 
conglomerate)); + binder.bind(DruidOperatorTable.class).toInstance(operatorTable); + binder.bind(ExprMacroTable.class).toInstance(macroTable); + binder.bind(PlannerConfig.class).toInstance(plannerConfig); + binder.bind(String.class) + .annotatedWith(DruidSchemaName.class) + .toInstance(CalciteTests.DRUID_SCHEMA_NAME); + binder.bind(AvaticaServerConfig.class).toInstance(AVATICA_CONFIG); + binder.bind(ServiceEmitter.class).to(NoopServiceEmitter.class); + binder.bind(QuerySchedulerProvider.class).in(LazySingleton.class); + binder.bind(QueryScheduler.class) + .toProvider(QuerySchedulerProvider.class) + .in(LazySingleton.class); + binder.install(new SqlModule.SqlStatementFactoryModule()); + binder.bind(new TypeLiteral>() + { + }).toInstance(Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))); + binder.bind(CalciteRulesManager.class).toInstance(new CalciteRulesManager(ImmutableSet.of())); + binder.bind(JoinableFactoryWrapper.class).toInstance(CalciteTests.createJoinableFactoryWrapper()); + binder.bind(CatalogResolver.class).toInstance(CatalogResolver.NULL_RESOLVER); } - binder.bind(QueryLifecycleFactory.class) - .toInstance(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate)); - binder.bind(DruidOperatorTable.class).toInstance(operatorTable); - binder.bind(ExprMacroTable.class).toInstance(macroTable); - binder.bind(PlannerConfig.class).toInstance(plannerConfig); - binder.bind(String.class) - .annotatedWith(DruidSchemaName.class) - .toInstance(CalciteTests.DRUID_SCHEMA_NAME); - binder.bind(AvaticaServerConfig.class).toInstance(AVATICA_CONFIG); - binder.bind(ServiceEmitter.class).to(NoopServiceEmitter.class); - binder.bind(QuerySchedulerProvider.class).in(LazySingleton.class); - binder.bind(QueryScheduler.class) - .toProvider(QuerySchedulerProvider.class) - .in(LazySingleton.class); - binder.install(new SqlModule.SqlStatementFactoryModule()); - binder.bind(new TypeLiteral>(){}).toInstance(Suppliers.ofInstance(new 
DefaultQueryConfig(ImmutableMap.of()))); - binder.bind(CalciteRulesManager.class).toInstance(new CalciteRulesManager(ImmutableSet.of())); - binder.bind(JoinableFactoryWrapper.class).toInstance(CalciteTests.createJoinableFactoryWrapper()); - binder.bind(CatalogResolver.class).toInstance(CatalogResolver.NULL_RESOLVER); - } - ) + ) .build(); DruidMeta druidMeta = injector.getInstance(DruidMeta.class); server = new ServerWrapper(druidMeta); client = server.getUserConnection(); superuserClient = server.getConnection(CalciteTests.TEST_SUPERUSER_NAME, "druid"); - clientNoTrailingSlash = DriverManager.getConnection(StringUtils.maybeRemoveTrailingSlash(server.url), CalciteTests.TEST_SUPERUSER_NAME, "druid"); + clientNoTrailingSlash = DriverManager.getConnection( + StringUtils.maybeRemoveTrailingSlash(server.url), + CalciteTests.TEST_SUPERUSER_NAME, + "druid" + ); final Properties propertiesLosAngeles = new Properties(); propertiesLosAngeles.setProperty("sqlTimeZone", "America/Los_Angeles"); @@ -324,8 +335,6 @@ public void tearDown() throws Exception clientNoTrailingSlash = null; server = null; } - walker.close(); - walker = null; } @Test @@ -820,35 +829,48 @@ public void testDatabaseMetaDataColumnsWithSuperuser() throws SQLException } @Test(timeout = 90_000L) - public void testConcurrentQueries() throws InterruptedException, ExecutionException + public void testConcurrentQueries() { - final List> futures = new ArrayList<>(); - final ListeningExecutorService exec = MoreExecutors.listeningDecorator( - Execs.multiThreaded(AVATICA_CONFIG.getMaxStatementsPerConnection(), "DruidAvaticaHandlerTest-%d") - ); - for (int i = 0; i < 2000; i++) { - final String query = StringUtils.format("SELECT COUNT(*) + %s AS ci FROM foo", i); - futures.add( - exec.submit(() -> { - try ( - final Statement statement = client.createStatement(); - final ResultSet resultSet = statement.executeQuery(query) - ) { - final List> rows = getRows(resultSet); - return ((Number) 
Iterables.getOnlyElement(rows).get("ci")).intValue(); - } - catch (SQLException e) { - throw new RuntimeException(e); - } - }) - ); - } + queryLogHook.withSkippedLog( + v -> { + final List> futures = new ArrayList<>(); + final ListeningExecutorService exec = MoreExecutors.listeningDecorator( + Execs.multiThreaded(AVATICA_CONFIG.getMaxStatementsPerConnection(), "DruidAvaticaHandlerTest-%d") + ); + for (int i = 0; i < 2000; i++) { + final String query = StringUtils.format("SELECT COUNT(*) + %s AS ci FROM foo", i); + futures.add( + exec.submit(() -> { + try ( + final Statement statement = client.createStatement(); + final ResultSet resultSet = statement.executeQuery(query) + ) { + final List> rows = getRows(resultSet); + return ((Number) Iterables.getOnlyElement(rows).get("ci")).intValue(); + } + catch (SQLException e) { + throw new RuntimeException(e); + } + }) + ); + } - final List integers = Futures.allAsList(futures).get(); - for (int i = 0; i < 2000; i++) { - Assert.assertEquals(i + 6, (int) integers.get(i)); - } - exec.shutdown(); + final List integers; + try { + integers = Futures.allAsList(futures).get(); + } + catch (InterruptedException e) { + throw new RE(e); + } + catch (ExecutionException e) { + throw new RE(e); + } + for (int i = 0; i < 2000; i++) { + Assert.assertEquals(i + 6, (int) integers.get(i)); + } + exec.shutdown(); + } + ); } @Test @@ -1246,7 +1268,8 @@ public void testSqlRequestLogPrepared() throws SQLException @Test public void testParameterBinding() throws SQLException { - try (PreparedStatement statement = client.prepareStatement("SELECT COUNT(*) AS cnt FROM druid.foo WHERE dim1 = ? OR dim1 = ?")) { + try (PreparedStatement statement = client.prepareStatement( + "SELECT COUNT(*) AS cnt FROM druid.foo WHERE dim1 = ? 
OR dim1 = ?")) { statement.setString(1, "abc"); statement.setString(2, "def"); final ResultSet resultSet = statement.executeQuery(); @@ -1264,7 +1287,7 @@ public void testParameterBinding() throws SQLException public void testSysTableParameterBindingRegularUser() throws SQLException { try (PreparedStatement statement = - client.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { + client.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { statement.setString(1, "dummy"); Assert.assertThrows( @@ -1279,7 +1302,7 @@ public void testSysTableParameterBindingRegularUser() throws SQLException public void testSysTableParameterBindingSuperUser() throws SQLException { try (PreparedStatement statement = - superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { + superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { statement.setString(1, "dummy"); Assert.assertEquals( ImmutableList.of( @@ -1294,7 +1317,7 @@ public void testSysTableParameterBindingSuperUser() throws SQLException public void testExecuteMany() throws SQLException { try (PreparedStatement statement = - superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { + superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { statement.setString(1, "dummy"); Assert.assertEquals( ImmutableList.of( @@ -1586,7 +1609,7 @@ public void testUnauthorizedTable() { final String query = "SELECT * FROM " + CalciteTests.FORBIDDEN_DATASOURCE; final String expectedError = "Error 2 (00002) : Error while executing SQL \"" + - query + "\": Remote driver error: " + Access.DEFAULT_ERROR_MESSAGE; + query + "\": Remote driver error: " + Access.DEFAULT_ERROR_MESSAGE; try (Statement statement = client.createStatement()) { statement.executeQuery(query); } @@ -1624,10 +1647,11 @@ public Meta.Frame 
call() } /** - * Test the async aspect of the Avatica implementation. The fetch of the - * first batch takes 3 seconds (due to a sleep). However, the client will - * wait only 1 second. So, we should get ~3 empty batches before we get - * the first batch with rows. + * Test the async aspect of the Avatica implementation. Uses a countdown latches to provide + * deterministic asynchronous behavior of not having results ready for the first 3 fetches. + *

+ * We set the fetch timeout to a small 1ms value because we want the test to complete fast and + * are ensuring the proper happens-before relationships with latches instead of time. */ @Test public void testAsync() throws Exception @@ -1636,24 +1660,57 @@ public void testAsync() throws Exception config.maxConnections = CONNECTION_LIMIT; config.maxStatementsPerConnection = STATEMENT_LIMIT; config.maxRowsPerFrame = 2; - config.fetchTimeoutMs = 1000; + config.fetchTimeoutMs = 1; final List frames = new ArrayList<>(); final ScheduledExecutorService exec = Execs.scheduledSingleThreaded("testMaxRowsPerFrame"); + final CountDownLatch startLatch = new CountDownLatch(1); + final CountDownLatch resultsLatch = new CountDownLatch(1); DruidMeta druidMeta = new DruidMeta( makeStatementFactory(), config, new ErrorHandler(new ServerConfig()), exec, injector.getInstance(AuthenticatorMapper.class).getAuthenticatorChain(), - new ResultFetcherFactory(config.fetchTimeoutMs) { + new ResultFetcherFactory(config.fetchTimeoutMs) + { + + @Override + public int fetchTimeoutMs() + { + // We override fetchTimeoutMs because the constructor here is enforcing a minimum timeout of 1s, so we + // have to workaround the constructor code by overriding this method. Luckily the internal field is + // not actually being referenced internally and is instead routing through this method. In a future + // refactoring of this code, we should move such enforcement onto the configuration layer and now + // squirreled away inside a constructor. 
+ return config.fetchTimeoutMs; + } + @Override public ResultFetcher newFetcher( final int limit, final Yielder yielder ) { - return new TestResultFetcher(limit, yielder); + return new ResultFetcher(limit, yielder) + { + @Override + public Meta.Frame call() + { + try { + if (offset() == 0) { + startLatch.await(); + } + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + + final Meta.Frame retVal = super.call(); + resultsLatch.countDown(); + return retVal; + } + }; } } ) @@ -1665,6 +1722,15 @@ public Frame fetch( final int fetchMaxRowCount ) throws NoSuchStatementException, MissingResultsException { + if (frames.size() == 3) { + startLatch.countDown(); + try { + resultsLatch.await(); + } + catch (InterruptedException e) { + throw new RE(e); + } + } Frame frame = super.fetch(statement, offset, fetchMaxRowCount); frames.add(frame); return frame; @@ -1679,10 +1745,14 @@ public Frame fetch( "SELECT dim1 FROM druid.foo")) { List> rows = getRows(resultSet); Assert.assertEquals(6, rows.size()); - Assert.assertTrue(frames.size() > 3); + Assert.assertEquals(6, frames.size()); // 3 empty frames and then 3 frames of 2 rows each - // There should be at least one empty frame due to timeout Assert.assertFalse(frames.get(0).rows.iterator().hasNext()); + Assert.assertFalse(frames.get(1).rows.iterator().hasNext()); + Assert.assertFalse(frames.get(2).rows.iterator().hasNext()); + Assert.assertTrue(frames.get(3).rows.iterator().hasNext()); + Assert.assertTrue(frames.get(4).rows.iterator().hasNext()); + Assert.assertTrue(frames.get(5).rows.iterator().hasNext()); } } diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java index 618665c25f66..505bfd98e877 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java @@ -53,6 +53,7 @@ import org.junit.Assert; import 
org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -72,20 +73,23 @@ public class DruidStatementTest extends CalciteTestBase private static String SELECT_STAR_FROM_FOO = "SELECT * FROM druid.foo"; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); + @ClassRule + public static TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); + private static SpecificSegmentsQuerySegmentWalker walker; private static QueryRunnerFactoryConglomerate conglomerate; private static Closer resourceCloser; @BeforeClass - public static void setUpClass() + public static void setUpClass() throws Exception { resourceCloser = Closer.create(); conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser); + walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder()); + resourceCloser.register(walker); } @AfterClass @@ -94,13 +98,11 @@ public static void tearDownClass() throws IOException resourceCloser.close(); } - private SpecificSegmentsQuerySegmentWalker walker; private SqlStatementFactory sqlStatementFactory; @Before - public void setUp() throws Exception + public void setUp() { - walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder()); final PlannerConfig plannerConfig = new PlannerConfig(); final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable(); final ExprMacroTable macroTable = CalciteTests.createExprMacroTable(); @@ -127,10 +129,9 @@ public void setUp() throws Exception } @After - public void tearDown() throws Exception + public void tearDown() { - walker.close(); - walker = null; + } //----------------------------------------------------------------- diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java index 2a0930912b46..428e1d820045 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java @@ -26,10 +26,11 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.inject.Injector; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.commons.text.StringEscapeUtils; import org.apache.druid.annotations.UsedByJUnitParamsRunner; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.guice.DruidInjectorBuilder; import org.apache.druid.hll.VersionOneHyperLogLogCollector; import org.apache.druid.java.util.common.DateTimes; @@ -102,6 +103,7 @@ import org.apache.druid.sql.calcite.util.SqlTestFramework.StandardPlannerComponentSupplier; import org.apache.druid.sql.calcite.view.ViewManager; import org.apache.druid.sql.http.SqlParameter; +import org.hamcrest.MatcherAssert; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.Interval; @@ -487,6 +489,16 @@ protected static void resetFramework() queryFramework = null; } + protected static DruidExceptionMatcher invalidSqlIs(String s) + { + return DruidExceptionMatcher.invalidSqlInput().expectMessageIs(s); + } + + protected static DruidExceptionMatcher invalidSqlContains(String s) + { + return DruidExceptionMatcher.invalidSqlInput().expectMessageContains(s); + } + @Rule public QueryLogHook getQueryLogHook() { @@ -633,23 +645,25 @@ public void assertQueryIsUnplannable(final String sql, String expectedError) public void assertQueryIsUnplannable(final PlannerConfig plannerConfig, final String sql, String expectedError) { - Exception e = null; try { testQuery(plannerConfig, sql, CalciteTests.REGULAR_USER_AUTH_RESULT, 
ImmutableList.of(), ImmutableList.of()); } - catch (Exception e1) { - e = e1; + catch (DruidException e) { + MatcherAssert.assertThat( + e, + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "general") + .expectMessageIs( + StringUtils.format( + "Query planning failed for unknown reason, our best guess is this [%s]", + expectedError + ) + ) + ); } - - if (!(e instanceof RelOptPlanner.CannotPlanException)) { - log.error(e, "Expected CannotPlanException for query: %s", sql); + catch (Exception e) { + log.error(e, "Expected DruidException for query: %s", sql); Assert.fail(sql); } - Assert.assertEquals( - sql, - StringUtils.format("Query not supported. %s SQL was: %s", expectedError, sql), - e.getMessage() - ); } /** @@ -986,7 +1000,7 @@ public SqlStatementFactory getSqlStatementFactory( return getSqlStatementFactory( plannerConfig, new AuthConfig() - ); + ); } /** @@ -1028,6 +1042,7 @@ protected static boolean isRewriteJoinToFilter(final Map queryCo /** * Override not just the outer query context, but also the contexts of all subqueries. 
+ * * @return */ public static Query recursivelyClearContext(final Query query, ObjectMapper queryJsonMapper) @@ -1150,7 +1165,10 @@ protected Map withTimestampResultContext( output.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD, timestampResultField); try { - output.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_GRANULARITY, queryFramework().queryJsonMapper().writeValueAsString(granularity)); + output.put( + GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_GRANULARITY, + queryFramework().queryJsonMapper().writeValueAsString(granularity) + ); } catch (JsonProcessingException e) { throw new RuntimeException(e); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java index 3a1ac2db9aa2..577f46a2e993 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java @@ -54,7 +54,6 @@ @RunWith(JUnitParamsRunner.class) public class CalciteCorrelatedQueryTest extends BaseCalciteQueryTest { - @Test @Parameters(source = QueryContextForJoinProvider.class) public void testCorrelatedSubquery(Map queryContext) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java index 22855033990b..071c8ae04d31 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java @@ -41,7 +41,6 @@ import org.apache.druid.initialization.DruidModule; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.java.util.common.UOE; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.metadata.input.InputSourceModule; import org.apache.druid.query.Query; @@ -457,7 +456,7 @@ 
static class TestFileInputSource extends AbstractInputSource implements Splittab @Nonnull public Set getTypes() { - throw new UOE("This inputSource does not support input source based security"); + throw new CalciteIngestDmlTestException("getTypes()"); } @JsonProperty @@ -509,4 +508,12 @@ public int hashCode() return Objects.hash(files); } } + + static class CalciteIngestDmlTestException extends RuntimeException + { + public CalciteIngestDmlTestException(String message) + { + super(message); + } + } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index aeabf5241a0e..7fb52843b4e4 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -26,6 +26,8 @@ import org.apache.druid.data.input.InputSource; import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.granularity.Granularity; @@ -41,16 +43,15 @@ import org.apache.druid.segment.join.JoinType; import org.apache.druid.server.security.AuthConfig; import org.apache.druid.server.security.ForbiddenException; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.external.ExternalDataSource; import org.apache.druid.sql.calcite.external.Externals; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.parser.DruidSqlInsert; import org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.IngestHandler; import org.apache.druid.sql.calcite.planner.PlannerContext; import 
org.apache.druid.sql.calcite.util.CalciteTests; import org.hamcrest.CoreMatchers; +import org.hamcrest.MatcherAssert; import org.junit.Assert; import org.junit.Test; import org.junit.internal.matchers.ThrowableMessageMatcher; @@ -199,7 +200,11 @@ public void testInsertIntoInvalidDataSourceName() { testIngestionQuery() .sql("INSERT INTO \"in/valid\" SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "INSERT dataSource cannot contain the '/' character.") + .expectValidationError( + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [table]: Value [in/valid] cannot contain '/'." + ) + ) .verify(); } @@ -208,7 +213,9 @@ public void testInsertUsingColumnList() { testIngestionQuery() .sql("INSERT INTO dst (foo, bar) SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "INSERT with a target column list is not supported.") + .expectValidationError( + invalidSqlIs("Operation [INSERT] cannot be run with a target column list, given [dst (`foo`, `bar`)]") + ) .verify(); } @@ -217,7 +224,7 @@ public void testUpsert() { testIngestionQuery() .sql("UPSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "UPSERT is not supported.") + .expectValidationError(invalidSqlIs("UPSERT is not supported.")) .verify(); } @@ -229,8 +236,8 @@ public void testSelectFromSystemTable() testIngestionQuery() .sql("INSERT INTO dst SELECT * FROM INFORMATION_SCHEMA.COLUMNS PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, - "Cannot query table INFORMATION_SCHEMA.COLUMNS with SQL engine 'ingestion-test'." 
+ DruidException.class, + "Cannot query table(s) [INFORMATION_SCHEMA.COLUMNS] with SQL engine [ingestion-test]" ) .verify(); } @@ -240,10 +247,9 @@ public void testInsertIntoSystemTable() { testIngestionQuery() .sql("INSERT INTO INFORMATION_SCHEMA.COLUMNS SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlPlanningException.class, - "Cannot INSERT into INFORMATION_SCHEMA.COLUMNS because it is not a Druid datasource." - ) + .expectValidationError(invalidSqlIs( + "Table [INFORMATION_SCHEMA.COLUMNS] does not support operation [INSERT] because it is not a Druid datasource" + )) .verify(); } @@ -253,8 +259,7 @@ public void testInsertIntoView() testIngestionQuery() .sql("INSERT INTO view.aview SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, - "Cannot INSERT into view.aview because it is not a Druid datasource." + invalidSqlIs("Table [view.aview] does not support operation [INSERT] because it is not a Druid datasource") ) .verify(); } @@ -282,10 +287,9 @@ public void testInsertIntoNonexistentSchema() { testIngestionQuery() .sql("INSERT INTO nonexistent.dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlPlanningException.class, - "Cannot INSERT into nonexistent.dst because it is not a Druid datasource." 
- ) + .expectValidationError(invalidSqlIs( + "Table [nonexistent.dst] does not support operation [INSERT] because it is not a Druid datasource" + )) .verify(); } @@ -366,11 +370,12 @@ public void testInsertFromExternalWithSchema() throw new RuntimeException(e); } testIngestionQuery() - .sql("INSERT INTO dst SELECT * FROM %s\n" + - " (x VARCHAR, y VARCHAR, z BIGINT)\n" + - "PARTITIONED BY ALL TIME", - extern - ) + .sql( + "INSERT INTO dst SELECT * FROM %s\n" + + " (x VARCHAR, y VARCHAR, z BIGINT)\n" + + "PARTITIONED BY ALL TIME", + extern + ) .authentication(CalciteTests.SUPER_USER_AUTH_RESULT) .expectTarget("dst", externalDataSource.getSignature()) .expectResources(dataSourceWrite("dst"), Externals.EXTERNAL_RESOURCE_ACTION) @@ -557,9 +562,8 @@ public void testInsertFromExternalWithoutSecuritySupportWithInputsourceSecurityE .expectLogicalPlanFrom("insertFromExternal") .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.equalTo( - "org.apache.druid.java.util.common.UOE: This inputSource does not support input source based security")) + CoreMatchers.instanceOf(CalciteIngestDmlTestException.class), + ThrowableMessageMatcher.hasMessage(CoreMatchers.equalTo("getTypes()")) ) ) .verify(); @@ -763,8 +767,7 @@ public void testExplainPlanInsertWithClusteredByDescThrowsException() testIngestionQuery() .sql(sql) .expectValidationError( - SqlPlanningException.class, - "[`dim1` DESC] is invalid. CLUSTERED BY columns cannot be sorted in descending order." + invalidSqlIs("Invalid CLUSTERED BY clause [`dim1` DESC]: cannot sort in descending order.") ) .verify(); } @@ -819,10 +822,9 @@ public void testInsertWithoutPartitionedByWithClusteredBy() + "SELECT __time, FLOOR(m1) as floor_m1, dim1, CEIL(m2) as ceil_m2 FROM foo " + "CLUSTERED BY 2, dim1 DESC, CEIL(m2)" ) - .expectValidationError( - SqlPlanningException.class, - "CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause" - ) + .expectValidationError(invalidSqlIs( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + )) .verify(); } @@ -901,11 +903,10 @@ public void testInsertWithClusteredByAndOrderBy() ); Assert.fail("Exception should be thrown"); } - catch (SqlPlanningException e) { - Assert.assertEquals( - "Cannot have ORDER BY on an INSERT statement, use CLUSTERED BY instead.", - e.getMessage() - ); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs( + "Cannot use an ORDER BY clause on a Query of type [INSERT], use CLUSTERED BY instead" + )); } didTest = true; } @@ -913,7 +914,7 @@ public void testInsertWithClusteredByAndOrderBy() @Test public void testInsertWithPartitionedByContainingInvalidGranularity() { - // Throws a ValidationException, which gets converted to a SqlPlanningException before throwing to end user + // Throws a ValidationException, which gets converted to a DruidException before throwing to end user try { testQuery( "INSERT INTO dst SELECT * FROM foo PARTITIONED BY 'invalid_granularity'", @@ -922,11 +923,13 @@ public void testInsertWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (SqlPlanningException e) { - Assert.assertEquals( - "Encountered 'invalid_granularity' after PARTITIONED BY. Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", - e.getMessage() - ); + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs( + "Invalid granularity ['invalid_granularity'] after PARTITIONED BY. 
" + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR() or TIME_FLOOR()" + )); } didTest = true; } @@ -945,10 +948,10 @@ public void testInsertWithOrderBy() ); Assert.fail("Exception should be thrown"); } - catch (SqlPlanningException e) { - Assert.assertEquals( - "Cannot have ORDER BY on an INSERT statement, use CLUSTERED BY instead.", - e.getMessage() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs("Cannot use an ORDER BY clause on a Query of type [INSERT], use CLUSTERED BY instead") ); } finally { @@ -959,8 +962,8 @@ public void testInsertWithOrderBy() @Test public void testInsertWithoutPartitionedBy() { - SqlPlanningException e = Assert.assertThrows( - SqlPlanningException.class, + DruidException e = Assert.assertThrows( + DruidException.class, () -> testQuery( StringUtils.format("INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)), @@ -968,7 +971,11 @@ public void testInsertWithoutPartitionedBy() ImmutableList.of() ) ); - Assert.assertEquals("INSERT statements must specify PARTITIONED BY clause explicitly", e.getMessage()); + + MatcherAssert.assertThat( + e, + invalidSqlIs("Operation [INSERT] requires a PARTITIONED BY to be explicitly defined, but none was found.") + ); didTest = true; } @@ -1192,7 +1199,7 @@ public void testSurfaceErrorsWhenInsertingThroughIncorrectSelectStatment() { assertQueryIsUnplannable( "INSERT INTO druid.dst SELECT dim2, dim1, m1 FROM foo2 UNION SELECT dim1, dim2, m1 FROM foo PARTITIONED BY ALL TIME", - "Possible error: SQL requires 'UNION' but only 'UNION ALL' is supported." + "SQL requires 'UNION' but only 'UNION ALL' is supported." ); // Not using testIngestionQuery, so must set didTest manually to satisfy the check in tearDown. @@ -1308,14 +1315,13 @@ public void testInsertFromExternalAggregateAll() @Test public void testInsertWithInvalidSelectStatement() { + // This test fails because "count" is a reserved word and it is being used without quotes. 
So SQL is considering + // it a token instead of a name. It would be nice if our message was more direct telling the person that they + // used a reserved word instead of making them know that a "token" means Calcite is seeing a reserved word. But, + // that's an improvement for another day. testIngestionQuery() .sql("INSERT INTO t SELECT channel, added as count FROM foo PARTITIONED BY ALL") // count is a keyword - .expectValidationError( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Encountered \"as count\"")) - ) - ) + .expectValidationError(invalidSqlContains("Received an unexpected token [as count]")) .verify(); } @@ -1324,10 +1330,7 @@ public void testInsertWithUnnamedColumnInSelectStatement() { testIngestionQuery() .sql("INSERT INTO t SELECT dim1, dim2 || '-lol' FROM foo PARTITIONED BY ALL") - .expectValidationError( - SqlPlanningException.class, - IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR - ) + .expectValidationError(invalidSqlContains("Insertion requires columns to be named")) .verify(); } @@ -1336,10 +1339,7 @@ public void testInsertWithInvalidColumnNameInIngest() { testIngestionQuery() .sql("INSERT INTO t SELECT __time, dim1 AS EXPR$0 FROM foo PARTITIONED BY ALL") - .expectValidationError( - SqlPlanningException.class, - IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR - ) + .expectValidationError(invalidSqlContains("Insertion requires columns to be named")) .verify(); } @@ -1350,10 +1350,7 @@ public void testInsertWithUnnamedColumnInNestedSelectStatement() .sql("INSERT INTO test " + "SELECT __time, * FROM " + "(SELECT __time, LOWER(dim1) FROM foo) PARTITIONED BY ALL TIME") - .expectValidationError( - SqlPlanningException.class, - IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR - ) + .expectValidationError(invalidSqlContains("Insertion requires columns to be named")) .verify(); } @@ -1364,11 +1361,11 @@ public void testInsertQueryWithInvalidGranularity() .sql("insert 
into foo1 select __time, dim1 FROM foo partitioned by time_floor(__time, 'PT2H')") .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "The granularity specified in PARTITIONED BY is not supported. " - + "Please use an equivalent of these granularities: second, minute, five_minute, ten_minute, " - + "fifteen_minute, thirty_minute, hour, six_hour, eight_hour, day, week, month, quarter, year, all.")) + "The granularity specified in PARTITIONED BY [`time_floor`(`__time`, 'PT2H')] is not supported. " + + "Valid options: [second, minute, five_minute, ten_minute, fifteen_minute, thirty_minute, hour, " + + "six_hour, eight_hour, day, week, month, quarter, year, all]")) ) ) .verify(); @@ -1391,7 +1388,7 @@ public void testInsertOnExternalDataSourceWithIncompatibleTimeColumnSignature() ) .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "EXTERN function with __time column can be used when __time column is of type long")) ) @@ -1409,8 +1406,7 @@ public void testInsertWithSqlOuterLimit() .context(context) .sql("INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, - "sqlOuterLimit cannot be provided with INSERT." 
+ invalidSqlIs("Context parameter [sqlOuterLimit] cannot be provided on operator [INSERT]") ) .verify(); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java index 1f45aebc8b52..337926d462ae 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java @@ -25,6 +25,8 @@ import junitparams.JUnitParamsRunner; import junitparams.Parameters; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.JodaUtils; @@ -89,8 +91,8 @@ import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.planner.PlannerConfig; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.util.CalciteTests; +import org.hamcrest.MatcherAssert; import org.joda.time.DateTimeZone; import org.joda.time.Period; import org.junit.Assert; @@ -1032,7 +1034,6 @@ public void testLeftJoinTwoLookupsUsingJoinOperator(Map queryCon ); } - @Test @Parameters(source = QueryContextForJoinProvider.class) public void testInnerJoinTableLookupLookupWithFilterWithOuterLimit(Map queryContext) @@ -1483,16 +1484,31 @@ public void testInnerJoinQueryOfLookup(Map queryContext) ); } - @Test(expected = UnsupportedSQLQueryException.class) + @Test @Parameters(source = QueryContextForJoinProvider.class) public void testTimeColumnAggregationsOnLookups(Map queryContext) { - testQuery( - "SELECT k, LATEST(v) v FROM lookup.lookyloo GROUP BY k", - queryContext, - ImmutableList.of(), - ImmutableList.of() - ); + try { + testQuery( + "SELECT 
k, LATEST(v) v FROM lookup.lookyloo GROUP BY k", + queryContext, + ImmutableList.of(), + ImmutableList.of() + ); + Assert.fail("Expected exception to be thrown."); + } + catch (DruidException e) { + MatcherAssert.assertThat( + e, + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "general") + .expectMessageIs( + "Query planning failed for unknown reason, our best guess is this " + + "[LATEST and EARLIEST aggregators implicitly depend on the __time column, " + + "but the table queried doesn't contain a __time column. " + + "Please use LATEST_BY or EARLIEST_BY and specify the column explicitly.]" + ) + ); + } } @Test @@ -3342,7 +3358,7 @@ public void testJoinOnConstantShouldFail(Map queryContext) { assertQueryIsUnplannable( "SELECT t1.dim1 from foo as t1 LEFT JOIN foo as t2 on t1.dim1 = '10.1'", - "Possible error: SQL is resulting in a join that has unsupported operand types." + "SQL is resulting in a join that has unsupported operand types." ); } @@ -3498,6 +3514,9 @@ public void testLeftJoinSubqueryWithNullKeyFilter(Map queryConte .context(queryContext) .build(); + boolean isJoinFilterRewriteEnabled = queryContext.getOrDefault(QueryContexts.JOIN_FILTER_REWRITE_ENABLE_KEY, true) + .toString() + .equals("true"); testQuery( "SELECT dim1, l1.k\n" + "FROM foo\n" @@ -3505,7 +3524,16 @@ public void testLeftJoinSubqueryWithNullKeyFilter(Map queryConte + "WHERE l1.k IS NOT NULL\n", queryContext, ImmutableList.of(NullHandling.sqlCompatible() ? nullCompatibleModePlan : nonNullCompatibleModePlan), - ImmutableList.of(new Object[]{"abc", "abc"}) + NullHandling.sqlCompatible() || !isJoinFilterRewriteEnabled + ? ImmutableList.of(new Object[]{"abc", "abc"}) + : ImmutableList.of( + new Object[]{"10.1", ""}, + // this result is incorrect. 
TODO : fix this result when the JoinFilterAnalyzer bug is fixed + new Object[]{"2", ""}, + new Object[]{"1", ""}, + new Object[]{"def", ""}, + new Object[]{"abc", "abc"} + ) ); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java index 2311372e75e3..d016f1795326 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java @@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.math.expr.ExpressionProcessing; @@ -43,7 +44,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.virtual.ExpressionVirtualColumn; import org.apache.druid.segment.virtual.ListFilteredVirtualColumn; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.util.CalciteTests; import org.junit.Test; @@ -801,7 +801,11 @@ public void testMultiValueStringOrdinal() .setDataSource(CalciteTests.DATASOURCE3) .setInterval(querySegmentSpec(Filtration.eternity())) .setGranularity(Granularities.ALL) - .setVirtualColumns(expressionVirtualColumn("v0", "array_ordinal(\"dim3\",2)", ColumnType.STRING)) + .setVirtualColumns(expressionVirtualColumn( + "v0", + "array_ordinal(\"dim3\",2)", + ColumnType.STRING + )) .setDimensions( dimensions( new DefaultDimensionSpec("v0", "_d0", ColumnType.STRING) @@ -1797,7 +1801,7 @@ public void testMultiValueToArrayMoreArgs() testQueryThrows( "SELECT MV_TO_ARRAY(dim3,dim3) FROM druid.numfoo", exception -> 
{ - exception.expect(SqlPlanningException.class); + exception.expect(DruidException.class); exception.expectMessage("Invalid number of arguments to function"); } ); @@ -1809,7 +1813,7 @@ public void testMultiValueToArrayNoArgs() testQueryThrows( "SELECT MV_TO_ARRAY() FROM druid.numfoo", exception -> { - exception.expect(SqlPlanningException.class); + exception.expect(DruidException.class); exception.expectMessage("Invalid number of arguments to function"); } ); @@ -2008,8 +2012,7 @@ public void testMultiValueStringOverlapFilterInconsistentUsage() "SELECT COALESCE(dim3, 'other') FROM druid.numfoo " + "WHERE MV_OVERLAP(COALESCE(dim3, ARRAY['other']), ARRAY['a', 'b', 'other']) LIMIT 5", e -> { - e.expect(SqlPlanningException.class); - e.expectMessage("Illegal mixing of types in CASE or COALESCE statement"); + e.expect(invalidSqlContains("Illegal mixing of types in CASE or COALESCE statement")); } ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java index 007bb4926d78..83861485960b 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java @@ -32,6 +32,7 @@ import org.apache.druid.data.input.impl.LongDimensionSchema; import org.apache.druid.data.input.impl.StringDimensionSchema; import org.apache.druid.data.input.impl.TimestampSpec; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.guice.DruidInjectorBuilder; import org.apache.druid.guice.NestedDataModule; import org.apache.druid.java.util.common.HumanReadableBytes; @@ -68,7 +69,6 @@ import org.apache.druid.segment.virtual.NestedFieldVirtualColumn; import org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory; import org.apache.druid.sql.calcite.filtration.Filtration; -import 
org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker; import org.apache.druid.sql.calcite.util.TestDataBuilder; import org.apache.druid.timeline.DataSegment; @@ -4172,9 +4172,11 @@ public void testGroupByInvalidPath() + "SUM(cnt) " + "FROM druid.nested GROUP BY 1", (expected) -> { - expected.expect(UnsupportedSQLQueryException.class); - expected.expectMessage( - "Cannot use [JSON_VALUE_VARCHAR]: [Bad format, '.array.[1]' is not a valid JSONPath path: must start with '$']"); + expected.expect( + DruidExceptionMatcher + .invalidInput() + .expectMessageIs("JSONPath [.array.[1]] is invalid, it must start with '$'") + ); } ); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java index 3e1bfe62b651..2266b8d6b825 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java @@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableList; import org.apache.calcite.avatica.SqlType; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; @@ -37,7 +38,6 @@ import org.apache.druid.query.scan.ScanQuery.ResultFormat; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.util.CalciteTests; import org.apache.druid.sql.http.SqlParameter; @@ -577,8 +577,9 @@ public void testLongs() @Test public void testMissingParameter() { - 
expectedException.expect(SqlPlanningException.class); - expectedException.expectMessage("Parameter at position [0] is not bound"); + expectedException.expect( + DruidExceptionMatcher.invalidSqlInput().expectMessageIs("No value bound for parameter (position [1])") + ); testQuery( "SELECT COUNT(*)\n" + "FROM druid.numfoo\n" @@ -592,8 +593,9 @@ public void testMissingParameter() @Test public void testPartiallyMissingParameter() { - expectedException.expect(SqlPlanningException.class); - expectedException.expectMessage("Parameter at position [1] is not bound"); + expectedException.expect( + DruidExceptionMatcher.invalidSqlInput().expectMessageIs("No value bound for parameter (position [2])") + ); testQuery( "SELECT COUNT(*)\n" + "FROM druid.numfoo\n" @@ -610,8 +612,9 @@ public void testPartiallyMissingParameterInTheMiddle() List params = new ArrayList<>(); params.add(null); params.add(new SqlParameter(SqlType.INTEGER, 1)); - expectedException.expect(SqlPlanningException.class); - expectedException.expectMessage("Parameter at position [0] is not bound"); + expectedException.expect( + DruidExceptionMatcher.invalidSqlInput().expectMessageIs("No value bound for parameter (position [1])") + ); testQuery( "SELECT 1 + ?, dim1 FROM foo LIMIT ?", ImmutableList.of(), diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 0e7d377489de..792c25a5e1d7 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -23,9 +23,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.runtime.CalciteContextException; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import 
org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.Intervals; @@ -110,8 +110,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.join.JoinType; -import org.apache.druid.sql.SqlPlanningException; -import org.apache.druid.sql.SqlPlanningException.PlanningError; import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.planner.Calcites; @@ -372,8 +370,8 @@ public void testInformationSchemaColumnsOnAnotherView() public void testCannotInsertWithNativeEngine() { notMsqCompatible(); - final SqlPlanningException e = Assert.assertThrows( - SqlPlanningException.class, + final DruidException e = Assert.assertThrows( + DruidException.class, () -> testQuery( "INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -383,9 +381,7 @@ public void testCannotInsertWithNativeEngine() MatcherAssert.assertThat( e, - ThrowableMessageMatcher.hasMessage( - CoreMatchers.equalTo("Cannot execute INSERT with SQL engine 'native'.") - ) + invalidSqlIs("INSERT operations are not supported by requested SQL engine [native], consider using MSQ.") ); } @@ -393,8 +389,8 @@ public void testCannotInsertWithNativeEngine() public void testCannotReplaceWithNativeEngine() { notMsqCompatible(); - final SqlPlanningException e = Assert.assertThrows( - SqlPlanningException.class, + final DruidException e = Assert.assertThrows( + DruidException.class, () -> testQuery( "REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -404,9 +400,7 @@ public void testCannotReplaceWithNativeEngine() MatcherAssert.assertThat( e, - ThrowableMessageMatcher.hasMessage( - CoreMatchers.equalTo("Cannot execute REPLACE with SQL engine 'native'.") - ) + invalidSqlIs("REPLACE operations are not 
supported by the requested SQL engine [native]. Consider using MSQ.") ); } @@ -814,7 +808,7 @@ public void testLatestAggregators() @Test public void testEarliestByInvalidTimestamp() { - expectedException.expect(SqlPlanningException.class); + expectedException.expect(DruidException.class); expectedException.expectMessage("Cannot apply 'EARLIEST_BY' to arguments of type 'EARLIEST_BY(, )"); testQuery( @@ -827,8 +821,9 @@ public void testEarliestByInvalidTimestamp() @Test public void testLatestByInvalidTimestamp() { - expectedException.expect(SqlPlanningException.class); - expectedException.expectMessage("Cannot apply 'LATEST_BY' to arguments of type 'LATEST_BY(, )"); + expectedException.expect( + invalidSqlContains("Cannot apply 'LATEST_BY' to arguments of type 'LATEST_BY(, )") + ); testQuery( "SELECT LATEST_BY(m1, l1) FROM druid.numfoo", @@ -1067,22 +1062,22 @@ public void testStringLatestGroupBy() public void testStringLatestGroupByWithAlwaysFalseCondition() { testQuery( - "SELECT LATEST(dim4, 10),dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN( 'something else') ) GROUP BY dim2", + "SELECT LATEST(dim4, 10), dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN('something else')) GROUP BY dim2", ImmutableList.of( Druids.newScanQueryBuilder() - .dataSource(InlineDataSource.fromIterable( - ImmutableList.of(), - RowSignature.builder() - .add("EXPR$0", ColumnType.STRING) - .add("dim2", ColumnType.STRING) - .build() - )) - .intervals(querySegmentSpec(Filtration.eternity())) - .columns("EXPR$0", "dim2") - .context(QUERY_CONTEXT_DEFAULT) - .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) - .legacy(false) - .build() + .dataSource(InlineDataSource.fromIterable( + ImmutableList.of(), + RowSignature.builder() + .add("EXPR$0", ColumnType.STRING) + .add("dim2", ColumnType.STRING) + .build() + )) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("EXPR$0", "dim2") + .context(QUERY_CONTEXT_DEFAULT) + 
.resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) + .legacy(false) + .build() ), ImmutableList.of() ); @@ -1092,22 +1087,22 @@ public void testStringLatestGroupByWithAlwaysFalseCondition() public void testStringLatestByGroupByWithAlwaysFalseCondition() { testQuery( - "SELECT LATEST_BY(dim4, __time, 10),dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN( 'something else') ) GROUP BY dim2", + "SELECT LATEST_BY(dim4, __time, 10), dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN('something else')) GROUP BY dim2", ImmutableList.of( Druids.newScanQueryBuilder() - .dataSource(InlineDataSource.fromIterable( - ImmutableList.of(), - RowSignature.builder() - .add("EXPR$0", ColumnType.STRING) - .add("dim2", ColumnType.STRING) - .build() - )) - .intervals(querySegmentSpec(Filtration.eternity())) - .columns("EXPR$0", "dim2") - .context(QUERY_CONTEXT_DEFAULT) - .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) - .legacy(false) - .build() + .dataSource(InlineDataSource.fromIterable( + ImmutableList.of(), + RowSignature.builder() + .add("EXPR$0", ColumnType.STRING) + .add("dim2", ColumnType.STRING) + .build() + )) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("EXPR$0", "dim2") + .context(QUERY_CONTEXT_DEFAULT) + .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) + .legacy(false) + .build() ), ImmutableList.of() ); @@ -2903,12 +2898,8 @@ public void testUnionAllTablesColumnCountMismatch() ); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { - Assert.assertTrue( - e.getMessage().contains("Column count mismatch in UNION ALL") - ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs("Column count mismatch in UNION ALL (line [3], column [42])")); } } @@ -2971,22 +2962,24 @@ public void 
testUnionAllTablesColumnTypeMismatchStringLong() + "FROM (SELECT dim3, dim2, m1 FROM foo2 UNION ALL SELECT dim3, dim2, m1 FROM foo)\n" + "WHERE dim2 = 'a' OR dim2 = 'en'\n" + "GROUP BY 1, 2", - "Possible error: SQL requires union between inputs that are not simple table scans and involve a " + - "filter or aliasing. Or column types of tables being unioned are not of same type."); + "SQL requires union between inputs that are not simple table scans and involve a " + + "filter or aliasing. Or column types of tables being unioned are not of same type." + ); } @Test public void testUnionAllTablesWhenMappingIsRequired() { // Cannot plan this UNION ALL operation, because the column swap would require generating a subquery. + assertQueryIsUnplannable( "SELECT\n" + "c, COUNT(*)\n" + "FROM (SELECT dim1 AS c, m1 FROM foo UNION ALL SELECT dim2 AS c, m1 FROM numfoo)\n" + "WHERE c = 'a' OR c = 'def'\n" + "GROUP BY 1", - "Possible error: SQL requires union between two tables " + - "and column names queried for each table are different Left: [dim1], Right: [dim2]." + "SQL requires union between two tables " + + "and column names queried for each table are different Left: [dim1], Right: [dim2]." ); } @@ -2996,7 +2989,7 @@ public void testUnionIsUnplannable() // Cannot plan this UNION operation assertQueryIsUnplannable( "SELECT dim2, dim1, m1 FROM foo2 UNION SELECT dim1, dim2, m1 FROM foo", - "Possible error: SQL requires 'UNION' but only 'UNION ALL' is supported." + "SQL requires 'UNION' but only 'UNION ALL' is supported." ); } @@ -3010,8 +3003,8 @@ public void testUnionAllTablesWhenCastAndMappingIsRequired() + "FROM (SELECT dim1 AS c, m1 FROM foo UNION ALL SELECT cnt AS c, m1 FROM numfoo)\n" + "WHERE c = 'a' OR c = 'def'\n" + "GROUP BY 1", - "Possible error: SQL requires union between inputs that are not simple table scans and involve " + - "a filter or aliasing. Or column types of tables being unioned are not of same type." 
+ "SQL requires union between inputs that are not simple table scans and involve " + + "a filter or aliasing. Or column types of tables being unioned are not of same type." ); } @@ -3111,7 +3104,7 @@ public void testUnionAllSameTableTwiceWithDifferentMapping() + "FROM (SELECT dim1, dim2, m1 FROM foo UNION ALL SELECT dim2, dim1, m1 FROM foo)\n" + "WHERE dim2 = 'a' OR dim2 = 'def'\n" + "GROUP BY 1, 2", - "Possible error: SQL requires union between two tables and column names queried for each table are different Left: [dim1, dim2, m1], Right: [dim2, dim1, m1]." + "SQL requires union between two tables and column names queried for each table are different Left: [dim1, dim2, m1], Right: [dim2, dim1, m1]." ); } @@ -3174,12 +3167,8 @@ public void testUnionAllThreeTablesColumnCountMismatch1() ); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { - Assert.assertTrue( - e.getMessage().contains("Column count mismatch in UNION ALL") - ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs("Column count mismatch in UNION ALL (line [3], column [45])")); } } @@ -3198,12 +3187,8 @@ public void testUnionAllThreeTablesColumnCountMismatch2() ); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { - Assert.assertTrue( - e.getMessage().contains("Column count mismatch in UNION ALL") - ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs("Column count mismatch in UNION ALL (line [3], column [45])")); } } @@ -3222,12 +3207,8 @@ public void testUnionAllThreeTablesColumnCountMismatch3() ); Assert.fail("query execution should fail"); } - catch 
(SqlPlanningException e) { - Assert.assertTrue( - e.getMessage().contains("Column count mismatch in UNION ALL") - ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs("Column count mismatch in UNION ALL (line [3], column [70])")); } } @@ -5634,17 +5615,17 @@ public void testUnplannableQueries() final Map queries = ImmutableMap.of( // SELECT query with order by non-__time. "SELECT dim1 FROM druid.foo ORDER BY dim1", - "Possible error: SQL query requires order by non-time column [dim1 ASC], which is not supported.", + "SQL query requires order by non-time column [[dim1 ASC]], which is not supported.", // JOIN condition with not-equals (<>). "SELECT foo.dim1, foo.dim2, l.k, l.v\n" + "FROM foo INNER JOIN lookup.lookyloo l ON foo.dim2 <> l.k", - "Possible error: SQL requires a join with 'NOT_EQUALS' condition that is not supported.", + "SQL requires a join with 'NOT_EQUALS' condition that is not supported.", // JOIN condition with a function of both sides. "SELECT foo.dim1, foo.dim2, l.k, l.v\n" + "FROM foo INNER JOIN lookup.lookyloo l ON CHARACTER_LENGTH(foo.dim2 || l.k) > 3\n", - "Possible error: SQL requires a join with 'GREATER_THAN' condition that is not supported." + "SQL requires a join with 'GREATER_THAN' condition that is not supported." ); for (final Map.Entry queryErrorPair : queries.entrySet()) { @@ -5702,7 +5683,7 @@ public void testUnplannableTwoExactCountDistincts() assertQueryIsUnplannable( PLANNER_CONFIG_NO_HLL, "SELECT dim2, COUNT(distinct dim1), COUNT(distinct dim2) FROM druid.foo GROUP BY dim2", - "Possible error: SQL requires a join with 'IS_NOT_DISTINCT_FROM' condition that is not supported." + "SQL requires a join with 'IS_NOT_DISTINCT_FROM' condition that is not supported." 
); } @@ -5713,7 +5694,7 @@ public void testUnplannableExactCountDistinctOnSketch() assertQueryIsUnplannable( PLANNER_CONFIG_NO_HLL, "SELECT COUNT(distinct unique_dim1) FROM druid.foo", - "Possible error: SQL requires a group-by on a column of type COMPLEX that is unsupported." + "SQL requires a group-by on a column of type COMPLEX that is unsupported." ); } @@ -5756,7 +5737,9 @@ public void testArrayAggQueryOnComplexDatatypes() .build() ), ImmutableList.of( - new Object[]{"[\"AQAAAEAAAA==\",\"AQAAAQAAAAHNBA==\",\"AQAAAQAAAAOzAg==\",\"AQAAAQAAAAFREA==\",\"AQAAAQAAAACyEA==\",\"AQAAAQAAAAEkAQ==\"]"} + new Object[]{ + "[\"AQAAAEAAAA==\",\"AQAAAQAAAAHNBA==\",\"AQAAAQAAAAOzAg==\",\"AQAAAQAAAAFREA==\",\"AQAAAQAAAACyEA==\",\"AQAAAQAAAAEkAQ==\"]" + } ) ); } @@ -5768,12 +5751,11 @@ public void testStringAggQueryOnComplexDatatypes() testQuery("SELECT STRING_AGG(unique_dim1, ',') FROM druid.foo", ImmutableList.of(), ImmutableList.of()); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { - Assert.assertTrue( - e.getMessage().contains("Cannot use STRING_AGG on complex inputs COMPLEX") + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs("Aggregation [STRING_AGG] does not support type [COMPLEX], column [foo.unique_dim1]") ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); } } @@ -5909,11 +5891,13 @@ public void testCountStarWithTimeInIntervalFilterNonLiteral() "SELECT COUNT(*) FROM druid.foo " + "WHERE TIME_IN_INTERVAL(__time, dim1)", expected -> { - expected.expect(CoreMatchers.instanceOf(SqlPlanningException.class)); - expected.expect(ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "From line 1, column 38 to line 1, column 67: " - + "Cannot apply 'TIME_IN_INTERVAL' to arguments of type 'TIME_IN_INTERVAL(, )'. 
" - + "Supported form(s): 'TIME_IN_INTERVAL(, )'"))); + expected.expect( + invalidSqlIs( + "Cannot apply 'TIME_IN_INTERVAL' to arguments of type " + + "'TIME_IN_INTERVAL(, )'. Supported form(s): " + + "'TIME_IN_INTERVAL(, )' (line [1], column [38])" + ) + ); } ); } @@ -6050,11 +6034,21 @@ public void testCountStarWithTimeFilterUsingStringLiteralsInvalid_isUnplannable( { // Strings are implicitly cast to timestamps. Test an invalid string. // This error message isn't ideal but it is at least better than silently ignoring the problem. - assertQueryIsUnplannable( - "SELECT COUNT(*) FROM druid.foo\n" - + "WHERE __time >= 'z2000-01-01 00:00:00' AND __time < '2001-01-01 00:00:00'\n", - "Possible error: Illegal TIMESTAMP constant: CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL" - ); + String sql = "SELECT COUNT(*) FROM druid.foo\n" + + "WHERE __time >= 'z2000-01-01 00:00:00' AND __time < '2001-01-01 00:00:00'\n"; + try { + testBuilder().sql(sql).run(); + } + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs("Illegal TIMESTAMP constant [CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL]") + ); + } + catch (Exception e) { + log.error(e, "Expected DruidException for query: %s", sql); + Assert.fail(sql); + } } @Test @@ -7548,7 +7542,8 @@ public void testQueryWithMoreThanMaxNumericInFilter() { notMsqCompatible(); expectedException.expect(UOE.class); - expectedException.expectMessage("The number of values in the IN clause for [dim6] in query exceeds configured maxNumericFilter limit of [2] for INs. Cast [3] values of IN clause to String"); + expectedException.expectMessage( + "The number of values in the IN clause for [dim6] in query exceeds configured maxNumericFilter limit of [2] for INs. 
Cast [3] values of IN clause to String"); testQuery( PLANNER_CONFIG_MAX_NUMERIC_IN_FILTER, @@ -11301,12 +11296,13 @@ public void testTimeExtractWithTooFewArguments() testQuery("SELECT TIME_EXTRACT(__time) FROM druid.foo", ImmutableList.of(), ImmutableList.of()); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { - Assert.assertTrue( - e.getMessage().contains("Invalid number of arguments to function 'TIME_EXTRACT'. Was expecting 2 arguments") + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs( + "Invalid number of arguments to function 'TIME_EXTRACT'. Was expecting 2 arguments (line [1], column [8])" + ) ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); } } @@ -12828,8 +12824,8 @@ public void testTimeStampAddZeroDayPeriod() //Since adding a zero period does not change the timestamp, just compare the stamp with the orignal TestDataBuilder.ROWS1.stream() - .map(row -> new Object[]{row.getTimestampFromEpoch()}) - .collect(Collectors.toList()) + .map(row -> new Object[]{row.getTimestampFromEpoch()}) + .collect(Collectors.toList()) ); } @@ -12857,8 +12853,8 @@ public void testTimeStampAddZeroMonthPeriod() //Since adding a zero period does not change the timestamp, just compare the stamp with the orignal TestDataBuilder.ROWS1.stream() - .map(row -> new Object[]{row.getTimestampFromEpoch()}) - .collect(Collectors.toList()) + .map(row -> new Object[]{row.getTimestampFromEpoch()}) + .collect(Collectors.toList()) ); } @@ -12888,8 +12884,8 @@ public void testTimeStampAddZeroYearPeriod() //Since adding a zero period does not change the timestamp, just compare the stamp with the orignal TestDataBuilder.ROWS1.stream() - .map(row -> new Object[]{row.getTimestampFromEpoch()}) - .collect(Collectors.toList()) + .map(row -> new Object[]{row.getTimestampFromEpoch()}) + .collect(Collectors.toList()) ); 
} @@ -12926,8 +12922,8 @@ public void testTimeStampAddConversion() // verify if query results match the given TestDataBuilder.ROWS1.stream() - .map(r -> new Object[]{periodGranularity.increment(r.getTimestamp()).getMillis()}) - .collect(Collectors.toList()) + .map(r -> new Object[]{periodGranularity.increment(r.getTimestamp()).getMillis()}) + .collect(Collectors.toList()) ); // @@ -12955,8 +12951,8 @@ public void testTimeStampAddConversion() // verify if query results match the given // "cnt" for each row is 1 TestDataBuilder.ROWS1.stream() - .map(row -> new Object[]{periodGranularity.increment(row.getTimestamp()).getMillis()}) - .collect(Collectors.toList()) + .map(row -> new Object[]{periodGranularity.increment(row.getTimestamp()).getMillis()}) + .collect(Collectors.toList()) ); } @@ -13961,7 +13957,7 @@ public void testStringAggExpression() ); } - @Test(expected = RelOptPlanner.CannotPlanException.class) + @Test(expected = DruidException.class) public void testStringAggExpressionNonConstantSeparator() { testQuery( @@ -14110,7 +14106,7 @@ public void testHumanReadableFormatFunction() @Test public void testHumanReadableFormatFunctionExceptionWithWrongNumberType() { - this.expectedException.expect(SqlPlanningException.class); + this.expectedException.expect(DruidException.class); this.expectedException.expectMessage("Supported form(s): HUMAN_READABLE_BINARY_BYTE_FORMAT(Number, [Precision])"); testQuery( "SELECT HUMAN_READABLE_BINARY_BYTE_FORMAT('45678')", @@ -14122,7 +14118,7 @@ public void testHumanReadableFormatFunctionExceptionWithWrongNumberType() @Test public void testHumanReadableFormatFunctionWithWrongPrecisionType() { - this.expectedException.expect(SqlPlanningException.class); + this.expectedException.expect(DruidException.class); this.expectedException.expectMessage("Supported form(s): HUMAN_READABLE_BINARY_BYTE_FORMAT(Number, [Precision])"); testQuery( "SELECT HUMAN_READABLE_BINARY_BYTE_FORMAT(45678, '2')", @@ -14134,7 +14130,7 @@ public void 
testHumanReadableFormatFunctionWithWrongPrecisionType() @Test public void testHumanReadableFormatFunctionWithInvalidNumberOfArguments() { - this.expectedException.expect(SqlPlanningException.class); + this.expectedException.expect(DruidException.class); /* * frankly speaking, the exception message thrown here is a little bit confusing @@ -14545,21 +14541,21 @@ public void testComplexDecode() testQuery( "SELECT COMPLEX_DECODE_BASE64('hyperUnique',PARSE_JSON(TO_JSON_STRING(unique_dim1))) from druid.foo LIMIT 10", ImmutableList.of( - Druids.newScanQueryBuilder() - .dataSource(CalciteTests.DATASOURCE1) - .intervals(querySegmentSpec(Filtration.eternity())) - .columns("v0") - .virtualColumns( - expressionVirtualColumn( - "v0", - "complex_decode_base64('hyperUnique',parse_json(to_json_string(\"unique_dim1\")))", - ColumnType.ofComplex("hyperUnique") - ) - ) - .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) - .legacy(false) - .limit(10) - .build() + Druids.newScanQueryBuilder() + .dataSource(CalciteTests.DATASOURCE1) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("v0") + .virtualColumns( + expressionVirtualColumn( + "v0", + "complex_decode_base64('hyperUnique',parse_json(to_json_string(\"unique_dim1\")))", + ColumnType.ofComplex("hyperUnique") + ) + ) + .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) + .legacy(false) + .limit(10) + .build() ), ImmutableList.of( new Object[]{"\"AQAAAEAAAA==\""}, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java index 0c1f016600d1..d7ba655c1efa 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java @@ -22,6 +22,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import 
org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.jackson.JacksonUtils; @@ -33,7 +35,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.server.security.ForbiddenException; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.external.Externals; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.parser.DruidSqlInsert; @@ -41,6 +42,7 @@ import org.apache.druid.sql.calcite.parser.DruidSqlReplace; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.util.CalciteTests; +import org.hamcrest.MatcherAssert; import org.junit.Assert; import org.junit.Test; @@ -49,8 +51,6 @@ import java.util.HashMap; import java.util.Map; -import static org.junit.Assert.assertEquals; - public class CalciteReplaceDmlTest extends CalciteIngestionDmlTest { private static final Map REPLACE_ALL_TIME_CHUNKS = ImmutableMap.of( @@ -218,10 +218,9 @@ public void testReplaceForUnsupportedDeleteWhereClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time LIKE '20__-02-01' SELECT * FROM foo PARTITIONED BY MONTH") - .expectValidationError( - SqlPlanningException.class, - "Unsupported operation in OVERWRITE WHERE clause: LIKE" - ) + .expectValidationError(invalidSqlIs( + "Invalid OVERWRITE WHERE clause [`__time` LIKE '20__-02-01']: Unsupported operation [LIKE] in OVERWRITE WHERE clause." 
+ )) .verify(); } @@ -230,10 +229,9 @@ public void testReplaceForInvalidDeleteWhereClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE TRUE SELECT * FROM foo PARTITIONED BY MONTH") - .expectValidationError( - SqlPlanningException.class, - "Invalid OVERWRITE WHERE clause" - ) + .expectValidationError(invalidSqlIs( + "Invalid OVERWRITE WHERE clause [TRUE]: expected clause including AND, OR, NOT, >, <, >=, <= OR BETWEEN operators" + )) .verify(); } @@ -242,10 +240,9 @@ public void testReplaceForDeleteWhereClauseOnUnsupportedColumns() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE dim1 > TIMESTAMP '2000-01-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlPlanningException.class, - "Only __time column is supported in OVERWRITE WHERE clause" - ) + .expectValidationError(invalidSqlIs( + "OVERWRITE WHERE clause only supports filtering on the __time column, got [947030400000 < dim1 as numeric]" + )) .verify(); } @@ -255,7 +252,9 @@ public void testReplaceWithOrderBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo ORDER BY dim1 PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "Cannot have ORDER BY on a REPLACE statement, use CLUSTERED BY instead.") + .expectValidationError(invalidSqlIs( + "Cannot use an ORDER BY clause on a Query of type [REPLACE], use CLUSTERED BY instead" + )) .verify(); } @@ -265,8 +264,10 @@ public void testReplaceForMisalignedPartitionInterval() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-01-06 00:00:00' SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - SqlPlanningException.class, - "OVERWRITE WHERE clause contains an interval [2000-01-05T00:00:00.000Z/2000-01-06T00:00:00.001Z] which is not aligned with PARTITIONED BY granularity {type=period, period=P1M, timeZone=UTC, origin=null}" + invalidSqlIs( + "OVERWRITE WHERE 
clause identified interval [2000-01-05T00:00:00.000Z/2000-01-06T00:00:00.001Z] " + + "which is not aligned with PARTITIONED BY granularity [{type=period, period=P1M, timeZone=UTC, origin=null}]" + ) ) .verify(); } @@ -276,10 +277,10 @@ public void testReplaceForInvalidPartition() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlPlanningException.class, - "OVERWRITE WHERE clause contains an interval [2000-01-05T00:00:00.000Z/2000-02-05T00:00:00.001Z] which is not aligned with PARTITIONED BY granularity AllGranularity" - ) + .expectValidationError(invalidSqlIs( + "OVERWRITE WHERE clause identified interval [2000-01-05T00:00:00.000Z/2000-02-05T00:00:00.001Z] " + + "which is not aligned with PARTITIONED BY granularity [AllGranularity]" + )) .verify(); } @@ -290,10 +291,10 @@ public void testReplaceFromTableWithEmptyInterval() .sql("REPLACE INTO dst OVERWRITE WHERE " + "__time < TIMESTAMP '2000-01-01' AND __time > TIMESTAMP '2000-01-01' " + "SELECT * FROM foo PARTITIONED BY MONTH") - .expectValidationError( - SqlPlanningException.class, - "Intervals for replace are empty" - ) + .expectValidationError(invalidSqlIs( + "The OVERWRITE WHERE clause [(__time as numeric < 946684800000 && 946684800000 < __time as numeric)] " + + "produced no time intervals, are the bounds overly restrictive?" 
+ )) .verify(); } @@ -302,7 +303,7 @@ public void testReplaceForWithInvalidInterval() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-INVALID0:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class) + .expectValidationError(DruidException.class) .verify(); } @@ -311,7 +312,7 @@ public void testReplaceForWithoutPartitionSpec() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class) + .expectValidationError(DruidException.class) .verify(); } @@ -381,7 +382,11 @@ public void testReplaceIntoInvalidDataSourceName() { testIngestionQuery() .sql("REPLACE INTO \"in/valid\" OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "REPLACE dataSource cannot contain the '/' character.") + .expectValidationError( + DruidExceptionMatcher + .invalidInput() + .expectMessageIs("Invalid value for field [table]: Value [in/valid] cannot contain '/'.") + ) .verify(); } @@ -390,7 +395,9 @@ public void testReplaceUsingColumnList() { testIngestionQuery() .sql("REPLACE INTO dst (foo, bar) OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "REPLACE with a target column list is not supported.") + .expectValidationError( + invalidSqlIs("Operation [REPLACE] cannot be run with a target column list, given [dst (`foo`, `bar`)]") + ) .verify(); } @@ -399,7 +406,9 @@ public void testReplaceWithoutPartitionedBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo") - .expectValidationError(SqlPlanningException.class, "REPLACE statements must specify PARTITIONED BY clause explicitly") + .expectValidationError(invalidSqlIs( + "Operation [REPLACE] requires a PARTITIONED BY to be explicitly 
defined, but none was found." + )) .verify(); } @@ -408,7 +417,9 @@ public void testReplaceWithoutPartitionedByWithClusteredBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo CLUSTERED BY dim1") - .expectValidationError(SqlPlanningException.class, "CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause") + .expectValidationError(invalidSqlIs( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + )) .verify(); } @@ -417,7 +428,10 @@ public void testReplaceWithoutOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") + .expectValidationError(invalidSqlIs( + "Missing time chunk information in OVERWRITE clause for REPLACE. " + + "Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." + )) .verify(); } @@ -426,7 +440,10 @@ public void testReplaceWithoutCompleteOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") + .expectValidationError(invalidSqlIs( + "Missing time chunk information in OVERWRITE clause for REPLACE. " + + "Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." 
+ )) .verify(); } @@ -435,10 +452,10 @@ public void testReplaceIntoSystemTable() { testIngestionQuery() .sql("REPLACE INTO INFORMATION_SCHEMA.COLUMNS OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlPlanningException.class, - "Cannot REPLACE into INFORMATION_SCHEMA.COLUMNS because it is not a Druid datasource." - ) + .expectValidationError(invalidSqlIs( + "Table [INFORMATION_SCHEMA.COLUMNS] does not support operation [REPLACE]" + + " because it is not a Druid datasource" + )) .verify(); } @@ -447,10 +464,9 @@ public void testReplaceIntoView() { testIngestionQuery() .sql("REPLACE INTO view.aview OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlPlanningException.class, - "Cannot REPLACE into view.aview because it is not a Druid datasource." - ) + .expectValidationError(invalidSqlIs( + "Table [view.aview] does not support operation [REPLACE] because it is not a Druid datasource" + )) .verify(); } @@ -477,10 +493,9 @@ public void testReplaceIntoNonexistentSchema() { testIngestionQuery() .sql("REPLACE INTO nonexistent.dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlPlanningException.class, - "Cannot REPLACE into nonexistent.dst because it is not a Druid datasource." 
- ) + .expectValidationError(invalidSqlIs( + "Table [nonexistent.dst] does not support operation [REPLACE] because it is not a Druid datasource" + )) .verify(); } @@ -576,7 +591,7 @@ public void testReplaceWithClusteredBy() @Test public void testReplaceWithPartitionedByContainingInvalidGranularity() { - // Throws a ValidationException, which gets converted to a SqlPlanningException before throwing to end user + // Throws a ValidationException, which gets converted to a DruidException before throwing to end user try { testQuery( "REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY 'invalid_granularity'", @@ -585,10 +600,13 @@ public void testReplaceWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (SqlPlanningException e) { - assertEquals( - "Encountered 'invalid_granularity' after PARTITIONED BY. Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", - e.getMessage() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs( + "Invalid granularity ['invalid_granularity'] after PARTITIONED BY. " + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR() or TIME_FLOOR()" + ) ); } didTest = true; @@ -779,8 +797,7 @@ public void testExplainPlanReplaceWithClusteredByDescThrowsException() testIngestionQuery() .sql(sql) .expectValidationError( - SqlPlanningException.class, - "[`dim1` DESC] is invalid. CLUSTERED BY columns cannot be sorted in descending order." 
+ invalidSqlIs("Invalid CLUSTERED BY clause [`dim1` DESC]: cannot sort in descending order.") ) .verify(); } @@ -909,7 +926,9 @@ public void testReplaceWithSqlOuterLimit() testIngestionQuery() .context(context) .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "sqlOuterLimit cannot be provided with REPLACE.") + .expectValidationError(DruidExceptionMatcher.invalidInput().expectMessageIs( + "Context parameter [sqlOuterLimit] cannot be provided on operator [REPLACE]" + )) .verify(); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java index 54d8e856af8b..cc60a27acdd1 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableList; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; @@ -43,7 +44,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.virtual.ExpressionVirtualColumn; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.util.CalciteTests; @@ -311,8 +311,10 @@ public void testSelectConstantExpressionFromTable() @Test public void testSelectConstantExpressionEquivalentToNaN() { - expectedException.expectMessage( - "'(log10(0) - log10(0))' evaluates to 'NaN' that is not supported in SQL. 
You can either cast the expression as BIGINT ('CAST((log10(0) - log10(0)) as BIGINT)') or VARCHAR ('CAST((log10(0) - log10(0)) as VARCHAR)') or change the expression itself"); + expectedException.expect(invalidSqlIs( + "Expression [(log10(0) - log10(0))] evaluates to an unsupported value [NaN], " + + "expected something that can be a Double. Consider casting with 'CAST( AS BIGINT)'" + )); testQuery( "SELECT log10(0) - log10(0), dim1 FROM foo LIMIT 1", ImmutableList.of(), @@ -323,8 +325,10 @@ public void testSelectConstantExpressionEquivalentToNaN() @Test public void testSelectConstantExpressionEquivalentToInfinity() { - expectedException.expectMessage( - "'log10(0)' evaluates to '-Infinity' that is not supported in SQL. You can either cast the expression as BIGINT ('CAST(log10(0) as BIGINT)') or VARCHAR ('CAST(log10(0) as VARCHAR)') or change the expression itself"); + expectedException.expect(invalidSqlIs( + "Expression [log10(0)] evaluates to an unsupported value [-Infinity], " + + "expected something that can be a Double. 
Consider casting with 'CAST( AS BIGINT)'" + )); testQuery( "SELECT log10(0), dim1 FROM foo LIMIT 1", ImmutableList.of(), @@ -964,7 +968,7 @@ public void testSelectCurrentTimePrecisionTooHigh() testQueryThrows( "SELECT CURRENT_TIMESTAMP(4)", expectedException -> { - expectedException.expect(SqlPlanningException.class); + expectedException.expect(DruidException.class); expectedException.expectMessage( "Argument to function 'CURRENT_TIMESTAMP' must be a valid precision between '0' and '3'" ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/QueryTestRunner.java b/sql/src/test/java/org/apache/druid/sql/calcite/QueryTestRunner.java index 34c8e4904146..963e1e0b23bc 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/QueryTestRunner.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/QueryTestRunner.java @@ -618,7 +618,9 @@ public void verify() public QueryTestRunner(QueryTestBuilder builder) { QueryTestConfig config = builder.config; - Assume.assumeTrue(!config.isRunningMSQ() || builder.msqCompatible); + if (config.isRunningMSQ()) { + Assume.assumeTrue(builder.msqCompatible); + } if (builder.expectedResultsVerifier == null && builder.expectedResults != null) { builder.expectedResultsVerifier = config.defaultResultsVerifier( builder.expectedResults, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java index d75796792752..157a9e271753 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java @@ -22,7 +22,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; -import org.apache.calcite.tools.ValidationException; import org.apache.druid.java.util.common.StringUtils; import 
org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.guava.Sequence; @@ -62,7 +61,6 @@ import org.junit.runners.Parameterized; import javax.annotation.Nullable; - import java.io.IOException; import java.util.List; import java.util.Map; @@ -181,13 +179,12 @@ public SqlVectorizedExpressionSanityTest(String query) } @Test - public void testQuery() throws ValidationException + public void testQuery() { sanityTestVectorizedSqlQueries(PLANNER_FACTORY, query); } public static void sanityTestVectorizedSqlQueries(PlannerFactory plannerFactory, String query) - throws ValidationException { final Map vector = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, "force", diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java index 1f295ea3587a..01f0544e1567 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java @@ -31,10 +31,12 @@ import org.apache.calcite.sql.SqlPostfixOperator; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.parser.SqlParserPos; -import org.apache.calcite.tools.ValidationException; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.sql.calcite.expression.builtin.TimeFloorOperatorConversion; +import org.hamcrest.MatcherAssert; import org.junit.Assert; import org.junit.Test; import org.junit.experimental.runners.Enclosed; @@ -132,12 +134,8 @@ public static class ClusteredByColumnsValidationTest public void testEmptyClusteredByColumnsValid() { final SqlNodeList clusteredByArgs = new SqlNodeList(SqlParserPos.ZERO); - try { - 
DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs); - } - catch (ValidationException e) { - Assert.fail("Did not expect an exception" + e.getMessage()); - } + + DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs); } /** @@ -151,12 +149,7 @@ public void testClusteredByColumnsValid() clusteredByArgs.add(new SqlIdentifier("DIM2 ASC", SqlParserPos.ZERO)); clusteredByArgs.add(SqlLiteral.createExactNumeric("3", SqlParserPos.ZERO)); - try { - DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs); - } - catch (ValidationException e) { - Assert.fail("Did not expect an exception" + e.getMessage()); - } + DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs); } /** @@ -179,14 +172,10 @@ public void testClusteredByColumnsWithDescThrowsException() ); clusteredByArgs.add(sqlBasicCall); - ValidationException e = Assert.assertThrows( - ValidationException.class, - () -> DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs) - ); - Assert.assertEquals( - "[`DIM4` DESC] is invalid. CLUSTERED BY columns cannot be sorted in descending order.", - e.getMessage() - ); + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs("Invalid CLUSTERED BY clause [`DIM4` DESC]: cannot sort in descending order.") + .assertThrowsAndMatches(() -> DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs)); } } @@ -199,13 +188,18 @@ public static class FloorToGranularityConversionErrorsTest public void testConvertSqlNodeToGranularityWithIncorrectNode() { SqlNode sqlNode = SqlLiteral.createCharString("day", SqlParserPos.ZERO); - ParseException e = Assert.assertThrows( - ParseException.class, + DruidException e = Assert.assertThrows( + DruidException.class, () -> DruidSqlParserUtils.convertSqlNodeToGranularityThrowingParseExceptions(sqlNode) ); - Assert.assertEquals( - "Encountered 'day' after PARTITIONED BY. 
Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", - e.getMessage() + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs( + "Invalid granularity ['day'] after PARTITIONED BY. " + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR() or TIME_FLOOR()" + ) ); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRuleTest.java index de7a005852d2..73054f506a29 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRuleTest.java @@ -29,10 +29,10 @@ import org.apache.calcite.util.DateString; import org.apache.calcite.util.TimeString; import org.apache.calcite.util.TimestampString; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.sql.calcite.planner.DruidTypeSystem; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.junit.Assert; @@ -177,8 +177,14 @@ public void testGetValueFromTimestampWithLocalTimeZoneLiteral() new TimestampString("2021-04-01 16:54:31"), 0 ); - expectedException.expect(UnsupportedSQLQueryException.class); - expectedException.expectMessage("TIMESTAMP_WITH_LOCAL_TIME_ZONE type is not supported"); + expectedException.expect( + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs( + "Cannot handle literal [2021-04-01 16:54:31:TIMESTAMP_WITH_LOCAL_TIME_ZONE(0)] " + + "of unsupported type [TIMESTAMP_WITH_LOCAL_TIME_ZONE]." 
+ ) + ); DruidLogicalValuesRule.getValueFromLiteral(literal, DEFAULT_CONTEXT); } @@ -186,8 +192,11 @@ public void testGetValueFromTimestampWithLocalTimeZoneLiteral() public void testGetValueFromTimeLiteral() { RexLiteral literal = REX_BUILDER.makeTimeLiteral(new TimeString("16:54:31"), 0); - expectedException.expect(UnsupportedSQLQueryException.class); - expectedException.expectMessage("TIME type is not supported"); + expectedException.expect( + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs("Cannot handle literal [16:54:31] of unsupported type [TIME].") + ); DruidLogicalValuesRule.getValueFromLiteral(literal, DEFAULT_CONTEXT); } @@ -195,8 +204,14 @@ public void testGetValueFromTimeLiteral() public void testGetValueFromTimeWithLocalTimeZoneLiteral() { RexLiteral literal = REX_BUILDER.makeTimeWithLocalTimeZoneLiteral(new TimeString("16:54:31"), 0); - expectedException.expect(UnsupportedSQLQueryException.class); - expectedException.expectMessage("TIME_WITH_LOCAL_TIME_ZONE type is not supported"); + expectedException.expect( + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs( + "Cannot handle literal [16:54:31:TIME_WITH_LOCAL_TIME_ZONE(0)] " + + "of unsupported type [TIME_WITH_LOCAL_TIME_ZONE]." 
+ ) + ); DruidLogicalValuesRule.getValueFromLiteral(literal, DEFAULT_CONTEXT); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/QueryLogHook.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/QueryLogHook.java index 967926681fb2..c95e2e609204 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/util/QueryLogHook.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/QueryLogHook.java @@ -31,6 +31,7 @@ import org.junit.runners.model.Statement; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Supplier; @@ -43,6 +44,7 @@ public class QueryLogHook implements TestRule private final Supplier objectMapperSupplier; private final List> recordedQueries = Lists.newCopyOnWriteArrayList(); + private final AtomicBoolean skipLog = new AtomicBoolean(false); public QueryLogHook(final Supplier objectMapperSupplier) { @@ -69,6 +71,17 @@ public List> getRecordedQueries() return ImmutableList.copyOf(recordedQueries); } + public void withSkippedLog(Consumer consumer) + { + try { + skipLog.set(true); + consumer.accept(null); + } + finally { + skipLog.set(false); + } + } + @Override public Statement apply(final Statement base, final Description description) { @@ -80,6 +93,10 @@ public void evaluate() throws Throwable clearRecordedQueries(); final Consumer function = query -> { + if (skipLog.get()) { + return; + } + try { recordedQueries.add((Query) query); log.info( diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index f825423ba9c4..7dbc5ce69317 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -34,6 +34,10 @@ import org.apache.druid.common.exception.AllowedRegexErrorResponseTransformStrategy; import org.apache.druid.common.exception.ErrorResponseTransformStrategy; 
import org.apache.druid.common.guava.SettableSupplier; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; +import org.apache.druid.error.ErrorResponse; +import org.apache.druid.error.QueryExceptionCompat; import org.apache.druid.jackson.DefaultObjectMapper; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.NonnullPair; @@ -81,7 +85,6 @@ import org.apache.druid.sql.HttpStatement; import org.apache.druid.sql.PreparedStatement; import org.apache.druid.sql.SqlLifecycleManager; -import org.apache.druid.sql.SqlPlanningException.PlanningError; import org.apache.druid.sql.SqlQueryPlus; import org.apache.druid.sql.SqlStatementFactory; import org.apache.druid.sql.SqlToolbox; @@ -104,8 +107,11 @@ import org.hamcrest.CoreMatchers; import org.hamcrest.MatcherAssert; import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -133,11 +139,13 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; +@SuppressWarnings("ALL") public class SqlResourceTest extends CalciteTestBase { public static final DruidNode DUMMY_DRUID_NODE = new DruidNode("dummy", "dummy", false, 1, null, true, false); @@ -159,13 +167,17 @@ public class SqlResourceTest extends CalciteTestBase private static final List EXPECTED_SQL_TYPES_FOR_RESULT_FORMAT_TESTS = Arrays.asList("TIMESTAMP", "VARCHAR", "VARCHAR", "VARCHAR", "BIGINT", "FLOAT", "DOUBLE", "OTHER", "VARCHAR"); + private static Closer staticCloser = Closer.create(); private static QueryRunnerFactoryConglomerate 
conglomerate; - private static Closer resourceCloser; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); + @ClassRule + public static TemporaryFolder temporaryFolder = new TemporaryFolder(); + private static SpecificSegmentsQuerySegmentWalker walker; + private static QueryScheduler scheduler; + @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); - private SpecificSegmentsQuerySegmentWalker walker; + + private Closer resourceCloser; private TestRequestLogger testRequestLogger; private SqlResource resource; private MockHttpServletRequest req; @@ -183,15 +195,13 @@ public class SqlResourceTest extends CalciteTestBase private final SettableSupplier responseContextSupplier = new SettableSupplier<>(); private Consumer onExecute = NULL_ACTION; - private Supplier schedulerBaggage = () -> null; + private static final AtomicReference> SCHEDULER_BAGGAGE = new AtomicReference<>(); - @Before - public void setUp() throws Exception + @BeforeClass + public static void setupClass() throws Exception { - resourceCloser = Closer.create(); - conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser); - - final QueryScheduler scheduler = new QueryScheduler( + conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(staticCloser); + scheduler = new QueryScheduler( 5, ManualQueryPrioritizationStrategy.INSTANCE, new HiLoQueryLaningStrategy(40), @@ -204,15 +214,29 @@ public Sequence run(Query query, Sequence resultSequence) return super.run( query, new LazySequence<>(() -> { - schedulerBaggage.get(); + SCHEDULER_BAGGAGE.get().get(); return resultSequence; }) ); } }; + walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder(), scheduler); + staticCloser.register(walker); + } + + @AfterClass + public static void teardownClass() throws Exception + { + staticCloser.close(); + } + + @Before + public void setUp() throws Exception + { + SCHEDULER_BAGGAGE.set(() -> null); + resourceCloser = 
Closer.create(); executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s")); - walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder(), scheduler); final PlannerConfig plannerConfig = PlannerConfig.builder().serializeComplexValues(false).build(); final DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema( @@ -320,8 +344,8 @@ MockHttpServletRequest request() @After public void tearDown() throws Exception { - walker.close(); - walker = null; + SCHEDULER_BAGGAGE.set(() -> null); + executorService.shutdownNow(); executorService.awaitTermination(2, TimeUnit.SECONDS); resourceCloser.close(); @@ -650,7 +674,7 @@ public void testArrayResultFormatWithErrorAfterSecondRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.ARRAY, false, false, false, null, null), req); // Truncated response: missing final ] @@ -664,7 +688,7 @@ public void testObjectResultFormatWithErrorAfterFirstRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.OBJECT, false, false, false, null, null), req); // Truncated response: missing final ] @@ -678,7 +702,7 @@ public void testArrayLinesResultFormatWithErrorAfterFirstRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.ARRAYLINES, false, false, false, null, null), req); // Truncated response: missing final LFLF @@ -692,7 +716,7 @@ public void testObjectLinesResultFormatWithErrorAfterFirstRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; 
- final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.OBJECTLINES, false, false, false, null, null), req); // Truncated response: missing final LFLF @@ -706,7 +730,7 @@ public void testCsvResultFormatWithErrorAfterFirstRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.CSV, false, false, false, null, null), req); // Truncated response: missing final LFLF @@ -851,7 +875,7 @@ public void testArrayResultFormatWithHeader_nullColumnType() throws Exception public void testArrayLinesResultFormat() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.ARRAYLINES, false, false, false, null, null) ); Assert.assertNull(pair.lhs); @@ -896,7 +920,7 @@ public void testArrayLinesResultFormat() throws Exception public void testArrayLinesResultFormatWithHeader() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.ARRAYLINES, true, true, true, null, null) ); Assert.assertNull(pair.lhs); @@ -944,7 +968,7 @@ public void testArrayLinesResultFormatWithHeader() throws Exception public void testArrayLinesResultFormatWithHeader_nullColumnType() throws Exception { final String query = "SELECT (1, 2) FROM INFORMATION_SCHEMA.COLUMNS LIMIT 1"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.ARRAYLINES, true, true, true, null, null) ); Assert.assertNull(pair.lhs); @@ -1018,7 +1042,7 @@ public void testObjectResultFormat() throws Exception public void testObjectLinesResultFormat() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo 
LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.OBJECTLINES, false, false, false, null, null) ); Assert.assertNull(pair.lhs); @@ -1075,7 +1099,7 @@ public void testObjectLinesResultFormat() throws Exception public void testObjectLinesResultFormatWithMinimalHeader() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = + final Pair pair = doPostRaw(new SqlQuery(query, ResultFormat.OBJECTLINES, true, false, false, null, null)); Assert.assertNull(pair.lhs); final String response = pair.rhs; @@ -1135,7 +1159,7 @@ public void testObjectLinesResultFormatWithMinimalHeader() throws Exception public void testObjectLinesResultFormatWithFullHeader() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = + final Pair pair = doPostRaw(new SqlQuery(query, ResultFormat.OBJECTLINES, true, true, true, null, null)); Assert.assertNull(pair.lhs); final String response = pair.rhs; @@ -1201,7 +1225,7 @@ public void testObjectLinesResultFormatWithFullHeader() throws Exception public void testObjectLinesResultFormatWithFullHeader_nullColumnType() throws Exception { final String query = "SELECT (1, 2) FROM INFORMATION_SCHEMA.COLUMNS LIMIT 1"; - final Pair pair = + final Pair pair = doPostRaw(new SqlQuery(query, ResultFormat.OBJECTLINES, true, true, true, null, null)); Assert.assertNull(pair.lhs); final String response = pair.rhs; @@ -1230,7 +1254,7 @@ public void testObjectLinesResultFormatWithFullHeader_nullColumnType() throws Ex public void testCsvResultFormat() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.CSV, false, false, false, null, null) ); Assert.assertNull(pair.lhs); @@ -1252,7 +1276,7 @@ public void testCsvResultFormat() throws Exception 
public void testCsvResultFormatWithHeaders() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.CSV, true, true, true, null, null) ); Assert.assertNull(pair.lhs); @@ -1277,7 +1301,7 @@ public void testCsvResultFormatWithHeaders() throws Exception public void testCsvResultFormatWithHeaders_nullColumnType() throws Exception { final String query = "SELECT (1, 2) FROM INFORMATION_SCHEMA.COLUMNS LIMIT 1"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.CSV, true, true, true, null, null) ); Assert.assertNull(pair.lhs); @@ -1338,12 +1362,12 @@ public void testExplainCountStar() throws Exception @Test public void testCannotParse() throws Exception { - QueryException exception = postSyncForException("FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); + ErrorResponse errorResponse = postSyncForException("FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); - Assert.assertNotNull(exception); - Assert.assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), exception.getErrorCode()); - Assert.assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue(exception.getMessage().contains("Encountered \"FROM\" at line 1, column 1.")); + validateInvalidSqlError( + errorResponse, + "Received an unexpected token [FROM] (line [1], column [1]), acceptable options: [\"INSERT\", \"UPSERT\", " + ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1351,12 +1375,15 @@ public void testCannotParse() throws Exception @Test public void testCannotValidate() throws Exception { - QueryException exception = postSyncForException("SELECT dim4 FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); + ErrorResponse errorResponse = postSyncForException( + "SELECT dim4 FROM druid.foo", + Status.BAD_REQUEST.getStatusCode() 
+ ); - Assert.assertNotNull(exception); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), exception.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue(exception.getMessage().contains("Column 'dim4' not found in any table")); + validateInvalidSqlError( + errorResponse, + "Column 'dim4' not found in any table" + ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1366,16 +1393,17 @@ public void testCannotConvert() throws Exception { // SELECT + ORDER unsupported final SqlQuery unsupportedQuery = createSimpleQueryWithId("id", "SELECT dim1 FROM druid.foo ORDER BY dim1"); - QueryException exception = postSyncForException(unsupportedQuery, Status.BAD_REQUEST.getStatusCode()); + ErrorResponse exception = postSyncForException(unsupportedQuery, Status.BAD_REQUEST.getStatusCode()); Assert.assertTrue((Boolean) req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)); - Assert.assertNotNull(exception); - Assert.assertEquals("SQL query is unsupported", exception.getErrorCode()); - Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue( - exception.getMessage() - .contains("Query not supported. 
" + - "Possible error: SQL query requires order by non-time column [dim1 ASC], which is not supported.") + + validateErrorResponse( + exception, + "general", + DruidException.Persona.ADMIN, + DruidException.Category.INVALID_INPUT, + "Query planning failed for unknown reason, our best guess is this " + + "[SQL query requires order by non-time column [[dim1 ASC]], which is not supported.]" ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); @@ -1390,18 +1418,14 @@ public void testCannotConvert() throws Exception public void testCannotConvert_UnsupportedSQLQueryException() throws Exception { // max(string) unsupported - QueryException exception = postSyncForException( + ErrorResponse errorResponse = postSyncForException( "SELECT max(dim1) FROM druid.foo", Status.BAD_REQUEST.getStatusCode() ); - Assert.assertNotNull(exception); - Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorCode(), exception.getErrorCode()); - Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue( - exception.getMessage() - .contains("Query not supported. 
" + - "Possible error: Max aggregation is not supported for 'STRING' type") + validateInvalidSqlError( + errorResponse, + "Aggregation [MAX] does not support type [STRING], column [v0]" ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); @@ -1410,7 +1434,7 @@ public void testCannotConvert_UnsupportedSQLQueryException() throws Exception @Test public void testResourceLimitExceeded() throws Exception { - final QueryException exception = doPost( + final ErrorResponse errorResponse = doPost( new SqlQuery( "SELECT DISTINCT dim1 FROM foo", ResultFormat.OBJECT, @@ -1422,10 +1446,12 @@ public void testResourceLimitExceeded() throws Exception ) ).lhs; - Assert.assertNotNull(exception); - Assert.assertEquals(exception.getErrorCode(), QueryException.RESOURCE_LIMIT_EXCEEDED_ERROR_CODE); - Assert.assertEquals(exception.getErrorClass(), ResourceLimitExceededException.class.getName()); - checkSqlRequestLog(false); + validateLegacyQueryExceptionErrorResponse( + errorResponse, + QueryException.RESOURCE_LIMIT_EXCEEDED_ERROR_CODE, + ResourceLimitExceededException.class.getName(), + "" + ); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1441,7 +1467,7 @@ public void testUnsupportedQueryThrowsException() throws Exception { String errorMessage = "This will be supported in Druid 9999"; failOnExecute(errorMessage); - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery( "SELECT ANSWER TO LIFE", ResultFormat.OBJECT, @@ -1454,9 +1480,12 @@ public void testUnsupportedQueryThrowsException() throws Exception 501 ); - Assert.assertNotNull(exception); - Assert.assertEquals(QueryException.QUERY_UNSUPPORTED_ERROR_CODE, exception.getErrorCode()); - Assert.assertEquals(QueryUnsupportedException.class.getName(), exception.getErrorClass()); + validateLegacyQueryExceptionErrorResponse( + exception, + QueryException.QUERY_UNSUPPORTED_ERROR_CODE, + QueryUnsupportedException.class.getName(), 
+ "" + ); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1537,7 +1566,7 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() String errorMessage = "This will be supported in Druid 9999"; failOnExecute(errorMessage); - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery( "SELECT ANSWER TO LIFE", ResultFormat.OBJECT, @@ -1550,48 +1579,38 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() 501 ); - Assert.assertNotNull(exception); - Assert.assertNull(exception.getMessage()); - Assert.assertNull(exception.getHost()); - Assert.assertEquals(exception.getErrorCode(), QueryException.QUERY_UNSUPPORTED_ERROR_CODE); - Assert.assertNull(exception.getErrorClass()); + validateLegacyQueryExceptionErrorResponse( + exception, + QueryException.QUERY_UNSUPPORTED_ERROR_CODE, + "org.apache.druid.query.QueryUnsupportedException", + "This will be supported in Druid 9999" + ); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } + /** + * There are various points where Calcite feels it is acceptable to throw an AssertionError when it receives bad + * input. This is unfortunate as a java.lang.Error is very clearly documented to be something that nobody should + * try to catch. But, we can editorialize all we want, we still have to deal with it. So, this test reproduces + * the AssertionError behavior by using the substr() command. At the time that this test was written, the + * SQL substr assumes a literal for the second argument. The code ends up calling `RexLiteral.intValue` on the + * argument, which ends up calling a method that fails with an AssertionError, so this should generate the + * bad behavior for us. This test is validating that our exception handling deals with this meaningfully. 
+ * If this test starts failing, it could be indicative of us not handling the AssertionErrors well anymore, + * OR it could be indicative of this specific code path not throwing an AssertionError anymore. If we run + * into the latter case, we should seek out a new code path that generates the error from Calcite. In the best + * world, this test starts failing because Calcite has moved all of its exceptions away from AssertionErrors + * and we can no longer reproduce the behavior through Calcite, in that world, we should remove our own handling + * and this test at the same time. + * + * @throws Exception + */ @Test public void testAssertionErrorThrowsErrorWithFilterResponse() throws Exception { - resource = new SqlResource( - JSON_MAPPER, - CalciteTests.TEST_AUTHORIZER_MAPPER, - sqlStatementFactory, - lifecycleManager, - new ServerConfig() - { - @Override - public boolean isShowDetailedJettyErrors() - { - return true; - } - - @Override - public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() - { - return new AllowedRegexErrorResponseTransformStrategy(ImmutableList.of()); - } - }, - TEST_RESPONSE_CONTEXT_CONFIG, - DUMMY_DRUID_NODE - ); - - String errorMessage = "could not assert"; - failOnExecute(errorMessage); - onExecute = s -> { - throw new AssertionError(errorMessage); - }; - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery( - "SELECT ANSWER TO LIFE", + "SELECT *, substr(dim2, strpos(dim2, 'hi')+2, 2) FROM foo LIMIT 2", ResultFormat.OBJECT, false, false, @@ -1599,14 +1618,15 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() ImmutableMap.of("sqlQueryId", "id"), null ), - Status.INTERNAL_SERVER_ERROR.getStatusCode() + Status.BAD_REQUEST.getStatusCode() ); - Assert.assertNotNull(exception); - Assert.assertNull(exception.getMessage()); - Assert.assertNull(exception.getHost()); - Assert.assertEquals("Unknown exception", exception.getErrorCode()); - 
Assert.assertNull(exception.getErrorClass()); + MatcherAssert.assertThat( + exception.getUnderlyingException(), + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs("Calcite assertion violated: [not a literal: +(STRPOS($2, 'hi'), 2)]") + ); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1617,7 +1637,7 @@ public void testTooManyRequests() throws Exception CountDownLatch queriesScheduledLatch = new CountDownLatch(numQueries - 1); CountDownLatch runQueryLatch = new CountDownLatch(1); - schedulerBaggage = () -> { + SCHEDULER_BAGGAGE.set(() -> { queriesScheduledLatch.countDown(); try { runQueryLatch.await(); @@ -1626,7 +1646,7 @@ public void testTooManyRequests() throws Exception throw new RE(e); } return null; - }; + }); final String sqlQueryId = "tooManyRequestsTest"; @@ -1654,7 +1674,7 @@ public void testTooManyRequests() throws Exception } queriesScheduledLatch.await(); - schedulerBaggage = () -> null; + SCHEDULER_BAGGAGE.set(() -> null); futures.add(executorService.submit(() -> { try { final Response retVal = postForSyncResponse( @@ -1717,7 +1737,7 @@ public void testQueryTimeoutException() throws Exception sqlQueryId ); - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery( "SELECT CAST(__time AS DATE), dim1, dim2, dim3 FROM druid.foo GROUP by __time, dim1, dim2, dim3 ORDER BY dim2 DESC", ResultFormat.OBJECT, @@ -1730,9 +1750,12 @@ public void testQueryTimeoutException() throws Exception 504 ); - Assert.assertNotNull(exception); - Assert.assertEquals(exception.getErrorCode(), QueryException.QUERY_TIMEOUT_ERROR_CODE); - Assert.assertEquals(exception.getErrorClass(), QueryTimeoutException.class.getName()); + validateLegacyQueryExceptionErrorResponse( + exception, + QueryException.QUERY_TIMEOUT_ERROR_CODE, + QueryTimeoutException.class.getName(), + "" + ); Assert.assertTrue(lifecycleManager.getAll(sqlQueryId).isEmpty()); } @@ -1762,8 +1785,13 @@ public void 
testCancelBetweenValidateAndPlan() throws Exception Response queryResponse = future.get(); assertStatusAndCommonHeaders(queryResponse, Status.INTERNAL_SERVER_ERROR.getStatusCode()); - QueryException exception = deserializeResponse(queryResponse, QueryException.class); - Assert.assertEquals("Query cancelled", exception.getErrorCode()); + ErrorResponse exception = deserializeResponse(queryResponse, ErrorResponse.class); + validateLegacyQueryExceptionErrorResponse( + exception, + "Query cancelled", + null, + "" + ); } @Test @@ -1790,8 +1818,8 @@ public void testCancelBetweenPlanAndExecute() throws Exception Response queryResponse = future.get(); assertStatusAndCommonHeaders(queryResponse, Status.INTERNAL_SERVER_ERROR.getStatusCode()); - QueryException exception = deserializeResponse(queryResponse, QueryException.class); - Assert.assertEquals("Query cancelled", exception.getErrorCode()); + ErrorResponse exception = deserializeResponse(queryResponse, ErrorResponse.class); + validateLegacyQueryExceptionErrorResponse(exception, "Query cancelled", null, ""); } @Test @@ -1854,7 +1882,7 @@ public void testQueryContextException() throws Exception BaseQuery.SQL_QUERY_ID, sqlQueryId ); - final QueryException queryContextException = doPost( + final ErrorResponse errorResponse = doPost( new SqlQuery( "SELECT 1337", ResultFormat.OBJECT, @@ -1865,10 +1893,13 @@ public void testQueryContextException() throws Exception null ) ).lhs; - Assert.assertNotNull(queryContextException); - Assert.assertEquals(QueryException.BAD_QUERY_CONTEXT_ERROR_CODE, queryContextException.getErrorCode()); - Assert.assertEquals(BadQueryContextException.ERROR_CLASS, queryContextException.getErrorClass()); - Assert.assertTrue(queryContextException.getMessage().contains("2000'")); + + validateLegacyQueryExceptionErrorResponse( + errorResponse, + QueryException.BAD_QUERY_CONTEXT_ERROR_CODE, + BadQueryContextException.ERROR_CLASS, + "2000'" + ); checkSqlRequestLog(false); 
Assert.assertTrue(lifecycleManager.getAll(sqlQueryId).isEmpty()); } @@ -1877,16 +1908,14 @@ public void testQueryContextException() throws Exception public void testQueryContextKeyNotAllowed() throws Exception { Map queryContext = ImmutableMap.of(DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY, "all"); - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery("SELECT 1337", ResultFormat.OBJECT, false, false, false, queryContext, null), Status.BAD_REQUEST.getStatusCode() ); - Assert.assertNotNull(exception); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), exception.getErrorCode()); - MatcherAssert.assertThat( - exception.getMessage(), - CoreMatchers.containsString("Cannot execute query with context parameter [sqlInsertSegmentGranularity]") + validateInvalidInputError( + exception, + "Query context parameter [sqlInsertSegmentGranularity] is not allowed" ); checkSqlRequestLog(false); } @@ -1915,7 +1944,7 @@ private static SqlQuery createSimpleQueryWithId(String sqlQueryId, String sql) return new SqlQuery(sql, null, false, false, false, ImmutableMap.of(BaseQuery.SQL_QUERY_ID, sqlQueryId), null); } - private Pair>> doPost(final SqlQuery query) throws Exception + private Pair>> doPost(final SqlQuery query) throws Exception { return doPost(query, new TypeReference>>() { @@ -1923,7 +1952,7 @@ private Pair>> doPost(final SqlQuery qu } // Returns either an error or a result, assuming the result is a JSON object. 
- private Pair doPost( + private Pair doPost( final SqlQuery query, final TypeReference typeReference ) throws Exception @@ -1931,38 +1960,30 @@ private Pair doPost( return doPost(query, req, typeReference); } - private Pair doPostRaw(final SqlQuery query) throws Exception + private Pair doPostRaw(final SqlQuery query) throws Exception { return doPostRaw(query, req); } - private Pair>> doPost(final SqlQuery query, MockHttpServletRequest req) - throws Exception - { - return doPost(query, req, new TypeReference>>() - { - }); - } - // Returns either an error or a result, assuming the result is a JSON object. @SuppressWarnings("unchecked") - private Pair doPost( + private Pair doPost( final SqlQuery query, final MockHttpServletRequest req, final TypeReference typeReference ) throws Exception { - final Pair pair = doPostRaw(query, req); + final Pair pair = doPostRaw(query, req); if (pair.rhs == null) { //noinspection unchecked - return (Pair) pair; + return (Pair) pair; } else { return Pair.of(pair.lhs, JSON_MAPPER.readValue(pair.rhs, typeReference)); } } // Returns either an error or a result. 
- private Pair doPostRaw(final SqlQuery query, final MockHttpServletRequest req) + private Pair doPostRaw(final SqlQuery query, final MockHttpServletRequest req) throws Exception { MockHttpServletResponse response = postForAsyncResponse(query, req); @@ -1970,7 +1991,7 @@ private Pair doPostRaw(final SqlQuery query, final MockH if (response.getStatus() == 200) { return Pair.of(null, new String(response.baos.toByteArray(), StandardCharsets.UTF_8)); } else { - return Pair.of(JSON_MAPPER.readValue(response.baos.toByteArray(), QueryException.class), null); + return Pair.of(JSON_MAPPER.readValue(response.baos.toByteArray(), ErrorResponse.class), null); } } @@ -2018,16 +2039,16 @@ private Response postForSyncResponse(SqlQuery query, MockHttpServletRequest req) return response; } - private QueryException postSyncForException(String s, int expectedStatus) throws IOException + private ErrorResponse postSyncForException(String s, int expectedStatus) throws IOException { return postSyncForException(createSimpleQueryWithId("id", s), expectedStatus); } - private QueryException postSyncForException(SqlQuery query, int expectedStatus) throws IOException + private ErrorResponse postSyncForException(SqlQuery query, int expectedStatus) throws IOException { final Response response = postForSyncResponse(query, req); assertStatusAndCommonHeaders(response, expectedStatus); - return deserializeResponse(response, QueryException.class); + return deserializeResponse(response, ErrorResponse.class); } private T deserializeResponse(Response resp, Class clazz) throws IOException @@ -2037,9 +2058,13 @@ private T deserializeResponse(Response resp, Class clazz) throws IOExcept private byte[] responseToByteArray(Response resp) throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ((StreamingOutput) resp.getEntity()).write(baos); - return baos.toByteArray(); + if (resp.getEntity() instanceof StreamingOutput) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + 
((StreamingOutput) resp.getEntity()).write(baos); + return baos.toByteArray(); + } else { + return JSON_MAPPER.writeValueAsBytes(resp.getEntity()); + } } private String getContentType(Response resp) @@ -2276,4 +2301,100 @@ public QueryResponse run() }; } } + + private DruidException validateErrorResponse( + ErrorResponse errorResponse, + String errorCode, + DruidException.Persona targetPersona, + DruidException.Category category, + String messageContainsString + ) + { + Assert.assertNotNull(errorResponse); + + DruidException exception = errorResponse.getUnderlyingException(); + + Assert.assertEquals(errorCode, exception.getErrorCode()); + Assert.assertEquals(targetPersona, exception.getTargetPersona()); + Assert.assertEquals(category, exception.getCategory()); + if (messageContainsString == null) { + Assert.assertNull(exception.getMessage()); + } else { + MatcherAssert.assertThat(exception.getMessage(), CoreMatchers.containsString(messageContainsString)); + } + + return exception; + } + + private DruidException validateInvalidSqlError( + ErrorResponse response, + String containsString + ) + { + final DruidException exception = validateInvalidInputError(response, containsString); + Assert.assertEquals("sql", exception.getContextValue("sourceType")); + + return exception; + } + + @Nonnull + private DruidException validateInvalidInputError(ErrorResponse response, String containsString) + { + return validateErrorResponse( + response, + "invalidInput", + DruidException.Persona.USER, + DruidException.Category.INVALID_INPUT, + containsString + ); + } + + private DruidException validateLegacyQueryExceptionErrorResponse( + ErrorResponse errorResponse, + String legacyCode, + String errorClass, + String messageContainsString + ) + { + DruidException exception = validateErrorResponse( + errorResponse, + QueryExceptionCompat.ERROR_CODE, + DruidException.Persona.OPERATOR, + convertToCategory(legacyCode), + messageContainsString + ); + + Assert.assertEquals(legacyCode, 
exception.getContextValue("legacyErrorCode")); + Assert.assertEquals(errorClass, exception.getContextValue("errorClass")); + + return exception; + } + + private static DruidException.Category convertToCategory(String legacyErrorCode) + { + // This code is copied from QueryExceptionCompat at the time of writing. This is because these mappings + // are fundamentally part of the API, so reusing the code from there runs the risk that changes in the mapping + // would change the API but not break the unit tests. So, the unit test uses its own mapping to ensure + // that we are validating and aware of API-affecting changes. + switch (QueryException.fromErrorCode(legacyErrorCode)) { + case USER_ERROR: + return DruidException.Category.INVALID_INPUT; + case UNAUTHORIZED: + return DruidException.Category.UNAUTHORIZED; + case CAPACITY_EXCEEDED: + return DruidException.Category.CAPACITY_EXCEEDED; + case QUERY_RUNTIME_FAILURE: + return DruidException.Category.RUNTIME_FAILURE; + case CANCELED: + return DruidException.Category.CANCELED; + case UNKNOWN: + return DruidException.Category.UNCATEGORIZED; + case UNSUPPORTED: + return DruidException.Category.UNSUPPORTED; + case TIMEOUT: + return DruidException.Category.TIMEOUT; + default: + return DruidException.Category.UNCATEGORIZED; + } + } }