From 8538c8e974175901844a1a4dd243cac25bbb584d Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Fri, 10 Feb 2023 17:10:53 -0800 Subject: [PATCH 01/17] First cut --- .../apache/druid/error/DruidException.java | 238 ++++++++++++++++++ .../calcite/parser/DruidSqlParserUtils.java | 22 +- .../sql/calcite/planner/DruidPlanner.java | 11 + .../sql/calcite/planner/IngestHandler.java | 55 ++-- .../sql/calcite/planner/QueryHandler.java | 21 +- .../calcite/planner/SqlStatementHandler.java | 8 +- 6 files changed, 308 insertions(+), 47 deletions(-) create mode 100644 processing/src/main/java/org/apache/druid/error/DruidException.java diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java new file mode 100644 index 000000000000..ccf715e56ed3 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -0,0 +1,238 @@ +package org.apache.druid.error; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.logger.Logger; + +import com.google.common.collect.ImmutableMap; + +@SuppressWarnings("serial") +public class DruidException extends RuntimeException +{ + public enum ErrorType + { + USER, + SYSTEM, + RESOURCE, + CONFIG, + NETWORK + }; + + public static class Builder + { + private final DruidException source; + private final ErrorType type; + private final String msg; + private Throwable e; + private Map context; + + private Builder(ErrorType type, String msg, Object[] args) + { + this.source = null; + this.type = type; + this.msg = StringUtils.format(msg, args); + } + + private Builder(DruidException e) + { + this.source = e; + this.type = e.type; + this.msg = e.baseMessage(); + this.e = e.getCause() == null ? e : e.getCause(); + this.context = e.context == null ? 
null : new HashMap<>(e.context); + } + + public Builder cause(Exception e) + { + this.e = e; + return this; + } + + public Builder context(String key, Object value) + { + if (context == null) { + context = new HashMap(); + } + context.put(key, value == null ? "" : value.toString()); + return this; + } + + private boolean wasLogged() + { + return source != null && source.logged; + } + + private DruidException build(boolean logged) + { + return new DruidException( + e, + msg, + type, + context == null ? null : ImmutableMap.copyOf(context), + logged || wasLogged() + ); + } + + public DruidException build() + { + return build(false); + } + + public DruidException build(Logger logger) + { + DruidException e = build(true); + if (wasLogged()) { + return e; + } + switch (type) { + case CONFIG: + case SYSTEM: + logger.error(e, e.getMessage()); + break; + case NETWORK: + case RESOURCE: + logger.warn(e, e.getMessage()); + break; + default: + logger.info(e, e.getMessage()); + break; + } + return e; + } + } + + private final ErrorType type; + private final Map context; + private final boolean logged; + + public DruidException( + final Throwable e, + final String msg, + final ErrorType type, + final Map context, + final boolean logged + ) + { + super(msg, e); + this.type = type; + this.context = context; + this.logged = logged; + } + + /** + * Build an error that indicates the user provided incorrect input. + * The user can correct the error by correcting their input (their query, + * REST message, etc.) + */ + public static Builder user(String msg, Object...args) + { + return new Builder(ErrorType.USER, msg, args); + } + + public static DruidException userError(String msg, Object...args) + { + return user(msg, args).build(); + } + + /** + * Build an error that indicates that something went wrong internally + * with Druid. 
This is the equivalent of an assertion failure: errors + * of this type indicate a bug in the code: there is nothing the user + * can do other than request a fix or find a workaround. + */ + public static Builder system(String msg, Object...args) + { + return new Builder(ErrorType.SYSTEM, msg, args); + } + + public static DruidException unexpected(Exception e) + { + return system(e.getMessage()).cause(e).build(); + } + + /** + * Build an error that indicates Druid reached some kind of resource limit: + * memory, disk, CPU, etc. Generally the resolution is to reduce load or + * add resources to Druid. + */ + public static Builder resourceError(String msg, Object...args) + { + return new Builder(ErrorType.RESOURCE, msg, args); + } + + /** + * Build an error that indicates a configuration error which generally means + * that Druid won't start until the user corrects a configuration file or + * similar artifact. + */ + public static Builder configError(String msg, Object...args) + { + return new Builder(ErrorType.CONFIG, msg, args); + } + + /** + * Network I/O, connection, timeout or other error that indicates a problem + * with the client-to-Druid connection, and internal Druid-to-Druid connection, + * or a Druid-to-External error. + */ + public static Builder networkError(String msg, Object...args) + { + return new Builder(ErrorType.NETWORK, msg, args); + } + + /** + * Convert the exception back into a builder, generally so a higher level + * of code can add more context. 
+ */ + public Builder toBuilder() + { + return new Builder(this); + } + + public ErrorType type() + { + return type; + } + + public Map context() + { + return context; + } + + @Override + public String getMessage() + { + StringBuilder buf = new StringBuilder(); + switch (type) + { + case CONFIG: + buf.append("Configuration error: "); + break; + case RESOURCE: + buf.append("Resource error: "); + break; + case SYSTEM: + buf.append("System error: "); + break; + default: + break; + } + buf.append(super.getMessage()); + if (context != null && context.size() > 0) { + for (Map.Entry entry : context.entrySet()) { + buf.append("\n") + .append(entry.getKey()) + .append(": ") + .append(entry.getValue()); + } + } + return buf.toString(); + } + + private String baseMessage() + { + return super.getMessage(); + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java index 9009237b780a..ea6b3f6fb508 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java @@ -33,6 +33,7 @@ import org.apache.calcite.sql.SqlOrderBy; import org.apache.calcite.sql.SqlTimestampLiteral; import org.apache.calcite.tools.ValidationException; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularity; @@ -217,7 +218,7 @@ public static List validateQueryAndConvertToIntervals( SqlNode replaceTimeQuery, Granularity granularity, DateTimeZone dateTimeZone - ) throws ValidationException + ) { if (replaceTimeQuery instanceof SqlLiteral && ALL.equalsIgnoreCase(((SqlLiteral) replaceTimeQuery).toValue())) { return ImmutableList.of(ALL); @@ -230,18 +231,18 @@ public static List validateQueryAndConvertToIntervals( List intervals = 
filtration.getIntervals(); if (filtration.getDimFilter() != null) { - throw new ValidationException("Only " + ColumnHolder.TIME_COLUMN_NAME + " column is supported in OVERWRITE WHERE clause"); + throw DruidException.userError("Only " + ColumnHolder.TIME_COLUMN_NAME + " column is supported in OVERWRITE WHERE clause"); } if (intervals.isEmpty()) { - throw new ValidationException("Intervals for replace are empty"); + throw DruidException.userError("Intervals for replace are empty"); } for (Interval interval : intervals) { DateTime intervalStart = interval.getStart(); DateTime intervalEnd = interval.getEnd(); if (!granularity.bucketStart(intervalStart).equals(intervalStart) || !granularity.bucketStart(intervalEnd).equals(intervalEnd)) { - throw new ValidationException("OVERWRITE WHERE clause contains an interval " + intervals + + throw DruidException.userError("OVERWRITE WHERE clause contains an interval " + intervals + " which is not aligned with PARTITIONED BY granularity " + granularity); } } @@ -321,11 +322,10 @@ public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes * @throws ValidationException if the SqlNode cannot be converted a Dimfilter */ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTimeZone dateTimeZone) - throws ValidationException { if (!(replaceTimeQuery instanceof SqlBasicCall)) { log.error("Expected SqlBasicCall during parsing, but found " + replaceTimeQuery.getClass().getName()); - throw new ValidationException("Invalid OVERWRITE WHERE clause"); + throw DruidException.userError("Invalid OVERWRITE WHERE clause"); } String columnName; SqlBasicCall sqlBasicCall = (SqlBasicCall) replaceTimeQuery; @@ -406,7 +406,7 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi StringComparators.NUMERIC ); default: - throw new ValidationException("Unsupported operation in OVERWRITE WHERE clause: " + sqlBasicCall.getOperator().getName()); + throw 
DruidException.userError("Unsupported operation in OVERWRITE WHERE clause: " + sqlBasicCall.getOperator().getName()); } } @@ -417,10 +417,10 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi * @return string representing the column name * @throws ValidationException if the sql node is not an SqlIdentifier */ - public static String parseColumnName(SqlNode sqlNode) throws ValidationException + public static String parseColumnName(SqlNode sqlNode) { if (!(sqlNode instanceof SqlIdentifier)) { - throw new ValidationException("Expressions must be of the form __time TIMESTAMP"); + throw DruidException.userError("Expressions must be of the form __time TIMESTAMP"); } return ((SqlIdentifier) sqlNode).getSimple(); } @@ -433,10 +433,10 @@ public static String parseColumnName(SqlNode sqlNode) throws ValidationException * @return the timestamp string as milliseconds from epoch * @throws ValidationException if the sql node is not a SqlTimestampLiteral */ - public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) throws ValidationException + public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) { if (!(sqlNode instanceof SqlTimestampLiteral)) { - throw new ValidationException("Expressions must be of the form __time TIMESTAMP"); + throw DruidException.userError("Expressions must be of the form __time TIMESTAMP"); } Timestamp sqlTimestamp = Timestamp.valueOf(((SqlTimestampLiteral) sqlNode).toFormattedString()); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index 59c4cca2851c..5b79d3b64360 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -28,6 +28,7 @@ import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.tools.FrameworkConfig; 
import org.apache.calcite.tools.ValidationException; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.query.QueryContext; import org.apache.druid.server.security.Access; @@ -306,5 +307,15 @@ public PlannerHook hook() { return hook; } + + @Override + public DruidException translateException(Exception e) { + if (e instanceof ValidationException) { + // TODO: Parse line number + return DruidException.user(e.getMessage()).cause(e).build(); + } else { + return DruidException.user(e.getMessage()).cause(e).build(); + } + } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index 459784eb3bb9..5c334a89ac32 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -35,9 +35,12 @@ import org.apache.calcite.sql.SqlOrderBy; import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; +import org.apache.druid.collections.ResourceHolder; import org.apache.druid.common.utils.IdUtils; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularity; +import org.apache.druid.segment.QueryableIndex; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; import org.apache.druid.server.security.ResourceAction; @@ -48,8 +51,10 @@ import org.apache.druid.sql.calcite.parser.DruidSqlReplace; import org.apache.druid.sql.calcite.run.EngineFeature; import org.apache.druid.sql.calcite.run.QueryMaker; +import org.apache.druid.timeline.DataSegment; import java.util.List; +import java.util.function.Supplier; import java.util.regex.Pattern; public abstract class IngestHandler extends QueryHandler @@ -112,13 +117,13 @@ protected 
String operationName() protected abstract DruidSqlIngest ingestNode(); @Override - public void validate() throws ValidationException + public void validate() { if (ingestNode().getPartitionedBy() == null) { - throw new ValidationException(StringUtils.format( + throw DruidException.userError( "%s statements must specify PARTITIONED BY clause explicitly", operationName() - )); + ); } try { PlannerContext plannerContext = handlerContext.plannerContext(); @@ -130,18 +135,20 @@ public void validate() throws ValidationException } } catch (JsonProcessingException e) { - throw new ValidationException("Unable to serialize partition granularity."); + throw DruidException.system( + "Unable to serialize partition granularity." + ) + .context("Value", ingestionGranularity) + .build(); } super.validate(); // Check if CTX_SQL_OUTER_LIMIT is specified and fail the query if it is. CTX_SQL_OUTER_LIMIT being provided causes // the number of rows inserted to be limited which is likely to be confusing and unintended. if (handlerContext.queryContextMap().get(PlannerContext.CTX_SQL_OUTER_LIMIT) != null) { - throw new ValidationException( - StringUtils.format( + throw DruidException.userError( "%s cannot be provided with %s.", PlannerContext.CTX_SQL_OUTER_LIMIT, operationName() - ) ); } targetDatasource = validateAndGetDataSourceForIngest(); @@ -161,15 +168,15 @@ protected RelDataType returnedRowType() * Extract target datasource from a {@link SqlInsert}, and also validate that the ingestion is of a form we support. * Expects the target datasource to be either an unqualified name, or a name qualified by the default schema. 
*/ - private String validateAndGetDataSourceForIngest() throws ValidationException + private String validateAndGetDataSourceForIngest() { final SqlInsert insert = ingestNode(); if (insert.isUpsert()) { - throw new ValidationException("UPSERT is not supported."); + throw DruidException.userError("UPSERT is not supported."); } if (insert.getTargetColumnList() != null) { - throw new ValidationException(operationName() + " with a target column list is not supported."); + throw DruidException.userError(operationName() + " with a target column list is not supported."); } final SqlIdentifier tableIdentifier = (SqlIdentifier) insert.getTargetTable(); @@ -177,7 +184,7 @@ private String validateAndGetDataSourceForIngest() throws ValidationException if (tableIdentifier.names.isEmpty()) { // I don't think this can happen, but include a branch for it just in case. - throw new ValidationException(operationName() + " requires a target table."); + throw DruidException.userError(operationName() + " requires a target table."); } else if (tableIdentifier.names.size() == 1) { // Unqualified name. 
dataSource = Iterables.getOnlyElement(tableIdentifier.names); @@ -189,12 +196,10 @@ private String validateAndGetDataSourceForIngest() throws ValidationException if (tableIdentifier.names.size() == 2 && defaultSchemaName.equals(tableIdentifier.names.get(0))) { dataSource = tableIdentifier.names.get(1); } else { - throw new ValidationException( - StringUtils.format( - "Cannot %s into %s because it is not a Druid datasource.", - operationName(), - tableIdentifier - ) + throw DruidException.userError( + "Cannot %s into %s because it is not a Druid datasource.", + operationName(), + tableIdentifier ); } } @@ -203,7 +208,7 @@ private String validateAndGetDataSourceForIngest() throws ValidationException IdUtils.validateId(operationName() + " dataSource", dataSource); } catch (IllegalArgumentException e) { - throw new ValidationException(e.getMessage()); + throw DruidException.unexpected(e); } return dataSource; @@ -263,12 +268,12 @@ protected DruidSqlIngest ingestNode() } @Override - public void validate() throws ValidationException + public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_INSERT)) { - throw new ValidationException(StringUtils.format( + throw DruidException.userError( "Cannot execute INSERT with SQL engine '%s'.", - handlerContext.engine().name()) + handlerContext.engine().name() ); } super.validate(); @@ -317,17 +322,17 @@ protected DruidSqlIngest ingestNode() } @Override - public void validate() throws ValidationException + public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_REPLACE)) { - throw new ValidationException(StringUtils.format( + throw DruidException.userError( "Cannot execute REPLACE with SQL engine '%s'.", - handlerContext.engine().name()) + handlerContext.engine().name() ); } SqlNode replaceTimeQuery = sqlNode.getReplaceTimeQuery(); if (replaceTimeQuery == null) { - throw new ValidationException("Missing time chunk information in OVERWRITE clause for 
REPLACE. Use " + throw DruidException.userError("Missing time chunk information in OVERWRITE clause for REPLACE. Use " + "OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table."); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index 45b4390d0ba9..84fc2ad7d61a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -54,6 +54,7 @@ import org.apache.calcite.sql.validate.SqlValidator; import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequences; @@ -106,10 +107,14 @@ public QueryHandler(SqlStatementHandler.HandlerContext handlerContext, SqlNode s } @Override - public void validate() throws ValidationException + public void validate() { CalcitePlanner planner = handlerContext.planner(); - validatedQueryNode = planner.validate(rewriteParameters()); + try { + validatedQueryNode = planner.validate(rewriteParameters()); + } catch (ValidationException e) { + throw handlerContext.translateException(e); + } final SqlValidator validator = planner.getValidator(); SqlResourceCollectorShuttle resourceCollectorShuttle = new SqlResourceCollectorShuttle( @@ -183,7 +188,7 @@ private static RelDataType getExplainStructType(RelDataTypeFactory typeFactory) } @Override - public PlannerResult plan() throws ValidationException + public PlannerResult plan() { prepare(); final Set bindableTables = getBindableTables(rootQueryRel.rel); @@ -218,7 +223,7 @@ public PlannerResult plan() throws ValidationException Throwable cannotPlanException = Throwables.getCauseOfType(e, 
RelOptPlanner.CannotPlanException.class); if (null == cannotPlanException) { // Not a CannotPlanException, rethrow without logging. - throw e; + throw handlerContext.translateException(e); } Logger logger = log; @@ -641,13 +646,13 @@ public SelectHandler( } @Override - public void validate() throws ValidationException + public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_SELECT)) { - throw new ValidationException(StringUtils.format( + throw DruidException.userError( "Cannot execute SELECT with SQL engine '%s'.", - handlerContext.engine().name()) - ); + handlerContext.engine().name() + ); } super.validate(); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java index 4cb263c52202..f09f5b41133c 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java @@ -21,7 +21,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.calcite.schema.SchemaPlus; -import org.apache.calcite.tools.ValidationException; +import org.apache.calcite.sql.SqlNode; +import org.apache.druid.error.DruidException; import org.apache.druid.query.QueryContext; import org.apache.druid.server.security.ResourceAction; import org.apache.druid.sql.calcite.run.SqlEngine; @@ -36,11 +37,11 @@ */ public interface SqlStatementHandler { - void validate() throws ValidationException; + void validate(); Set resourceActions(); void prepare(); PrepareResult prepareResult(); - PlannerResult plan() throws ValidationException; + PlannerResult plan(); ExplainAttributes explainAttributes(); /** @@ -57,6 +58,7 @@ interface HandlerContext ObjectMapper jsonMapper(); DateTimeZone timeZone(); PlannerHook hook(); + DruidException translateException(Exception e); } abstract class BaseStatementHandler implements 
SqlStatementHandler From 7c6cc7918bd448a470e2905699fa38563e5ba83e Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Fri, 10 Feb 2023 18:47:42 -0800 Subject: [PATCH 02/17] Snapshot --- .../druid/msq/sql/MSQTaskSqlEngine.java | 2 +- .../apache/druid/error/DruidException.java | 85 ++++++++++++++++++- .../druid/error/RestExceptionEncoder.java | 9 ++ .../error/StandardRestExceptionEncoder.java | 62 ++++++++++++++ .../druid/server/QueryResultPusher.java | 44 ++++++++++ .../apache/druid/sql/AbstractStatement.java | 6 +- .../org/apache/druid/sql/DirectStatement.java | 15 ++-- .../druid/sql/SqlPlanningException.java | 1 - .../sql/calcite/planner/DruidPlanner.java | 73 +++++++++++----- .../sql/calcite/planner/IngestHandler.java | 12 +-- .../sql/calcite/planner/QueryHandler.java | 38 ++++----- .../calcite/planner/SqlStatementHandler.java | 1 - .../sql/calcite/run/NativeSqlEngine.java | 2 +- .../druid/sql/calcite/run/SqlEngine.java | 2 +- .../druid/sql/calcite/run/SqlEngines.java | 19 +++-- .../apache/druid/sql/http/SqlResource.java | 29 +------ 16 files changed, 299 insertions(+), 101 deletions(-) create mode 100644 server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java create mode 100644 server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java index 94c2532ca793..e021cef98b58 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java +++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java @@ -86,7 +86,7 @@ public String name() } @Override - public void validateContext(Map queryContext) throws ValidationException + public void validateContext(Map queryContext) { SqlEngines.validateNoSpecialContextKeys(queryContext, SYSTEM_CONTEXT_PARAMETERS); } 
diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index ccf715e56ed3..b558525f8b25 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -8,15 +8,84 @@ import com.google.common.collect.ImmutableMap; -@SuppressWarnings("serial") +/** + * Represents an error condition exposed to the user and/or operator of Druid. + * Not needed for purely internal exceptions thrown and caught within Druid itself. + * There are categories of error that determine the general form of corrective + * action, and also determine HTTP (or other API) status codes. + *

+ * Druid exceptions can contain context. Use the context for details, such as + * file names, query context variables, symbols, etc. This allows the error + * message itself to be simple. Context allows consumers to filter out various + * bits of information that a site does not wish to expose to the user, while + * still logging the full details. Typical usage: + *


+ * if (something_is_wrong) {
+ *   throw DruidException.user("File not found")
+ *       .context("File name", theFile.getName())
+ *       .context("Directory", theFile.getParent())
+ *       .build();
+ * }
+ * 
+ *

+ * Exceptions are immutable. In many cases, an error is thrown low in the code, + but context is known at a higher level. In this case, the higher code should + catch the exception, convert back to a builder, add context, and throw the + new exception. The original call stack is maintained. Example: + *


+ * catch (DruidException e) {
+ *   throw e.toBuilder()
+ *       .context("File name", theFile.getName())
+ *       .context("Directory", theFile.getParent())
+ *       .build();
+ * }
+ * 
+ */ public class DruidException extends RuntimeException { public enum ErrorType { + /** + * General case of an error due to something the user asked to do in an REST + * request. Translates to an HTTP status 400 (BAD_REQUET) for a REST call + * (or the equivalent for other APIs.) + */ USER, + + /** + * Special case of a user error where a resource is not found and we wish + * to return a 404 (NOT_FOUND) HTTP status (or the equivalent for other + * APIs.) + */ + NOT_FOUND, + + /** + * Error due to a problem beyond the user's control, such as an assertion + * failed, unsupported operation, etc. These indicate problems with the software + * where the fix is either a workaround or a bug fix. Such error should only + * be raised for "should never occur" type situations. + */ SYSTEM, + + /** + * Error for a resource limit: memory, CPU, slots or so on. The workaround is + * generally to try later, get more resources, reduce load or otherwise resolve + * the resource pressure issue. + */ RESOURCE, + + /** + * Error in configuration. Indicates that the administrator made a mistake during + * configuration or setup. The solution is for the administrator (not the end user) + * to resolve the issue. + */ CONFIG, + + /** + * Indicates a network error of some kind: intra-Druid, client-to-Druid, + * Druid-to-external system, etc. Generally the end user cannot fix these errors: + * it requires a DevOps person to resolve. + */ NETWORK }; @@ -39,14 +108,17 @@ private Builder(DruidException e) { this.source = e; this.type = e.type; - this.msg = e.baseMessage(); + this.msg = e.message(); this.e = e.getCause() == null ? e : e.getCause(); this.context = e.context == null ? 
null : new HashMap<>(e.context); } - public Builder cause(Exception e) + public Builder cause(Throwable e) { this.e = e; + if (!msg.equals(e.getMessage())) { + context("Cause", e.getMessage()); + } return this; } @@ -147,6 +219,11 @@ public static Builder system(String msg, Object...args) return new Builder(ErrorType.SYSTEM, msg, args); } + public static Builder notFound(String msg, Object...args) + { + return new Builder(ErrorType.NOT_FOUND, msg, args); + } + public static DruidException unexpected(Exception e) { return system(e.getMessage()).cause(e).build(); @@ -231,7 +308,7 @@ public String getMessage() return buf.toString(); } - private String baseMessage() + public String message() { return super.getMessage(); } diff --git a/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java new file mode 100644 index 000000000000..82efe256f354 --- /dev/null +++ b/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java @@ -0,0 +1,9 @@ +package org.apache.druid.error; + +import javax.ws.rs.core.Response; + +public interface RestExceptionEncoder +{ + Response encode(DruidException e); + Response.ResponseBuilder builder(DruidException e); +} diff --git a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java new file mode 100644 index 000000000000..b905e3b843dd --- /dev/null +++ b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java @@ -0,0 +1,62 @@ +package org.apache.druid.error; + +import com.google.common.collect.ImmutableMap; + +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.Response.Status; + +public class StandardRestExceptionEncoder implements RestExceptionEncoder +{ + private static final RestExceptionEncoder instance = new StandardRestExceptionEncoder(); + + public static 
RestExceptionEncoder instance() + { + return instance; + } + + @Override + public ResponseBuilder builder(DruidException e) + { + ImmutableMap.Builder builder = ImmutableMap.builder(); + builder.put("error", errorCode(e)); + builder.put("message", e.message()); + if (e.context() != null) { + builder.put("context", ImmutableMap.copyOf(e.context())); + } + return Response + .status(status(e)) + .entity(builder.build()); + } + + @Override + public Response encode(DruidException e) + { + return builder(e).build(); + } + + private Object errorCode(DruidException e) + { + return e.type().name(); + } + + // Temporary status mapping + private Status status(DruidException e) + { + switch (e.type()) { + case CONFIG: + case SYSTEM: + case NETWORK: + return Response.Status.INTERNAL_SERVER_ERROR; + case NOT_FOUND: + return Response.Status.NOT_FOUND; + case RESOURCE: + return Response.Status.SERVICE_UNAVAILABLE; + case USER: + return Response.Status.BAD_REQUEST; + default: + // Should never occur + return Response.Status.INTERNAL_SERVER_ERROR; + } + } +} diff --git a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java index ad268a78ef0f..d417338f51a0 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java +++ b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java @@ -23,6 +23,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.io.CountingOutputStream; import org.apache.druid.client.DirectDruidClient; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.StandardRestExceptionEncoder; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.RE; import org.apache.druid.java.util.common.StringUtils; @@ -149,6 +151,9 @@ public Response push() accumulator.close(); resultsWriter.recordSuccess(accumulator.getNumBytesSent()); } + catch (DruidException e) { + return 
handleDruidException(resultsWriter, e); + } catch (QueryException e) { return handleQueryException(resultsWriter, e); } @@ -277,6 +282,45 @@ private Response handleQueryException(ResultsWriter resultsWriter, QueryExceptio } } + private Response handleDruidException(ResultsWriter resultsWriter, DruidException e) + { + if (accumulator != null && accumulator.isInitialized()) { + // We already started sending a response when we got the error message. In this case we just give up + // and hope that the partial stream generates a meaningful failure message for our client. We could consider + // also throwing the exception body into the response to make it easier for the client to choke if it manages + // to parse a meaningful object out, but that's potentially an API change so we leave that as an exercise for + // the future. + + resultsWriter.recordFailure(e); + + // This case is always a failure because the error happened mid-stream of sending results back. Therefore, + // we do not believe that the response stream was actually useable + counter.incrementFailed(); + return null; + } + + switch (e.type()) { + case RESOURCE: + counter.incrementInterrupted(); + break; + case NETWORK: + counter.incrementTimedOut(); + break; + default: + counter.incrementFailed(); + break; + } + + resultsWriter.recordFailure(e); + + final Response.ResponseBuilder bob = StandardRestExceptionEncoder.instance().builder(e); + bob.header(QueryResource.QUERY_ID_RESPONSE_HEADER, queryId); + for (Map.Entry entry : extraHeaders.entrySet()) { + bob.header(entry.getKey(), entry.getValue()); + } + return bob.build(); + } + public interface ResultsWriter extends Closeable { /** diff --git a/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java b/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java index 99d2fa17a618..79b04af1c03f 100644 --- a/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java @@ -21,6 +21,7 
@@ import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.tools.ValidationException; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.query.QueryContexts; import org.apache.druid.server.security.Access; @@ -136,10 +137,11 @@ protected void validate(final DruidPlanner planner) // We can't collapse catch clauses since SqlPlanningException has // type-sensitive constructors. catch (SqlParseException e) { - throw new SqlPlanningException(e); + throw DruidException.user(e.getMessage()).cause(e).build(); } catch (ValidationException e) { - throw new SqlPlanningException(e); + // Should no longer get here: the planner should have done the translation. + throw DruidPlanner.translateException(e); } } diff --git a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java index 62830063d21c..fb5cb3d358bb 100644 --- a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java @@ -20,7 +20,9 @@ package org.apache.druid.sql; import com.google.common.annotations.VisibleForTesting; +import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.tools.ValidationException; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; @@ -225,6 +227,12 @@ public ResultSet plan() reporter.planningTimeNanos(System.nanoTime() - planningStartNanos); return resultSet; } + catch (RelOptPlanner.CannotPlanException e) { + // Not sure if this is even thrown here. 
+ throw DruidException.system("Internal error: cannot plan SQL query") + .cause(e) + .build(); + } catch (RuntimeException e) { state = State.FAILED; reporter.failed(e); @@ -239,12 +247,7 @@ public ResultSet plan() @VisibleForTesting protected PlannerResult createPlan(DruidPlanner planner) { - try { - return planner.plan(); - } - catch (ValidationException e) { - throw new SqlPlanningException(e); - } + return planner.plan(); } /** diff --git a/sql/src/main/java/org/apache/druid/sql/SqlPlanningException.java b/sql/src/main/java/org/apache/druid/sql/SqlPlanningException.java index 41dcf6cc02f6..0d2a40324832 100644 --- a/sql/src/main/java/org/apache/druid/sql/SqlPlanningException.java +++ b/sql/src/main/java/org/apache/druid/sql/SqlPlanningException.java @@ -37,7 +37,6 @@ public enum PlanningError { SQL_PARSE_ERROR(SQL_PARSE_FAILED_ERROR_CODE, SqlParseException.class.getName()), VALIDATION_ERROR(PLAN_VALIDATION_FAILED_ERROR_CODE, ValidationException.class.getName()), - UNSUPPORTED_SQL_ERROR(SQL_QUERY_UNSUPPORTED_ERROR_CODE, RelOptPlanner.CannotPlanException.class.getName()); private final String errorCode; private final String errorClass; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index 5b79d3b64360..5eba91f498b2 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -30,7 +30,9 @@ import org.apache.calcite.tools.ValidationException; import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.UOE; import org.apache.druid.query.QueryContext; +import org.apache.druid.query.QueryInterruptedException; import org.apache.druid.server.security.Access; import org.apache.druid.server.security.Resource; import org.apache.druid.server.security.ResourceAction; @@ -44,6 
+46,8 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * Druid SQL planner. Wraps the underlying Calcite planner with Druid-specific @@ -120,7 +124,7 @@ public AuthResult( * @return set of {@link Resource} corresponding to any Druid datasources * or views which are taking part in the query. */ - public void validate() throws SqlParseException, ValidationException + public void validate() { Preconditions.checkState(state == State.START); @@ -130,22 +134,20 @@ public void validate() throws SqlParseException, ValidationException // Parse the query string. String sql = plannerContext.getSql(); hook.captureSql(sql); - SqlNode root = planner.parse(sql); - handler = createHandler(root); - + SqlNode root; try { - handler.validate(); - plannerContext.setResourceActions(handler.resourceActions()); - plannerContext.setExplainAttributes(handler.explainAttributes()); - } - catch (RuntimeException e) { - throw new ValidationException(e); + root = planner.parse(sql); + } catch (SqlParseException e1) { + throw translateException(e1); } - + handler = createHandler(root); + handler.validate(); + plannerContext.setResourceActions(handler.resourceActions()); + plannerContext.setExplainAttributes(handler.explainAttributes()); state = State.VALIDATED; } - private SqlStatementHandler createHandler(final SqlNode node) throws ValidationException + private SqlStatementHandler createHandler(final SqlNode node) { SqlNode query = node; SqlExplain explain = null; @@ -166,7 +168,9 @@ private SqlStatementHandler createHandler(final SqlNode node) throws ValidationE if (query.isA(SqlKind.QUERY)) { return new QueryHandler.SelectHandler(handlerContext, query, explain); } - throw new ValidationException(StringUtils.format("Cannot execute [%s].", node.getKind())); + throw DruidException.user("Unsupported SQL statement") + .context("Statement kind", node.getKind()) + .build(); } /** @@ -228,7 +232,7 
@@ public AuthResult authorize( *

* Planning reuses the validation done in {@code validate()} which must be called first. */ - public PlannerResult plan() throws ValidationException + public PlannerResult plan() { Preconditions.checkState(state == State.VALIDATED || state == State.PREPARED); Preconditions.checkState(authorized); @@ -307,15 +311,42 @@ public PlannerHook hook() { return hook; } + } - @Override - public DruidException translateException(Exception e) { - if (e instanceof ValidationException) { - // TODO: Parse line number - return DruidException.user(e.getMessage()).cause(e).build(); - } else { - return DruidException.user(e.getMessage()).cause(e).build(); + public static DruidException translateException(Exception e) { + try { + throw e; + } + catch (DruidException inner) { + return inner; + } + catch (ValidationException | SqlParseException inner) { + // Calcite exception that probably includes a position. + String msg = inner.getMessage(); + Pattern p = Pattern.compile("From line (\\d+), column (\\d+) to line \\d+, column \\d+: (.*)$"); + Matcher m = p.matcher(msg); + if (m.matches()) { + return DruidException + .user(m.group(3)) + .cause(e) + .context("Line", m.group(1)) + .context("Column", m.group(2)) + .build(); } } + // There is a claim that Calcite sometimes throws a java.lang.AssertionError, but we do not have a test that can + // reproduce it checked into the code (the best we have is something that uses mocks to throw an Error, which is + // dubious at best). 
We keep this just in case, but it might be best to remove it and see where the + // AssertionErrors are coming from and do something to ensure that they don't actually make it out of Calcite + catch (AssertionError inner) { + return DruidException.resourceError("AssertionError killed query") + .cause(inner) + .build(); + } + catch (Exception inner) { + // Anything else + return DruidException.user(e.getMessage()).cause(inner).build(); + } + throw new UOE("Should not get here"); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index 5c334a89ac32..36af54ff7082 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -82,7 +82,7 @@ public abstract class IngestHandler extends QueryHandler handlerContext.hook().captureInsert(ingestNode); } - protected static SqlNode convertQuery(DruidSqlIngest sqlNode) throws ValidationException + protected static SqlNode convertQuery(DruidSqlIngest sqlNode) { SqlNode query = sqlNode.getSource(); @@ -92,11 +92,11 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) throws ValidationE SqlNodeList orderByList = sqlOrderBy.orderList; if (!(orderByList == null || orderByList.equals(SqlNodeList.EMPTY))) { String opName = sqlNode.getOperator().getName(); - throw new ValidationException(StringUtils.format( + throw DruidException.userError( "Cannot have ORDER BY on %s %s statement, use CLUSTERED BY instead.", "INSERT".equals(opName) ? 
"an" : "a", opName - )); + ); } } if (sqlNode.getClusteredBy() != null) { @@ -104,7 +104,7 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) throws ValidationE } if (!query.isA(SqlKind.QUERY)) { - throw new ValidationException(StringUtils.format("Cannot execute [%s].", query.getKind())); + throw DruidException.userError("Cannot execute [%s].", query.getKind()); } return query; } @@ -251,7 +251,7 @@ public InsertHandler( SqlStatementHandler.HandlerContext handlerContext, DruidSqlInsert sqlNode, SqlExplain explain - ) throws ValidationException + ) { super( handlerContext, @@ -304,7 +304,7 @@ public ReplaceHandler( SqlStatementHandler.HandlerContext handlerContext, DruidSqlReplace sqlNode, SqlExplain explain - ) throws ValidationException + ) { super( handlerContext, diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index 84fc2ad7d61a..f26cb8c8901d 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -72,7 +72,6 @@ import org.apache.druid.sql.calcite.run.EngineFeature; import org.apache.druid.sql.calcite.run.QueryMaker; import org.apache.druid.sql.calcite.table.DruidTable; -import org.apache.druid.utils.Throwables; import javax.annotation.Nullable; import java.util.ArrayList; @@ -113,7 +112,7 @@ public void validate() try { validatedQueryNode = planner.validate(rewriteParameters()); } catch (ValidationException e) { - throw handlerContext.translateException(e); + throw DruidPlanner.translateException(e); } final SqlValidator validator = planner.getValidator(); @@ -219,20 +218,15 @@ public PlannerResult plan() return planForDruid(); } } - catch (Exception e) { - Throwable cannotPlanException = Throwables.getCauseOfType(e, RelOptPlanner.CannotPlanException.class); - if (null == cannotPlanException) { - // Not a CannotPlanException, 
rethrow without logging. - throw handlerContext.translateException(e); - } - + catch (RelOptPlanner.CannotPlanException e) { Logger logger = log; if (!handlerContext.queryContext().isDebug()) { logger = log.noStackTrace(); } - String errorMessage = buildSQLPlanningErrorMessage(cannotPlanException); - logger.warn(e, errorMessage); - throw new UnsupportedSQLQueryException(errorMessage); + throw buildSQLPlanningError(e, logger); + } + catch (Exception e) { + throw DruidPlanner.translateException(e); } } @@ -616,23 +610,23 @@ private RelRoot possiblyWrapRootWithOuterLimitFromContext(RelRoot root) protected abstract QueryMaker buildQueryMaker(RelRoot rootQueryRel) throws ValidationException; - private String buildSQLPlanningErrorMessage(Throwable exception) + private DruidException buildSQLPlanningError(Throwable exception, Logger logger) { + DruidException.Builder builder = DruidException.system("Unsupported query") + .cause(exception) + .context("SQL", handlerContext.plannerContext().getSql()); String errorMessage = handlerContext.plannerContext().getPlanningError(); if (null == errorMessage && exception instanceof UnsupportedSQLQueryException) { - errorMessage = exception.getMessage(); + builder.context("Specific error", errorMessage); } if (null == errorMessage) { - errorMessage = "Please check Broker logs for additional details."; + builder.context("Note", "Please check Broker logs for additional details."); } else { - // Planning errors are more like hints: it isn't guaranteed that the planning error is actually what went wrong. - errorMessage = "Possible error: " + errorMessage; + // Planning errors are more like hints: it isn't guaranteed that the + // planning error is actually what went wrong. + builder.context("Possible error", errorMessage); } - // Finally, add the query itself to error message that user will get. - return StringUtils.format( - "Query not supported. 
%s SQL was: %s", errorMessage, - handlerContext.plannerContext().getSql() - ); + return builder.build(logger); } public static class SelectHandler extends QueryHandler diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java index f09f5b41133c..40eeb326f8ad 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java @@ -58,7 +58,6 @@ interface HandlerContext ObjectMapper jsonMapper(); DateTimeZone timeZone(); PlannerHook hook(); - DruidException translateException(Exception e); } abstract class BaseStatementHandler implements SqlStatementHandler diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java index 3d952accda3e..2325a6f00840 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java @@ -77,7 +77,7 @@ public String name() } @Override - public void validateContext(Map queryContext) throws ValidationException + public void validateContext(Map queryContext) { SqlEngines.validateNoSpecialContextKeys(queryContext, SYSTEM_CONTEXT_PARAMETERS); validateJoinAlgorithm(queryContext); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngine.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngine.java index 22c8545dd67e..678ded23e9da 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngine.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngine.java @@ -46,7 +46,7 @@ public interface SqlEngine * Validates a provided query context. Returns quietly if the context is OK; throws {@link ValidationException} * if the context has a problem. 
*/ - void validateContext(Map queryContext) throws ValidationException; + void validateContext(Map queryContext); /** * SQL row type that would be emitted by the {@link QueryMaker} from {@link #buildQueryMakerForSelect}. diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java index 30dd7926bd20..53f12952b410 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java @@ -20,7 +20,7 @@ package org.apache.druid.sql.calcite.run; import org.apache.calcite.tools.ValidationException; -import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.error.DruidException; import java.util.Map; import java.util.Set; @@ -35,17 +35,18 @@ public class SqlEngines * * This is a helper function used by {@link SqlEngine#validateContext} implementations. */ - public static void validateNoSpecialContextKeys(final Map queryContext, final Set specialContextKeys) - throws ValidationException + public static void validateNoSpecialContextKeys( + final Map queryContext, + final Set specialContextKeys + ) { for (String contextParameterName : queryContext.keySet()) { if (specialContextKeys.contains(contextParameterName)) { - throw new ValidationException( - StringUtils.format( - "Cannot execute query with context parameter [%s]", - contextParameterName - ) - ); + throw DruidException + .user("Cannot execute query with context parameter") + .context("Parameter", contextParameterName) + .context("Value", queryContext.get(contextParameterName)) + .build(); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java index dad391e6bf04..a100194d16aa 100644 --- a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java +++ b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java @@ -177,25 +177,21 @@ private 
static class SqlResourceQueryMetricCounter implements QueryResource.Quer @Override public void incrementSuccess() { - } @Override public void incrementFailed() { - } @Override public void incrementInterrupted() { - } @Override public void incrementTimedOut() { - } } @@ -254,28 +250,9 @@ public ResultsWriter start() @Nullable public Response.ResponseBuilder start() { - try { - thePlan = stmt.plan(); - queryResponse = thePlan.run(); - return null; - } - catch (RelOptPlanner.CannotPlanException e) { - throw new SqlPlanningException( - SqlPlanningException.PlanningError.UNSUPPORTED_SQL_ERROR, - e.getMessage() - ); - } - // There is a claim that Calcite sometimes throws a java.lang.AssertionError, but we do not have a test that can - // reproduce it checked into the code (the best we have is something that uses mocks to throw an Error, which is - // dubious at best). We keep this just in case, but it might be best to remove it and see where the - // AssertionErrors are coming from and do something to ensure that they don't actually make it out of Calcite - catch (AssertionError e) { - log.warn(e, "AssertionError killed query: %s", sqlQuery); - - // We wrap the exception here so that we get the sanitization. java.lang.AssertionError apparently - // doesn't implement org.apache.druid.common.exception.SanitizableException. 
- throw new QueryInterruptedException(e); - } + thePlan = stmt.plan(); + queryResponse = thePlan.run(); + return null; } @Override From 782ba8a360267ecca5ba4c7f0a67485b585a29bf Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Sat, 11 Feb 2023 17:44:39 -0800 Subject: [PATCH 03/17] Tested SQL planner messages Parser errors, validation errors, ad-hoc errors --- .../apache/druid/msq/sql/SqlTaskResource.java | 16 ++- .../apache/druid/msq/exec/MSQInsertTest.java | 14 +-- .../apache/druid/msq/exec/MSQReplaceTest.java | 8 +- .../apache/druid/msq/exec/MSQSelectTest.java | 13 +- .../apache/druid/error/DruidException.java | 104 ++++++++++----- .../druid/error/RestExceptionEncoder.java | 19 +++ .../error/StandardRestExceptionEncoder.java | 72 ++++++++--- .../druid/server/QueryResultPusher.java | 6 + .../security/AllowOptionsResourceFilter.java | 2 - .../PreResponseAuthorizationCheckFilter.java | 2 +- .../apache/druid/sql/AbstractStatement.java | 16 +-- .../org/apache/druid/sql/DirectStatement.java | 1 - .../druid/sql/SqlPlanningException.java | 1 + .../sql/calcite/planner/DruidPlanner.java | 93 +++++++++----- .../sql/calcite/planner/DruidRexExecutor.java | 40 ++++-- .../sql/calcite/planner/IngestHandler.java | 5 - .../sql/calcite/planner/PlannerFactory.java | 9 +- .../sql/calcite/planner/QueryHandler.java | 33 +++-- .../planner/RelParameterizerShuttle.java | 14 +-- .../sql/calcite/run/NativeSqlEngine.java | 1 - .../apache/druid/sql/http/SqlResource.java | 4 +- .../apache/druid/sql/SqlStatementTest.java | 46 +++++-- .../sql/calcite/BaseCalciteQueryTest.java | 31 +++-- .../calcite/CalciteCorrelatedQueryTest.java | 1 - .../sql/calcite/CalciteInsertDmlTest.java | 44 +++---- .../CalciteMultiValueStringQueryTest.java | 6 +- .../calcite/CalciteParameterQueryTest.java | 8 +- .../druid/sql/calcite/CalciteQueryTest.java | 119 +++++++++++------- .../sql/calcite/CalciteReplaceDmlTest.java | 44 +++---- .../sql/calcite/CalciteSelectQueryTest.java | 4 +- 30 files changed, 487 insertions(+), 
289 deletions(-) diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java index f0cd7318f644..4684bac70a45 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java +++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java @@ -23,8 +23,9 @@ import com.google.common.collect.ImmutableMap; import com.google.common.io.CountingOutputStream; import com.google.inject.Inject; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.druid.common.exception.SanitizableException; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.StandardRestExceptionEncoder; import org.apache.druid.guice.annotations.MSQ; import org.apache.druid.indexer.TaskState; import org.apache.druid.java.util.common.guava.Sequence; @@ -64,6 +65,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.StreamingOutput; + import java.io.IOException; import java.util.Collections; @@ -159,6 +161,10 @@ public Response doPost( return buildStandardResponse(sequence, sqlQuery, sqlQueryId, rowTransformer); } } + catch (DruidException e) { + stmt.reporter().failed(e); + return StandardRestExceptionEncoder.instance().encode(e); + } // Kitchen-sinking the errors since they are all unchecked. // Just copied from SqlResource. 
catch (QueryCapacityExceededException cap) { @@ -182,14 +188,6 @@ public Response doPost( throw (ForbiddenException) serverConfig.getErrorResponseTransformStrategy() .transformIfNeeded(e); // let ForbiddenExceptionMapper handle this } - catch (RelOptPlanner.CannotPlanException e) { - stmt.reporter().failed(e); - SqlPlanningException spe = new SqlPlanningException( - SqlPlanningException.PlanningError.UNSUPPORTED_SQL_ERROR, - e.getMessage() - ); - return buildNonOkResponse(BadQueryException.STATUS_CODE, spe, sqlQueryId); - } // Calcite throws a java.lang.AssertionError which is type Error not Exception. Using Throwable catches both. catch (Throwable e) { stmt.reporter().failed(e); diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java index b55de6c165c9..940798c2b177 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java @@ -24,6 +24,7 @@ import com.google.common.collect.ImmutableSet; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; +import org.apache.druid.error.DruidException; import org.apache.druid.common.config.NullHandling; import org.apache.druid.hll.HyperLogLogCollector; import org.apache.druid.java.util.common.ISE; @@ -43,7 +44,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.column.ValueType; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.timeline.SegmentId; import org.apache.druid.utils.CompressionUtils; import org.hamcrest.CoreMatchers; @@ -54,6 +54,7 @@ import org.mockito.Mockito; import javax.annotation.Nonnull; + import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -92,7 +93,6 @@ 
public static Collection data() @Parameterized.Parameter(1) public Map context; - @Test public void testInsertOnFoo1() { @@ -542,7 +542,7 @@ public void testInsertOnFoo1WithMultiValueMeasureGroupBy() .setExpectedDataSource("foo1") .setQueryContext(context) .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "Aggregate expression is illegal in GROUP BY clause")) )) @@ -967,7 +967,7 @@ public void testInsertWrongTypeTimestamp() .setExpectedRowSignature(rowSignature) .setQueryContext(context) .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "Field \"__time\" must be of type TIMESTAMP")) )) @@ -980,7 +980,7 @@ public void testIncorrectInsertQuery() testIngestQuery().setSql( "insert into foo1 select __time, dim1 , count(*) as cnt from foo where dim1 is not null group by 1, 2 clustered by dim1") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause")) )) @@ -1098,7 +1098,7 @@ public void testInsertLimitWithPeriodGranularityThrowsException() + "LIMIT 50 " + "PARTITIONED BY MONTH") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) )) @@ -1116,7 +1116,7 @@ public void testInsertOffsetThrowsException() + "OFFSET 10" + "PARTITIONED BY ALL TIME") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have an OFFSET")) )) diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java index 1dfd7742146e..c8dd8ed7e0b4 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; +import org.apache.druid.error.DruidException; import org.apache.druid.common.config.NullHandling; import org.apache.druid.indexing.common.actions.RetrieveUsedSegmentsAction; import org.apache.druid.java.util.common.Intervals; @@ -43,6 +44,7 @@ import org.mockito.Mockito; import javax.annotation.Nonnull; + import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -332,7 +334,7 @@ public void testReplaceIncorrectSyntax() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - 
CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.")) ) @@ -582,7 +584,7 @@ public void testReplaceLimitWithPeriodGranularityThrowsException() + "PARTITIONED BY MONTH") .setQueryContext(context) .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) )) @@ -600,7 +602,7 @@ public void testReplaceOffsetThrowsException() + "OFFSET 10" + "PARTITIONED BY ALL TIME") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have an OFFSET")) )) diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java index b09e3aa01f11..2c0728389186 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java @@ -25,6 +25,7 @@ import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.JsonInputFormat; import org.apache.druid.data.input.impl.LocalInputSource; +import org.apache.druid.error.DruidException; import org.apache.druid.frame.util.DurableStorageUtils; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.ISE; @@ 
-64,7 +65,6 @@ import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.join.JoinType; import org.apache.druid.segment.virtual.ExpressionVirtualColumn; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.external.ExternalDataSource; import org.apache.druid.sql.calcite.filtration.Filtration; @@ -83,6 +83,7 @@ import org.mockito.Mockito; import javax.annotation.Nonnull; + import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -1187,7 +1188,7 @@ public void testIncorrectSelectQuery() testSelectQuery() .setSql("select a from ") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Encountered \"from \"")) )) .setQueryContext(context) @@ -1202,7 +1203,7 @@ public void testSelectOnInformationSchemaSource() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "Cannot query table INFORMATION_SCHEMA.SCHEMATA with SQL engine 'msq-task'.")) ) @@ -1218,7 +1219,7 @@ public void testSelectOnSysSource() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "Cannot query table sys.segments with SQL engine 'msq-task'.")) ) @@ -1234,7 +1235,7 @@ public void testSelectOnSysSourceWithJoin() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), 
ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "Cannot query table sys.segments with SQL engine 'msq-task'.")) ) @@ -1251,7 +1252,7 @@ public void testSelectOnSysSourceContainingWith() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "Cannot query table sys.segments with SQL engine 'msq-task'.")) ) diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index b558525f8b25..e58c15e47941 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -1,12 +1,30 @@ -package org.apache.druid.error; +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ -import java.util.HashMap; -import java.util.Map; +package org.apache.druid.error; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; -import com.google.common.collect.ImmutableMap; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; /** * Represents an error condition exposed to the user and/or operator of Druid. @@ -74,6 +92,12 @@ public enum ErrorType */ RESOURCE, + /** + * Similar to RESOURCE, except indicates a timeout, perhaps due to load, due + * to an external system being unavailable, etc. + */ + TIMEOUT, + /** * Error in configuration. Indicates that the administrator made a mistake during * configuration or setup. The solution is for the administrator (not the end user) @@ -89,6 +113,9 @@ public enum ErrorType NETWORK }; + public static final String ERROR_CODE = "Error Code"; + public static final String HOST = "Host"; + public static class Builder { private final DruidException source; @@ -125,7 +152,8 @@ public Builder cause(Throwable e) public Builder context(String key, Object value) { if (context == null) { - context = new HashMap(); + // Used linked hash map to preserve order + context = new LinkedHashMap(); } context.put(key, value == null ? "" : value.toString()); return this; @@ -142,7 +170,8 @@ private DruidException build(boolean logged) e, msg, type, - context == null ? null : ImmutableMap.copyOf(context), + // Used linked hash map to preserve order + context == null ? 
null : new LinkedHashMap<>(context), logged || wasLogged() ); } @@ -159,20 +188,26 @@ public DruidException build(Logger logger) return e; } switch (type) { - case CONFIG: - case SYSTEM: - logger.error(e, e.getMessage()); - break; - case NETWORK: - case RESOURCE: - logger.warn(e, e.getMessage()); - break; - default: - logger.info(e, e.getMessage()); - break; + case CONFIG: + case SYSTEM: + logger.error(e, e.getMessage()); + break; + case NETWORK: + case RESOURCE: + logger.warn(e, e.getMessage()); + break; + default: + logger.info(e, e.getMessage()); + break; } return e; } + + @Override + public String toString() + { + return build().getMessage(); + } } private final ErrorType type; @@ -239,6 +274,11 @@ public static Builder resourceError(String msg, Object...args) return new Builder(ErrorType.RESOURCE, msg, args); } + public static Builder timeoutError(String msg, Object...args) + { + return new Builder(ErrorType.TIMEOUT, msg, args); + } + /** * Build an error that indicates a configuration error which generally means * that Druid won't start until the user corrects a configuration file or @@ -278,23 +318,27 @@ public Map context() return context; } + public String context(String key) + { + return context.get(key); + } + @Override public String getMessage() { StringBuilder buf = new StringBuilder(); - switch (type) - { - case CONFIG: - buf.append("Configuration error: "); - break; - case RESOURCE: - buf.append("Resource error: "); - break; - case SYSTEM: - buf.append("System error: "); - break; - default: - break; + switch (type) { + case CONFIG: + buf.append("Configuration error: "); + break; + case RESOURCE: + buf.append("Resource error: "); + break; + case SYSTEM: + buf.append("System error: "); + break; + default: + break; } buf.append(super.getMessage()); if (context != null && context.size() > 0) { diff --git a/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java index 
82efe256f354..29050f012b1c 100644 --- a/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java +++ b/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.apache.druid.error; import javax.ws.rs.core.Response; diff --git a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java index b905e3b843dd..82cea864f35b 100644 --- a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java +++ b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.apache.druid.error; import com.google.common.collect.ImmutableMap; @@ -6,23 +25,38 @@ import javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.Response.Status; +import java.util.HashMap; +import java.util.Map; + public class StandardRestExceptionEncoder implements RestExceptionEncoder { - private static final RestExceptionEncoder instance = new StandardRestExceptionEncoder(); + private static final RestExceptionEncoder INSTANCE = new StandardRestExceptionEncoder(); public static RestExceptionEncoder instance() { - return instance; + return INSTANCE; } @Override public ResponseBuilder builder(DruidException e) { ImmutableMap.Builder builder = ImmutableMap.builder(); - builder.put("error", errorCode(e)); + builder.put("type", errorCode(e)); builder.put("message", e.message()); - if (e.context() != null) { - builder.put("context", ImmutableMap.copyOf(e.context())); + builder.put("errorMessage", e.getMessage()); + if (e.context() != null && !e.context().isEmpty()) { + Map context = new HashMap<>(e.context()); + String errorCode = context.remove(DruidException.ERROR_CODE); + if (errorCode != null) { + builder.put("errorCode", errorCode); + } + String host = context.remove(DruidException.HOST); + if (host != null) { + builder.put("host", host); + } + if (!context.isEmpty()) { + builder.put("context", context); + } } return Response .status(status(e)) @@ -44,19 +78,21 @@ private Object errorCode(DruidException e) private Status status(DruidException e) { switch (e.type()) { - case CONFIG: - case SYSTEM: - case NETWORK: 
- return Response.Status.INTERNAL_SERVER_ERROR; - case NOT_FOUND: - return Response.Status.NOT_FOUND; - case RESOURCE: - return Response.Status.SERVICE_UNAVAILABLE; - case USER: - return Response.Status.BAD_REQUEST; - default: - // Should never occur - return Response.Status.INTERNAL_SERVER_ERROR; + case CONFIG: + case SYSTEM: + case NETWORK: + return Response.Status.INTERNAL_SERVER_ERROR; + case TIMEOUT: + return Response.Status.fromStatusCode(504); // No predefined status name + case NOT_FOUND: + return Response.Status.NOT_FOUND; + case RESOURCE: + return Response.Status.fromStatusCode(429); // No predefined status name + case USER: + return Response.Status.BAD_REQUEST; + default: + // Should never occur + return Response.Status.INTERNAL_SERVER_ERROR; } } } diff --git a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java index d417338f51a0..44d5d3965aef 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java +++ b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java @@ -35,6 +35,7 @@ import org.apache.druid.query.QueryInterruptedException; import org.apache.druid.query.TruncatedResponseContextException; import org.apache.druid.query.context.ResponseContext; +import org.apache.druid.server.security.AuthConfig; import org.apache.druid.server.security.ForbiddenException; import javax.annotation.Nullable; @@ -152,6 +153,11 @@ public Response push() resultsWriter.recordSuccess(accumulator.getNumBytesSent()); } catch (DruidException e) { + // Less than ideal. But, if we return the result as JSON, this is + // the only way for the security filter to know that, yes, it is OK + // to show the user this error even if we didn't get to the step where + // we did a security check. 
+ request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); return handleDruidException(resultsWriter, e); } catch (QueryException e) { diff --git a/server/src/main/java/org/apache/druid/server/security/AllowOptionsResourceFilter.java b/server/src/main/java/org/apache/druid/server/security/AllowOptionsResourceFilter.java index 46fe78a0a470..776f5b6df4d3 100644 --- a/server/src/main/java/org/apache/druid/server/security/AllowOptionsResourceFilter.java +++ b/server/src/main/java/org/apache/druid/server/security/AllowOptionsResourceFilter.java @@ -44,7 +44,6 @@ public AllowOptionsResourceFilter( @Override public void init(FilterConfig filterConfig) { - } @Override @@ -78,6 +77,5 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha @Override public void destroy() { - } } diff --git a/server/src/main/java/org/apache/druid/server/security/PreResponseAuthorizationCheckFilter.java b/server/src/main/java/org/apache/druid/server/security/PreResponseAuthorizationCheckFilter.java index 97d3e05a5f33..454d8566f29f 100644 --- a/server/src/main/java/org/apache/druid/server/security/PreResponseAuthorizationCheckFilter.java +++ b/server/src/main/java/org/apache/druid/server/security/PreResponseAuthorizationCheckFilter.java @@ -100,7 +100,7 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo if (authInfoChecked != null && !authInfoChecked && response.getStatus() != HttpServletResponse.SC_FORBIDDEN) { handleAuthorizationCheckError( - "Request's authorization check failed but status code was not 403.", + "Request's authorization check failed but status code was not 403", request, response ); diff --git a/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java b/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java index 79b04af1c03f..bfa95c5d5562 100644 --- a/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/AbstractStatement.java @@ -19,9 
+19,6 @@ package org.apache.druid.sql; -import org.apache.calcite.sql.parser.SqlParseException; -import org.apache.calcite.tools.ValidationException; -import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.query.QueryContexts; import org.apache.druid.server.security.Access; @@ -131,18 +128,7 @@ protected void validate(final DruidPlanner planner) plannerContext = planner.getPlannerContext(); plannerContext.setAuthenticationResult(queryPlus.authResult()); plannerContext.setParameters(queryPlus.parameters()); - try { - planner.validate(); - } - // We can't collapse catch clauses since SqlPlanningException has - // type-sensitive constructors. - catch (SqlParseException e) { - throw DruidException.user(e.getMessage()).cause(e).build(); - } - catch (ValidationException e) { - // Should no longer get here: the planner should have done the translation. - throw DruidPlanner.translateException(e); - } + planner.validate(); } /** diff --git a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java index fb5cb3d358bb..b993eb6f0e69 100644 --- a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java @@ -21,7 +21,6 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.calcite.plan.RelOptPlanner; -import org.apache.calcite.tools.ValidationException; import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; diff --git a/sql/src/main/java/org/apache/druid/sql/SqlPlanningException.java b/sql/src/main/java/org/apache/druid/sql/SqlPlanningException.java index 0d2a40324832..41dcf6cc02f6 100644 --- a/sql/src/main/java/org/apache/druid/sql/SqlPlanningException.java +++ b/sql/src/main/java/org/apache/druid/sql/SqlPlanningException.java @@ -37,6 +37,7 @@ public enum 
PlanningError { SQL_PARSE_ERROR(SQL_PARSE_FAILED_ERROR_CODE, SqlParseException.class.getName()), VALIDATION_ERROR(PLAN_VALIDATION_FAILED_ERROR_CODE, ValidationException.class.getName()), + UNSUPPORTED_SQL_ERROR(SQL_QUERY_UNSUPPORTED_ERROR_CODE, RelOptPlanner.CannotPlanException.class.getName()); private final String errorCode; private final String errorClass; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index 5eba91f498b2..ebe908085e26 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -21,6 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; +import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.schema.SchemaPlus; import org.apache.calcite.sql.SqlExplain; import org.apache.calcite.sql.SqlKind; @@ -29,10 +30,8 @@ import org.apache.calcite.tools.FrameworkConfig; import org.apache.calcite.tools.ValidationException; import org.apache.druid.error.DruidException; -import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.java.util.common.UOE; import org.apache.druid.query.QueryContext; -import org.apache.druid.query.QueryInterruptedException; +import org.apache.druid.query.QueryException; import org.apache.druid.server.security.Access; import org.apache.druid.server.security.Resource; import org.apache.druid.server.security.ResourceAction; @@ -137,7 +136,8 @@ public void validate() SqlNode root; try { root = planner.parse(sql); - } catch (SqlParseException e1) { + } + catch (SqlParseException e1) { throw translateException(e1); } handler = createHandler(root); @@ -170,6 +170,7 @@ private SqlStatementHandler createHandler(final SqlNode node) } throw DruidException.user("Unsupported SQL statement") .context("Statement kind", node.getKind()) + 
.context(DruidException.ERROR_CODE, QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE) .build(); } @@ -313,40 +314,76 @@ public PlannerHook hook() } } - public static DruidException translateException(Exception e) { + public static DruidException translateException(Exception e) + { try { throw e; } catch (DruidException inner) { return inner; } - catch (ValidationException | SqlParseException inner) { - // Calcite exception that probably includes a position. - String msg = inner.getMessage(); - Pattern p = Pattern.compile("From line (\\d+), column (\\d+) to line \\d+, column \\d+: (.*)$"); - Matcher m = p.matcher(msg); - if (m.matches()) { - return DruidException - .user(m.group(3)) - .cause(e) - .context("Line", m.group(1)) - .context("Column", m.group(2)) - .build(); - } + catch (ValidationException inner) { + return parseValidationMessage(inner, QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE); + } + catch (SqlParseException inner) { + return parseParserMessage(inner); } - // There is a claim that Calcite sometimes throws a java.lang.AssertionError, but we do not have a test that can - // reproduce it checked into the code (the best we have is something that uses mocks to throw an Error, which is - // dubious at best). We keep this just in case, but it might be best to remove it and see where the - // AssertionErrors are coming from and do something to ensure that they don't actually make it out of Calcite - catch (AssertionError inner) { - return DruidException.resourceError("AssertionError killed query") + catch (RelOptPlanner.CannotPlanException inner) { + return parseValidationMessage(inner, QueryException.QUERY_UNSUPPORTED_ERROR_CODE); + } + catch (Exception inner) { + // Anything else. Should not get here. Anything else should already have + // been translated to a DruidException unless it is an unexpected exception. 
+ return DruidException + .system(e.getMessage()) .cause(inner) + .context(DruidException.ERROR_CODE, QueryException.UNKNOWN_EXCEPTION_ERROR_CODE) .build(); } - catch (Exception inner) { - // Anything else - return DruidException.user(e.getMessage()).cause(inner).build(); + } + + private static DruidException parseValidationMessage(Exception e, String errorCode) + { + // Calcite exception that probably includes a position. + String msg = e.getMessage(); + Pattern p = Pattern.compile("(?:org\\..*: )From line (\\d+), column (\\d+) to line \\d+, column \\d+: (.*)$"); + Matcher m = p.matcher(msg); + DruidException.Builder builder; + if (m.matches()) { + builder = DruidException + .user(m.group(3)) + .context("Line", m.group(1)) + .context("Column", m.group(2)); + } else { + builder = DruidException.user(msg).cause(e); + } + return builder + .context(DruidException.ERROR_CODE, errorCode) + .build(); + } + + private static DruidException parseParserMessage(Exception e) + { + // Calcite exception that probably includes a position. 
+ String msg = e.getMessage(); + Pattern p = Pattern.compile( + "Encountered \"(.*)\" at line (\\d+), column (\\d+).\nWas expecting one of:\n(.*)", + Pattern.MULTILINE | Pattern.DOTALL + ); + Matcher m = p.matcher(msg); + DruidException.Builder builder; + if (m.matches()) { + String choices = m.group(4).trim().replaceAll("[ .]*\n\\s+", ", "); + builder = DruidException + .user("Parse error: unexpected token " + m.group(1)) + .context("Line", m.group(2)) + .context("Column", m.group(3)) + .context("Expected", choices); + } else { + builder = DruidException.user(msg).cause(e); } - throw new UOE("Should not get here"); + return builder + .context(DruidException.ERROR_CODE, QueryException.SQL_PARSE_FAILED_ERROR_CODE) + .build(); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java index 6b9e42bafd01..89837a4096f9 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java @@ -23,12 +23,15 @@ import org.apache.calcite.rex.RexExecutor; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.DateTimes; +import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.Expr; import org.apache.druid.math.expr.ExprEval; import org.apache.druid.math.expr.ExprType; import org.apache.druid.math.expr.InputBindings; import org.apache.druid.math.expr.Parser; +import org.apache.druid.query.QueryException; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.expression.Expressions; @@ -87,7 +90,10 @@ public void reduce( // as a primitive long/float/double. 
// ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw new UnsupportedSQLQueryException("Illegal DATE constant: %s", constExp); + throw DruidException.user("Illegal DATE constant") + .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE) + .context("Value", constExp) + .build(); } literal = rexBuilder.makeDateLiteral( @@ -101,7 +107,10 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw new UnsupportedSQLQueryException("Illegal TIMESTAMP constant: %s", constExp); + throw DruidException.user("Illegal TIMESTAMP constant") + .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE) + .context("Value", constExp) + .build(); } literal = Calcites.jodaToCalciteTimestampLiteral( @@ -126,11 +135,16 @@ public void reduce( double exprResultDouble = exprResult.asDouble(); if (Double.isNaN(exprResultDouble) || Double.isInfinite(exprResultDouble)) { String expression = druidExpression.getExpression(); - throw new UnsupportedSQLQueryException("'%s' evaluates to '%s' that is not supported in SQL. 
You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') or VARCHAR ('CAST(%s as VARCHAR)') or change the expression itself", - expression, - Double.toString(exprResultDouble), - expression, - expression); + throw DruidException.user("Expression not supported in SQL") + .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE) + .context("Expression", expression) + .context("Evaluates to", Double.toString(exprResultDouble)) + .context("Suggestion", StringUtils.format( + "You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') or VARCHAR ('CAST(%s as VARCHAR)') or change the expression itself", + expression, + expression + )) + .build(); } bigDecimal = BigDecimal.valueOf(exprResult.asDouble()); } @@ -161,11 +175,13 @@ public void reduce( resultAsBigDecimalList.add(null); } else if (Double.isNaN(doubleVal.doubleValue()) || Double.isInfinite(doubleVal.doubleValue())) { String expression = druidExpression.getExpression(); - throw new UnsupportedSQLQueryException( - "'%s' contains an element that evaluates to '%s' which is not supported in SQL. 
You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself", - expression, - Double.toString(doubleVal.doubleValue()) - ); + throw DruidException.user("Array element not supported in SQL") + .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE) + .context("Array", expression) + .context("Evaluates to", Double.toString(doubleVal.doubleValue())) + .context("Suggestion", + "You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself") + .build(); } else { resultAsBigDecimalList.add(BigDecimal.valueOf(doubleVal.doubleValue())); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index 36af54ff7082..98667165b119 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -35,12 +35,9 @@ import org.apache.calcite.sql.SqlOrderBy; import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; -import org.apache.druid.collections.ResourceHolder; import org.apache.druid.common.utils.IdUtils; import org.apache.druid.error.DruidException; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularity; -import org.apache.druid.segment.QueryableIndex; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; import org.apache.druid.server.security.ResourceAction; @@ -51,10 +48,8 @@ import org.apache.druid.sql.calcite.parser.DruidSqlReplace; import org.apache.druid.sql.calcite.run.EngineFeature; import org.apache.druid.sql.calcite.run.QueryMaker; -import org.apache.druid.timeline.DataSegment; import java.util.List; -import java.util.function.Supplier; import java.util.regex.Pattern; public abstract class IngestHandler extends 
QueryHandler diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java index 4395da5fe441..691c33567a89 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java @@ -30,13 +30,11 @@ import org.apache.calcite.plan.Context; import org.apache.calcite.plan.ConventionTraitDef; import org.apache.calcite.rel.RelCollationTraitDef; -import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.sql.parser.SqlParser; import org.apache.calcite.sql.validate.SqlConformance; import org.apache.calcite.sql2rel.SqlToRelConverter; import org.apache.calcite.tools.FrameworkConfig; import org.apache.calcite.tools.Frameworks; -import org.apache.calcite.tools.ValidationException; import org.apache.druid.guice.annotations.Json; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.segment.join.JoinableFactoryWrapper; @@ -126,12 +124,7 @@ public DruidPlanner createPlannerForTesting(final SqlEngine engine, final String final DruidPlanner thePlanner = createPlanner(engine, sql, queryContext, null); thePlanner.getPlannerContext() .setAuthenticationResult(NoopEscalator.getInstance().createEscalatedAuthenticationResult()); - try { - thePlanner.validate(); - } - catch (SqlParseException | ValidationException e) { - throw new RuntimeException(e); - } + thePlanner.validate(); thePlanner.authorize(ra -> Access.OK, ImmutableSet.of()); return thePlanner; } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index f26cb8c8901d..18b2c4c0cf7b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -61,6 +61,7 @@ import 
org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.query.Query; +import org.apache.druid.query.QueryException; import org.apache.druid.server.QueryResponse; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; @@ -72,6 +73,7 @@ import org.apache.druid.sql.calcite.run.EngineFeature; import org.apache.druid.sql.calcite.run.QueryMaker; import org.apache.druid.sql.calcite.table.DruidTable; +import org.apache.druid.utils.Throwables; import javax.annotation.Nullable; import java.util.ArrayList; @@ -111,7 +113,8 @@ public void validate() CalcitePlanner planner = handlerContext.planner(); try { validatedQueryNode = planner.validate(rewriteParameters()); - } catch (ValidationException e) { + } + catch (ValidationException e) { throw DruidPlanner.translateException(e); } @@ -219,13 +222,24 @@ public PlannerResult plan() } } catch (RelOptPlanner.CannotPlanException e) { - Logger logger = log; - if (!handlerContext.queryContext().isDebug()) { - logger = log.noStackTrace(); + throw buildSQLPlanningError(e); + } + catch (RuntimeException e) { + // Calcite throws a Runtime exception as the result of an IllegalTargetException + // as the result of invoking a method dynamically, when that method throws an + // exception. Unwrap the exception if this exception is from Calcite. + RelOptPlanner.CannotPlanException cpe = Throwables.getCauseOfType(e, RelOptPlanner.CannotPlanException.class); + if (cpe != null) { + throw buildSQLPlanningError(cpe); + } + DruidException de = Throwables.getCauseOfType(e, DruidException.class); + if (de != null) { + throw de; } - throw buildSQLPlanningError(e, logger); + throw DruidPlanner.translateException(e); } catch (Exception e) { + // Not sure what this is. Should it have been translated sooner? 
throw DruidPlanner.translateException(e); } } @@ -610,11 +624,16 @@ private RelRoot possiblyWrapRootWithOuterLimitFromContext(RelRoot root) protected abstract QueryMaker buildQueryMaker(RelRoot rootQueryRel) throws ValidationException; - private DruidException buildSQLPlanningError(Throwable exception, Logger logger) + private DruidException buildSQLPlanningError(RelOptPlanner.CannotPlanException exception) { + Logger logger = log; + if (!handlerContext.queryContext().isDebug()) { + logger = log.noStackTrace(); + } DruidException.Builder builder = DruidException.system("Unsupported query") .cause(exception) - .context("SQL", handlerContext.plannerContext().getSql()); + .context("SQL", handlerContext.plannerContext().getSql()) + .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE); String errorMessage = handlerContext.plannerContext().getPlanningError(); if (null == errorMessage && exception instanceof UnsupportedSQLQueryException) { builder.context("Specific error", errorMessage); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java index cd9b1c2d2138..a9345f31ea98 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java @@ -43,9 +43,7 @@ import org.apache.calcite.rex.RexNode; import org.apache.calcite.rex.RexShuttle; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.sql.SqlPlanningException; -import org.apache.druid.sql.SqlPlanningException.PlanningError; +import org.apache.druid.error.DruidException; /** * Traverse {@link RelNode} tree and replaces all {@link RexDynamicParam} with {@link org.apache.calcite.rex.RexLiteral} @@ -201,9 +199,8 @@ private RexNode bind(RexNode node, RexBuilder builder, 
RelDataTypeFactory typeFa if (plannerContext.getParameters().size() > dynamicParam.getIndex()) { TypedValue param = plannerContext.getParameters().get(dynamicParam.getIndex()); if (param == null) { - throw new SqlPlanningException( - PlanningError.VALIDATION_ERROR, - StringUtils.format("Parameter at position [%s] is not bound", dynamicParam.getIndex()) + throw DruidException.userError( + "Parameter at position %d is not bound", dynamicParam.getIndex() ); } if (param.value == null) { @@ -216,9 +213,8 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa true ); } else { - throw new SqlPlanningException( - PlanningError.VALIDATION_ERROR, - StringUtils.format("Parameter at position [%s] is not bound", dynamicParam.getIndex()) + throw DruidException.userError( + "Parameter at position %d is not bound", dynamicParam.getIndex() ); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java index 2325a6f00840..722e1e3ce63b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java @@ -25,7 +25,6 @@ import org.apache.calcite.rel.RelRoot; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; -import org.apache.calcite.tools.ValidationException; import org.apache.druid.guice.LazySingleton; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; diff --git a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java index a100194d16aa..cbb18cf36240 100644 --- a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java +++ b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java @@ -22,13 +22,11 @@ import com.fasterxml.jackson.databind.ObjectMapper; import 
com.google.common.base.Preconditions; import com.google.inject.Inject; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.druid.common.exception.SanitizableException; import org.apache.druid.guice.annotations.NativeQuery; import org.apache.druid.guice.annotations.Self; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; -import org.apache.druid.query.QueryInterruptedException; import org.apache.druid.server.DruidNode; import org.apache.druid.server.QueryResource; import org.apache.druid.server.QueryResponse; @@ -43,7 +41,6 @@ import org.apache.druid.sql.HttpStatement; import org.apache.druid.sql.SqlLifecycleManager; import org.apache.druid.sql.SqlLifecycleManager.Cancelable; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.SqlRowTransformer; import org.apache.druid.sql.SqlStatementFactory; @@ -59,6 +56,7 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; + import java.io.IOException; import java.io.OutputStream; import java.util.LinkedHashMap; diff --git a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java index 4d96a2ec908e..406855dc4c2c 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java @@ -25,6 +25,7 @@ import com.google.common.util.concurrent.MoreExecutors; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.concurrent.Execs; import org.apache.druid.java.util.common.guava.LazySequence; @@ -34,6 +35,7 @@ import org.apache.druid.query.DefaultQueryConfig; import org.apache.druid.query.Query; import org.apache.druid.query.QueryContexts; +import 
org.apache.druid.query.QueryException; import org.apache.druid.query.QueryRunnerFactoryConglomerate; import org.apache.druid.segment.join.JoinableFactoryWrapper; import org.apache.druid.server.QueryScheduler; @@ -47,7 +49,6 @@ import org.apache.druid.server.security.AuthenticationResult; import org.apache.druid.server.security.ForbiddenException; import org.apache.druid.sql.DirectStatement.ResultSet; -import org.apache.druid.sql.SqlPlanningException.PlanningError; import org.apache.druid.sql.calcite.planner.CalciteRulesManager; import org.apache.druid.sql.calcite.planner.CatalogResolver; import org.apache.druid.sql.calcite.planner.DruidOperatorTable; @@ -70,6 +71,7 @@ import org.junit.rules.TemporaryFolder; import javax.servlet.http.HttpServletRequest; + import java.io.IOException; import java.util.Collections; import java.util.List; @@ -284,9 +286,12 @@ public void testDirectSyntaxError() stmt.execute(); fail(); } - catch (SqlPlanningException e) { + catch (DruidException e) { // Expected - assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), e.getErrorCode()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -301,9 +306,12 @@ public void testDirectValidationError() stmt.execute(); fail(); } - catch (SqlPlanningException e) { + catch (DruidException e) { // Expected - assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -363,9 +371,12 @@ public void testHttpSyntaxError() stmt.execute(); fail(); } - catch (SqlPlanningException e) { + catch (DruidException e) { // Expected - assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), e.getErrorCode()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -380,9 +391,12 @@ public void testHttpValidationError() 
stmt.execute(); fail(); } - catch (SqlPlanningException e) { + catch (DruidException e) { // Expected - assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -446,9 +460,12 @@ public void testPrepareSyntaxError() stmt.prepare(); fail(); } - catch (SqlPlanningException e) { + catch (DruidException e) { // Expected - assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), e.getErrorCode()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -463,9 +480,12 @@ public void testPrepareValidationError() stmt.prepare(); fail(); } - catch (SqlPlanningException e) { + catch (DruidException e) { // Expected - assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java index 2a0930912b46..8fc797a49818 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java @@ -30,6 +30,7 @@ import org.apache.commons.text.StringEscapeUtils; import org.apache.druid.annotations.UsedByJUnitParamsRunner; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import org.apache.druid.guice.DruidInjectorBuilder; import org.apache.druid.hll.VersionOneHyperLogLogCollector; import org.apache.druid.java.util.common.DateTimes; @@ -114,6 +115,7 @@ import org.junit.rules.TemporaryFolder; import javax.annotation.Nullable; + import java.io.IOException; import java.io.PrintStream; import java.util.Arrays; @@ -633,23 +635,30 @@ public void 
assertQueryIsUnplannable(final String sql, String expectedError) public void assertQueryIsUnplannable(final PlannerConfig plannerConfig, final String sql, String expectedError) { - Exception e = null; try { testQuery(plannerConfig, sql, CalciteTests.REGULAR_USER_AUTH_RESULT, ImmutableList.of(), ImmutableList.of()); } - catch (Exception e1) { - e = e1; + catch (DruidException e) { + Assert.assertEquals( + sql, + "Unsupported query", + e.message() + ); + Assert.assertEquals( + sql, + sql, + e.context("SQL") + ); + Assert.assertEquals( + sql, + expectedError, + e.context("Possible error") + ); } - - if (!(e instanceof RelOptPlanner.CannotPlanException)) { - log.error(e, "Expected CannotPlanException for query: %s", sql); + catch (Exception e) { + log.error(e, "Expected DruidException for query: %s", sql); Assert.fail(sql); } - Assert.assertEquals( - sql, - StringUtils.format("Query not supported. %s SQL was: %s", expectedError, sql), - e.getMessage() - ); } /** diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java index 3a1ac2db9aa2..577f46a2e993 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java @@ -54,7 +54,6 @@ @RunWith(JUnitParamsRunner.class) public class CalciteCorrelatedQueryTest extends BaseCalciteQueryTest { - @Test @Parameters(source = QueryContextForJoinProvider.class) public void testCorrelatedSubquery(Map queryContext) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index aeabf5241a0e..b44a88b9f25d 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -26,6 +26,7 @@ import 
org.apache.druid.data.input.InputSource; import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.granularity.Granularity; @@ -41,7 +42,6 @@ import org.apache.druid.segment.join.JoinType; import org.apache.druid.server.security.AuthConfig; import org.apache.druid.server.security.ForbiddenException; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.external.ExternalDataSource; import org.apache.druid.sql.calcite.external.Externals; import org.apache.druid.sql.calcite.filtration.Filtration; @@ -199,7 +199,7 @@ public void testInsertIntoInvalidDataSourceName() { testIngestionQuery() .sql("INSERT INTO \"in/valid\" SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "INSERT dataSource cannot contain the '/' character.") + .expectValidationError(DruidException.class, "INSERT dataSource cannot contain the '/' character.") .verify(); } @@ -208,7 +208,7 @@ public void testInsertUsingColumnList() { testIngestionQuery() .sql("INSERT INTO dst (foo, bar) SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "INSERT with a target column list is not supported.") + .expectValidationError(DruidException.class, "INSERT with a target column list is not supported.") .verify(); } @@ -217,7 +217,7 @@ public void testUpsert() { testIngestionQuery() .sql("UPSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "UPSERT is not supported.") + .expectValidationError(DruidException.class, "UPSERT is not supported.") .verify(); } @@ -229,7 +229,7 @@ public void testSelectFromSystemTable() testIngestionQuery() .sql("INSERT INTO dst 
SELECT * FROM INFORMATION_SCHEMA.COLUMNS PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Cannot query table INFORMATION_SCHEMA.COLUMNS with SQL engine 'ingestion-test'." ) .verify(); @@ -241,7 +241,7 @@ public void testInsertIntoSystemTable() testIngestionQuery() .sql("INSERT INTO INFORMATION_SCHEMA.COLUMNS SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Cannot INSERT into INFORMATION_SCHEMA.COLUMNS because it is not a Druid datasource." ) .verify(); @@ -253,7 +253,7 @@ public void testInsertIntoView() testIngestionQuery() .sql("INSERT INTO view.aview SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Cannot INSERT into view.aview because it is not a Druid datasource." ) .verify(); @@ -283,7 +283,7 @@ public void testInsertIntoNonexistentSchema() testIngestionQuery() .sql("INSERT INTO nonexistent.dst SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Cannot INSERT into nonexistent.dst because it is not a Druid datasource." ) .verify(); @@ -820,7 +820,7 @@ public void testInsertWithoutPartitionedByWithClusteredBy() + "CLUSTERED BY 2, dim1 DESC, CEIL(m2)" ) .expectValidationError( - SqlPlanningException.class, + DruidException.class, "CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause" ) .verify(); @@ -901,7 +901,7 @@ public void testInsertWithClusteredByAndOrderBy() ); Assert.fail("Exception should be thrown"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertEquals( "Cannot have ORDER BY on an INSERT statement, use CLUSTERED BY instead.", e.getMessage() @@ -913,7 +913,7 @@ public void testInsertWithClusteredByAndOrderBy() @Test public void testInsertWithPartitionedByContainingInvalidGranularity() { - // Throws a ValidationException, which gets converted to a SqlPlanningException before throwing to end user + // Throws a ValidationException, which gets converted to a DruidException before throwing to end user try { testQuery( "INSERT INTO dst SELECT * FROM foo PARTITIONED BY 'invalid_granularity'", @@ -922,7 +922,7 @@ public void testInsertWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertEquals( "Encountered 'invalid_granularity' after PARTITIONED BY. 
Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", e.getMessage() @@ -945,7 +945,7 @@ public void testInsertWithOrderBy() ); Assert.fail("Exception should be thrown"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertEquals( "Cannot have ORDER BY on an INSERT statement, use CLUSTERED BY instead.", e.getMessage() @@ -959,8 +959,8 @@ public void testInsertWithOrderBy() @Test public void testInsertWithoutPartitionedBy() { - SqlPlanningException e = Assert.assertThrows( - SqlPlanningException.class, + DruidException e = Assert.assertThrows( + DruidException.class, () -> testQuery( StringUtils.format("INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)), @@ -1312,7 +1312,7 @@ public void testInsertWithInvalidSelectStatement() .sql("INSERT INTO t SELECT channel, added as count FROM foo PARTITIONED BY ALL") // count is a keyword .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Encountered \"as count\"")) ) ) @@ -1325,7 +1325,7 @@ public void testInsertWithUnnamedColumnInSelectStatement() testIngestionQuery() .sql("INSERT INTO t SELECT dim1, dim2 || '-lol' FROM foo PARTITIONED BY ALL") .expectValidationError( - SqlPlanningException.class, + DruidException.class, IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); @@ -1337,7 +1337,7 @@ public void testInsertWithInvalidColumnNameInIngest() testIngestionQuery() .sql("INSERT INTO t SELECT __time, dim1 AS EXPR$0 FROM foo PARTITIONED BY ALL") .expectValidationError( - SqlPlanningException.class, + DruidException.class, IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); @@ -1351,7 +1351,7 @@ public void testInsertWithUnnamedColumnInNestedSelectStatement() + "SELECT __time, * FROM " + "(SELECT __time, LOWER(dim1) FROM foo) PARTITIONED BY ALL TIME") .expectValidationError( - 
SqlPlanningException.class, + DruidException.class, IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); @@ -1364,7 +1364,7 @@ public void testInsertQueryWithInvalidGranularity() .sql("insert into foo1 select __time, dim1 FROM foo partitioned by time_floor(__time, 'PT2H')") .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "The granularity specified in PARTITIONED BY is not supported. " + "Please use an equivalent of these granularities: second, minute, five_minute, ten_minute, " @@ -1391,7 +1391,7 @@ public void testInsertOnExternalDataSourceWithIncompatibleTimeColumnSignature() ) .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "EXTERN function with __time column can be used when __time column is of type long")) ) @@ -1409,7 +1409,7 @@ public void testInsertWithSqlOuterLimit() .context(context) .sql("INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "sqlOuterLimit cannot be provided with INSERT." 
) .verify(); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java index 2311372e75e3..8c126d0b7fd0 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java @@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.math.expr.ExpressionProcessing; @@ -43,7 +44,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.virtual.ExpressionVirtualColumn; import org.apache.druid.segment.virtual.ListFilteredVirtualColumn; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.util.CalciteTests; import org.junit.Test; @@ -1797,7 +1797,7 @@ public void testMultiValueToArrayMoreArgs() testQueryThrows( "SELECT MV_TO_ARRAY(dim3,dim3) FROM druid.numfoo", exception -> { - exception.expect(SqlPlanningException.class); + exception.expect(DruidException.class); exception.expectMessage("Invalid number of arguments to function"); } ); @@ -1809,7 +1809,7 @@ public void testMultiValueToArrayNoArgs() testQueryThrows( "SELECT MV_TO_ARRAY() FROM druid.numfoo", exception -> { - exception.expect(SqlPlanningException.class); + exception.expect(DruidException.class); exception.expectMessage("Invalid number of arguments to function"); } ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java index 
3e1bfe62b651..97ad77b8ea85 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java @@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableList; import org.apache.calcite.avatica.SqlType; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; @@ -37,7 +38,6 @@ import org.apache.druid.query.scan.ScanQuery.ResultFormat; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.util.CalciteTests; import org.apache.druid.sql.http.SqlParameter; @@ -577,7 +577,7 @@ public void testLongs() @Test public void testMissingParameter() { - expectedException.expect(SqlPlanningException.class); + expectedException.expect(DruidException.class); expectedException.expectMessage("Parameter at position [0] is not bound"); testQuery( "SELECT COUNT(*)\n" @@ -592,7 +592,7 @@ public void testMissingParameter() @Test public void testPartiallyMissingParameter() { - expectedException.expect(SqlPlanningException.class); + expectedException.expect(DruidException.class); expectedException.expectMessage("Parameter at position [1] is not bound"); testQuery( "SELECT COUNT(*)\n" @@ -610,7 +610,7 @@ public void testPartiallyMissingParameterInTheMiddle() List params = new ArrayList<>(); params.add(null); params.add(new SqlParameter(SqlType.INTEGER, 1)); - expectedException.expect(SqlPlanningException.class); + expectedException.expect(DruidException.class); expectedException.expectMessage("Parameter at position [0] is not bound"); testQuery( "SELECT 1 + ?, dim1 FROM foo LIMIT ?", 
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 0e7d377489de..b64f04401619 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -23,9 +23,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.runtime.CalciteContextException; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.Intervals; @@ -42,6 +42,7 @@ import org.apache.druid.query.Query; import org.apache.druid.query.QueryContexts; import org.apache.druid.query.QueryDataSource; +import org.apache.druid.query.QueryException; import org.apache.druid.query.ResourceLimitExceededException; import org.apache.druid.query.TableDataSource; import org.apache.druid.query.UnionDataSource; @@ -110,8 +111,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.join.JoinType; -import org.apache.druid.sql.SqlPlanningException; -import org.apache.druid.sql.SqlPlanningException.PlanningError; import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.planner.Calcites; @@ -372,8 +371,8 @@ public void testInformationSchemaColumnsOnAnotherView() public void testCannotInsertWithNativeEngine() { notMsqCompatible(); - final SqlPlanningException e = Assert.assertThrows( - SqlPlanningException.class, + final DruidException e = Assert.assertThrows( + DruidException.class, () -> 
testQuery( "INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -393,8 +392,8 @@ public void testCannotInsertWithNativeEngine() public void testCannotReplaceWithNativeEngine() { notMsqCompatible(); - final SqlPlanningException e = Assert.assertThrows( - SqlPlanningException.class, + final DruidException e = Assert.assertThrows( + DruidException.class, () -> testQuery( "REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -814,7 +813,7 @@ public void testLatestAggregators() @Test public void testEarliestByInvalidTimestamp() { - expectedException.expect(SqlPlanningException.class); + expectedException.expect(DruidException.class); expectedException.expectMessage("Cannot apply 'EARLIEST_BY' to arguments of type 'EARLIEST_BY(, )"); testQuery( @@ -827,7 +826,7 @@ public void testEarliestByInvalidTimestamp() @Test public void testLatestByInvalidTimestamp() { - expectedException.expect(SqlPlanningException.class); + expectedException.expect(DruidException.class); expectedException.expectMessage("Cannot apply 'LATEST_BY' to arguments of type 'LATEST_BY(, )"); testQuery( @@ -2903,12 +2902,14 @@ public void testUnionAllTablesColumnCountMismatch() ); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertTrue( e.getMessage().contains("Column count mismatch in UNION ALL") ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -2971,7 +2972,7 @@ public void testUnionAllTablesColumnTypeMismatchStringLong() + "FROM (SELECT dim3, dim2, m1 FROM foo2 UNION ALL SELECT dim3, dim2, m1 FROM foo)\n" + "WHERE dim2 = 'a' OR dim2 = 'en'\n" + "GROUP BY 1, 2", - "Possible error: SQL requires union between inputs 
that are not simple table scans and involve a " + + "SQL requires union between inputs that are not simple table scans and involve a " + "filter or aliasing. Or column types of tables being unioned are not of same type."); } @@ -2985,7 +2986,7 @@ public void testUnionAllTablesWhenMappingIsRequired() + "FROM (SELECT dim1 AS c, m1 FROM foo UNION ALL SELECT dim2 AS c, m1 FROM numfoo)\n" + "WHERE c = 'a' OR c = 'def'\n" + "GROUP BY 1", - "Possible error: SQL requires union between two tables " + + "SQL requires union between two tables " + "and column names queried for each table are different Left: [dim1], Right: [dim2]." ); } @@ -2996,7 +2997,7 @@ public void testUnionIsUnplannable() // Cannot plan this UNION operation assertQueryIsUnplannable( "SELECT dim2, dim1, m1 FROM foo2 UNION SELECT dim1, dim2, m1 FROM foo", - "Possible error: SQL requires 'UNION' but only 'UNION ALL' is supported." + "SQL requires 'UNION' but only 'UNION ALL' is supported." ); } @@ -3010,7 +3011,7 @@ public void testUnionAllTablesWhenCastAndMappingIsRequired() + "FROM (SELECT dim1 AS c, m1 FROM foo UNION ALL SELECT cnt AS c, m1 FROM numfoo)\n" + "WHERE c = 'a' OR c = 'def'\n" + "GROUP BY 1", - "Possible error: SQL requires union between inputs that are not simple table scans and involve " + + "SQL requires union between inputs that are not simple table scans and involve " + "a filter or aliasing. Or column types of tables being unioned are not of same type." ); } @@ -3111,7 +3112,7 @@ public void testUnionAllSameTableTwiceWithDifferentMapping() + "FROM (SELECT dim1, dim2, m1 FROM foo UNION ALL SELECT dim2, dim1, m1 FROM foo)\n" + "WHERE dim2 = 'a' OR dim2 = 'def'\n" + "GROUP BY 1, 2", - "Possible error: SQL requires union between two tables and column names queried for each table are different Left: [dim1, dim2, m1], Right: [dim2, dim1, m1]." + "SQL requires union between two tables and column names queried for each table are different Left: [dim1, dim2, m1], Right: [dim2, dim1, m1]." 
); } @@ -3174,12 +3175,14 @@ public void testUnionAllThreeTablesColumnCountMismatch1() ); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertTrue( e.getMessage().contains("Column count mismatch in UNION ALL") ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -3198,12 +3201,14 @@ public void testUnionAllThreeTablesColumnCountMismatch2() ); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertTrue( e.getMessage().contains("Column count mismatch in UNION ALL") ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -3222,12 +3227,14 @@ public void testUnionAllThreeTablesColumnCountMismatch3() ); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertTrue( e.getMessage().contains("Column count mismatch in UNION ALL") ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -5639,12 +5646,12 @@ public void testUnplannableQueries() // JOIN condition with not-equals (<>). 
"SELECT foo.dim1, foo.dim2, l.k, l.v\n" + "FROM foo INNER JOIN lookup.lookyloo l ON foo.dim2 <> l.k", - "Possible error: SQL requires a join with 'NOT_EQUALS' condition that is not supported.", + "SQL requires a join with 'NOT_EQUALS' condition that is not supported.", // JOIN condition with a function of both sides. "SELECT foo.dim1, foo.dim2, l.k, l.v\n" + "FROM foo INNER JOIN lookup.lookyloo l ON CHARACTER_LENGTH(foo.dim2 || l.k) > 3\n", - "Possible error: SQL requires a join with 'GREATER_THAN' condition that is not supported." + "SQL requires a join with 'GREATER_THAN' condition that is not supported." ); for (final Map.Entry queryErrorPair : queries.entrySet()) { @@ -5702,7 +5709,7 @@ public void testUnplannableTwoExactCountDistincts() assertQueryIsUnplannable( PLANNER_CONFIG_NO_HLL, "SELECT dim2, COUNT(distinct dim1), COUNT(distinct dim2) FROM druid.foo GROUP BY dim2", - "Possible error: SQL requires a join with 'IS_NOT_DISTINCT_FROM' condition that is not supported." + "SQL requires a join with 'IS_NOT_DISTINCT_FROM' condition that is not supported." ); } @@ -5713,7 +5720,7 @@ public void testUnplannableExactCountDistinctOnSketch() assertQueryIsUnplannable( PLANNER_CONFIG_NO_HLL, "SELECT COUNT(distinct unique_dim1) FROM druid.foo", - "Possible error: SQL requires a group-by on a column of type COMPLEX that is unsupported." + "SQL requires a group-by on a column of type COMPLEX that is unsupported." 
); } @@ -5768,12 +5775,14 @@ public void testStringAggQueryOnComplexDatatypes() testQuery("SELECT STRING_AGG(unique_dim1, ',') FROM druid.foo", ImmutableList.of(), ImmutableList.of()); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertTrue( e.getMessage().contains("Cannot use STRING_AGG on complex inputs COMPLEX") ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -5909,7 +5918,7 @@ public void testCountStarWithTimeInIntervalFilterNonLiteral() "SELECT COUNT(*) FROM druid.foo " + "WHERE TIME_IN_INTERVAL(__time, dim1)", expected -> { - expected.expect(CoreMatchers.instanceOf(SqlPlanningException.class)); + expected.expect(CoreMatchers.instanceOf(DruidException.class)); expected.expect(ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "From line 1, column 38 to line 1, column 67: " + "Cannot apply 'TIME_IN_INTERVAL' to arguments of type 'TIME_IN_INTERVAL(, )'. " @@ -6050,11 +6059,27 @@ public void testCountStarWithTimeFilterUsingStringLiteralsInvalid_isUnplannable( { // Strings are implicitly cast to timestamps. Test an invalid string. // This error message isn't ideal but it is at least better than silently ignoring the problem. 
- assertQueryIsUnplannable( - "SELECT COUNT(*) FROM druid.foo\n" - + "WHERE __time >= 'z2000-01-01 00:00:00' AND __time < '2001-01-01 00:00:00'\n", - "Possible error: Illegal TIMESTAMP constant: CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL" - ); + String sql = "SELECT COUNT(*) FROM druid.foo\n" + + "WHERE __time >= 'z2000-01-01 00:00:00' AND __time < '2001-01-01 00:00:00'\n"; + try { + testBuilder().sql(sql).run(); + } + catch (DruidException e) { + Assert.assertEquals( + sql, + "Illegal TIMESTAMP constant", + e.message() + ); + Assert.assertEquals( + sql, + "CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL", + e.context("Value") + ); + } + catch (Exception e) { + log.error(e, "Expected DruidException for query: %s", sql); + Assert.fail(sql); + } } @Test @@ -11301,12 +11326,14 @@ public void testTimeExtractWithTooFewArguments() testQuery("SELECT TIME_EXTRACT(__time) FROM druid.foo", ImmutableList.of(), ImmutableList.of()); Assert.fail("query execution should fail"); } - catch (SqlPlanningException e) { + catch (DruidException e) { Assert.assertTrue( e.getMessage().contains("Invalid number of arguments to function 'TIME_EXTRACT'. 
Was expecting 2 arguments") ); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), e.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), e.getErrorClass()); + Assert.assertEquals( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + e.context(DruidException.ERROR_CODE) + ); } } @@ -13961,7 +13988,7 @@ public void testStringAggExpression() ); } - @Test(expected = RelOptPlanner.CannotPlanException.class) + @Test(expected = DruidException.class) public void testStringAggExpressionNonConstantSeparator() { testQuery( @@ -14110,7 +14137,7 @@ public void testHumanReadableFormatFunction() @Test public void testHumanReadableFormatFunctionExceptionWithWrongNumberType() { - this.expectedException.expect(SqlPlanningException.class); + this.expectedException.expect(DruidException.class); this.expectedException.expectMessage("Supported form(s): HUMAN_READABLE_BINARY_BYTE_FORMAT(Number, [Precision])"); testQuery( "SELECT HUMAN_READABLE_BINARY_BYTE_FORMAT('45678')", @@ -14122,7 +14149,7 @@ public void testHumanReadableFormatFunctionExceptionWithWrongNumberType() @Test public void testHumanReadableFormatFunctionWithWrongPrecisionType() { - this.expectedException.expect(SqlPlanningException.class); + this.expectedException.expect(DruidException.class); this.expectedException.expectMessage("Supported form(s): HUMAN_READABLE_BINARY_BYTE_FORMAT(Number, [Precision])"); testQuery( "SELECT HUMAN_READABLE_BINARY_BYTE_FORMAT(45678, '2')", @@ -14134,7 +14161,7 @@ public void testHumanReadableFormatFunctionWithWrongPrecisionType() @Test public void testHumanReadableFormatFunctionWithInvalidNumberOfArguments() { - this.expectedException.expect(SqlPlanningException.class); + this.expectedException.expect(DruidException.class); /* * frankly speaking, the exception message thrown here is a little bit confusing diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java index 0c1f016600d1..3dc445b4f3c5 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java @@ -22,6 +22,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.jackson.JacksonUtils; @@ -33,7 +34,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.server.security.ForbiddenException; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.external.Externals; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.parser.DruidSqlInsert; @@ -219,7 +219,7 @@ public void testReplaceForUnsupportedDeleteWhereClause() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time LIKE '20__-02-01' SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Unsupported operation in OVERWRITE WHERE clause: LIKE" ) .verify(); @@ -231,7 +231,7 @@ public void testReplaceForInvalidDeleteWhereClause() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE TRUE SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Invalid OVERWRITE WHERE clause" ) .verify(); @@ -243,7 +243,7 @@ public void testReplaceForDeleteWhereClauseOnUnsupportedColumns() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE dim1 > TIMESTAMP '2000-01-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - 
SqlPlanningException.class, + DruidException.class, "Only __time column is supported in OVERWRITE WHERE clause" ) .verify(); @@ -255,7 +255,7 @@ public void testReplaceWithOrderBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo ORDER BY dim1 PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "Cannot have ORDER BY on a REPLACE statement, use CLUSTERED BY instead.") + .expectValidationError(DruidException.class, "Cannot have ORDER BY on a REPLACE statement, use CLUSTERED BY instead.") .verify(); } @@ -265,7 +265,7 @@ public void testReplaceForMisalignedPartitionInterval() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-01-06 00:00:00' SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "OVERWRITE WHERE clause contains an interval [2000-01-05T00:00:00.000Z/2000-01-06T00:00:00.001Z] which is not aligned with PARTITIONED BY granularity {type=period, period=P1M, timeZone=UTC, origin=null}" ) .verify(); @@ -277,7 +277,7 @@ public void testReplaceForInvalidPartition() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "OVERWRITE WHERE clause contains an interval [2000-01-05T00:00:00.000Z/2000-02-05T00:00:00.001Z] which is not aligned with PARTITIONED BY granularity AllGranularity" ) .verify(); @@ -291,7 +291,7 @@ public void testReplaceFromTableWithEmptyInterval() + "__time < TIMESTAMP '2000-01-01' AND __time > TIMESTAMP '2000-01-01' " + "SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Intervals for replace are empty" ) .verify(); @@ -302,7 +302,7 @@ public void 
testReplaceForWithInvalidInterval() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-INVALID0:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class) + .expectValidationError(DruidException.class) .verify(); } @@ -311,7 +311,7 @@ public void testReplaceForWithoutPartitionSpec() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class) + .expectValidationError(DruidException.class) .verify(); } @@ -381,7 +381,7 @@ public void testReplaceIntoInvalidDataSourceName() { testIngestionQuery() .sql("REPLACE INTO \"in/valid\" OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "REPLACE dataSource cannot contain the '/' character.") + .expectValidationError(DruidException.class, "REPLACE dataSource cannot contain the '/' character.") .verify(); } @@ -390,7 +390,7 @@ public void testReplaceUsingColumnList() { testIngestionQuery() .sql("REPLACE INTO dst (foo, bar) OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "REPLACE with a target column list is not supported.") + .expectValidationError(DruidException.class, "REPLACE with a target column list is not supported.") .verify(); } @@ -399,7 +399,7 @@ public void testReplaceWithoutPartitionedBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo") - .expectValidationError(SqlPlanningException.class, "REPLACE statements must specify PARTITIONED BY clause explicitly") + .expectValidationError(DruidException.class, "REPLACE statements must specify PARTITIONED BY clause explicitly") .verify(); } @@ -408,7 +408,7 @@ public void testReplaceWithoutPartitionedByWithClusteredBy() { testIngestionQuery() .sql("REPLACE INTO dst 
OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo CLUSTERED BY dim1") - .expectValidationError(SqlPlanningException.class, "CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause") + .expectValidationError(DruidException.class, "CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause") .verify(); } @@ -417,7 +417,7 @@ public void testReplaceWithoutOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") + .expectValidationError(DruidException.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") .verify(); } @@ -426,7 +426,7 @@ public void testReplaceWithoutCompleteOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") + .expectValidationError(DruidException.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") .verify(); } @@ -436,7 +436,7 @@ public void testReplaceIntoSystemTable() testIngestionQuery() .sql("REPLACE INTO INFORMATION_SCHEMA.COLUMNS OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Cannot REPLACE into INFORMATION_SCHEMA.COLUMNS because it is not a Druid datasource." 
) .verify(); @@ -448,7 +448,7 @@ public void testReplaceIntoView() testIngestionQuery() .sql("REPLACE INTO view.aview OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Cannot REPLACE into view.aview because it is not a Druid datasource." ) .verify(); @@ -478,7 +478,7 @@ public void testReplaceIntoNonexistentSchema() testIngestionQuery() .sql("REPLACE INTO nonexistent.dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlPlanningException.class, + DruidException.class, "Cannot REPLACE into nonexistent.dst because it is not a Druid datasource." ) .verify(); @@ -576,7 +576,7 @@ public void testReplaceWithClusteredBy() @Test public void testReplaceWithPartitionedByContainingInvalidGranularity() { - // Throws a ValidationException, which gets converted to a SqlPlanningException before throwing to end user + // Throws a ValidationException, which gets converted to a DruidException before throwing to end user try { testQuery( "REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY 'invalid_granularity'", @@ -585,7 +585,7 @@ public void testReplaceWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (SqlPlanningException e) { + catch (DruidException e) { assertEquals( "Encountered 'invalid_granularity' after PARTITIONED BY. 
Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", e.getMessage() @@ -909,7 +909,7 @@ public void testReplaceWithSqlOuterLimit() testIngestionQuery() .context(context) .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlPlanningException.class, "sqlOuterLimit cannot be provided with REPLACE.") + .expectValidationError(DruidException.class, "sqlOuterLimit cannot be provided with REPLACE.") .verify(); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java index 54d8e856af8b..9c7b59bd53d5 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableList; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; @@ -43,7 +44,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.virtual.ExpressionVirtualColumn; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.util.CalciteTests; @@ -964,7 +964,7 @@ public void testSelectCurrentTimePrecisionTooHigh() testQueryThrows( "SELECT CURRENT_TIMESTAMP(4)", expectedException -> { - expectedException.expect(SqlPlanningException.class); + expectedException.expect(DruidException.class); expectedException.expectMessage( "Argument to function 'CURRENT_TIMESTAMP' must be a valid precision between '0' and '3'" ); From 
e4d62a69522779f4a69e96d5ad14654b23e6003d Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Mon, 13 Feb 2023 15:25:59 -0800 Subject: [PATCH 04/17] Converted remaining SQL exceptions --- .../builtin/ArraySqlAggregator.java | 8 ++++ .../EarliestLatestAnySqlAggregator.java | 43 +++++++++++-------- .../aggregation/builtin/MaxSqlAggregator.java | 7 ++- .../aggregation/builtin/MinSqlAggregator.java | 7 ++- .../builtin/StringSqlAggregator.java | 8 +++- .../aggregation/builtin/SumSqlAggregator.java | 7 ++- .../NestedDataOperatorConversions.java | 40 ++++++++++------- .../sql/calcite/rel/DruidJoinQueryRel.java | 7 ++- .../calcite/rule/DruidLogicalValuesRule.java | 11 +++-- .../calcite/CalciteNestedDataQueryTest.java | 9 ++-- 10 files changed, 98 insertions(+), 49 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java index ec914dac7f8e..312f5c2f619e 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java @@ -35,10 +35,12 @@ import org.apache.calcite.sql.type.SqlReturnTypeInference; import org.apache.calcite.sql.type.SqlTypeFamily; import org.apache.calcite.util.Optionality; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.math.expr.ExpressionType; +import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; @@ -51,6 +53,7 @@ import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry; import javax.annotation.Nullable; + import 
java.util.List; import java.util.stream.Collectors; @@ -165,6 +168,11 @@ static class ArrayAggReturnTypeInference implements SqlReturnTypeInference public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); + if (type instanceof RowSignatures.ComplexSqlType) { + throw DruidException.user("Cannot use ARRAY_AGG on complex inputs %s", type) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); + } return sqlOperatorBinding.getTypeFactory().createArrayType( type, -1 diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java index 6efc8846e914..8d2503d53046 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java @@ -35,8 +35,8 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.util.Optionality; -import org.apache.druid.java.util.common.IAE; -import org.apache.druid.java.util.common.ISE; +import org.apache.druid.error.DruidException; +import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.any.DoubleAnyAggregatorFactory; import org.apache.druid.query.aggregation.any.FloatAnyAggregatorFactory; @@ -60,10 +60,10 @@ import org.apache.druid.sql.calcite.expression.Expressions; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry; import javax.annotation.Nullable; + import 
java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -91,7 +91,9 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringFirstAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw new UnsupportedSQLQueryException("EARLIEST aggregator is not supported for '%s' type", type); + throw DruidException.user("EARLIEST aggregator is not supported for '%s' type", type) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } } }, @@ -111,7 +113,9 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringLastAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw new UnsupportedSQLQueryException("LATEST aggregator is not supported for '%s' type", type); + throw DruidException.user("LATEST aggregator is not supported for '%s' type", type) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } } }, @@ -130,7 +134,9 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case STRING: return new StringAnyAggregatorFactory(name, fieldName, maxStringBytes); default: - throw new UnsupportedSQLQueryException("ANY aggregation is not supported for '%s' type", type); + throw DruidException.user("ANY aggregation is not supported for '%s' type", type) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } } }; @@ -188,11 +194,12 @@ public Aggregation toDruidAggregation( final String aggregatorName = finalizeAggregations ? 
Calcites.makePrefixedName(name, "a") : name; final ColumnType outputType = Calcites.getColumnTypeForRelDataType(aggregateCall.getType()); if (outputType == null) { - throw new ISE( - "Cannot translate output sqlTypeName[%s] to Druid type for aggregator[%s]", - aggregateCall.getType().getSqlTypeName(), - aggregateCall.getName() - ); + throw DruidException.system( + "Cannot translate output SQL type %s to Druid type for aggregator [%s]", + aggregateCall.getType().getSqlTypeName(), + aggregateCall.getName() + ) + .build(); } final String fieldName = getColumnName(plannerContext, virtualColumnRegistry, args.get(0), rexNodes.get(0)); @@ -230,12 +237,14 @@ public Aggregation toDruidAggregation( ); break; default: - throw new IAE( - "aggregation[%s], Invalid number of arguments[%,d] to [%s] operator", - aggregatorName, - args.size(), - aggregatorType.name() - ); + throw DruidException.user( + "aggregation [%s], Invalid number of arguments %,d to [%s] operator", + aggregatorName, + args.size(), + aggregatorType.name() + ) + .context(DruidException.ERROR_CODE, QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE) + .build(); } return Aggregation.create( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java index e27b006778e0..5838721981df 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java @@ -22,7 +22,9 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.druid.error.DruidException; import org.apache.druid.math.expr.ExprMacroTable; +import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.AggregatorFactory; import 
org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory; import org.apache.druid.query.aggregation.FloatMaxAggregatorFactory; @@ -31,7 +33,6 @@ import org.apache.druid.segment.column.ValueType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; public class MaxSqlAggregator extends SimpleSqlAggregator { @@ -71,7 +72,9 @@ private static AggregatorFactory createMaxAggregatorFactory( case DOUBLE: return new DoubleMaxAggregatorFactory(name, fieldName, null, macroTable); default: - throw new UnsupportedSQLQueryException("Max aggregation is not supported for '%s' type", aggregationType); + throw DruidException.user("Max aggregation is not supported for '%s' type", aggregationType) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java index b009ead1fe95..f9197ccf8b06 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java @@ -22,7 +22,9 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.druid.error.DruidException; import org.apache.druid.math.expr.ExprMacroTable; +import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleMinAggregatorFactory; import org.apache.druid.query.aggregation.FloatMinAggregatorFactory; @@ -30,7 +32,6 @@ import org.apache.druid.segment.column.ColumnType; import org.apache.druid.sql.calcite.aggregation.Aggregation; 
import org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; public class MinSqlAggregator extends SimpleSqlAggregator { @@ -67,7 +68,9 @@ private static AggregatorFactory createMinAggregatorFactory( case DOUBLE: return new DoubleMinAggregatorFactory(name, fieldName, null, macroTable); default: - throw new UnsupportedSQLQueryException("MIN aggregator is not supported for '%s' type", aggregationType); + throw DruidException.user("MIN aggregator is not supported for '%s' type", aggregationType) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java index 53d7fc4cf7ae..0ac88e1ee812 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java @@ -36,9 +36,11 @@ import org.apache.calcite.sql.type.SqlTypeFamily; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.Optionality; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.ExprMacroTable; +import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory; import org.apache.druid.query.aggregation.FilteredAggregatorFactory; import org.apache.druid.query.filter.NotDimFilter; @@ -51,11 +53,11 @@ import org.apache.druid.sql.calcite.expression.Expressions; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import 
org.apache.druid.sql.calcite.rel.VirtualColumnRegistry; import org.apache.druid.sql.calcite.table.RowSignatures; import javax.annotation.Nullable; + import java.util.List; import java.util.Objects; import java.util.stream.Collectors; @@ -197,7 +199,9 @@ public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); if (type instanceof RowSignatures.ComplexSqlType) { - throw new UnsupportedSQLQueryException("Cannot use STRING_AGG on complex inputs %s", type); + throw DruidException.user("Cannot use STRING_AGG on complex inputs %s", type) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } return Calcites.createSqlTypeWithNullability( sqlOperatorBinding.getTypeFactory(), diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java index f4dcad3ed598..6cb16f9f451f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java @@ -31,7 +31,9 @@ import org.apache.calcite.sql.type.OperandTypes; import org.apache.calcite.sql.type.ReturnTypes; import org.apache.calcite.util.Optionality; +import org.apache.druid.error.DruidException; import org.apache.druid.math.expr.ExprMacroTable; +import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory; import org.apache.druid.query.aggregation.FloatSumAggregatorFactory; @@ -40,7 +42,6 @@ import org.apache.druid.segment.column.ValueType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; public class 
SumSqlAggregator extends SimpleSqlAggregator { @@ -88,7 +89,9 @@ static AggregatorFactory createSumAggregatorFactory( case DOUBLE: return new DoubleSumAggregatorFactory(name, fieldName, null, macroTable); default: - throw new UnsupportedSQLQueryException("Sum aggregation is not supported for '%s' type", aggregationType); + throw DruidException.user("Sum aggregation is not supported for '%s' type", aggregationType) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java index 76640bcf809b..2ba83e9721f6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java @@ -40,11 +40,13 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeTransforms; import org.apache.calcite.sql2rel.SqlRexConvertlet; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.Expr; import org.apache.druid.math.expr.InputBindings; import org.apache.druid.math.expr.Parser; +import org.apache.druid.query.QueryException; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.nested.NestedPathFinder; @@ -56,11 +58,11 @@ import org.apache.druid.sql.calcite.expression.SqlOperatorConversion; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import 
org.apache.druid.sql.calcite.planner.convertlet.DruidConvertletFactory; import org.apache.druid.sql.calcite.table.RowSignatures; import javax.annotation.Nullable; + import java.util.Collections; import java.util.List; @@ -200,11 +202,13 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw new UnsupportedSQLQueryException( - "Cannot use [%s]: [%s]", - call.getOperator().getName(), - iae.getMessage() - ); + throw DruidException.user( + "Cannot use %s", + call.getOperator().getName() + ) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .cause(iae) + .build(); } final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> @@ -391,11 +395,13 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw new UnsupportedSQLQueryException( - "Cannot use [%s]: [%s]", - call.getOperator().getName(), - iae.getMessage() - ); + throw DruidException.user( + "Cannot use %s", + call.getOperator().getName() + ) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .cause(iae) + .build(); } final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> @@ -689,11 +695,13 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw new UnsupportedSQLQueryException( - "Cannot use [%s]: [%s]", - call.getOperator().getName(), - iae.getMessage() - ); + throw DruidException.user( + "Cannot use %s", + call.getOperator().getName() + ) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .cause(iae) + .build(); } final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final 
DruidExpression.ExpressionGenerator builder = (args) -> diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java index 5ab29ab13b1d..f17d0f47aa6e 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java @@ -38,12 +38,14 @@ import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlKind; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.query.DataSource; import org.apache.druid.query.JoinDataSource; import org.apache.druid.query.QueryDataSource; +import org.apache.druid.query.QueryException; import org.apache.druid.query.TableDataSource; import org.apache.druid.query.filter.DimFilter; import org.apache.druid.segment.column.RowSignature; @@ -53,7 +55,6 @@ import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.table.RowSignatures; import javax.annotation.Nullable; @@ -360,7 +361,9 @@ public static JoinType toDruidJoinType(JoinRelType calciteJoinType) case INNER: return JoinType.INNER; default: - throw new UnsupportedSQLQueryException("Cannot handle joinType '%s'", calciteJoinType); + throw DruidException.user("Cannot handle joinType '%s'", calciteJoinType) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java 
b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java index b94a6ee4ac89..a387acf326df 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java @@ -25,11 +25,12 @@ import org.apache.calcite.plan.RelOptRuleCall; import org.apache.calcite.rel.logical.LogicalValues; import org.apache.calcite.rex.RexLiteral; +import org.apache.druid.error.DruidException; import org.apache.druid.query.InlineDataSource; +import org.apache.druid.query.QueryException; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.rel.DruidQueryRel; import org.apache.druid.sql.calcite.table.RowSignatures; @@ -126,14 +127,18 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont return Calcites.calciteDateTimeLiteralToJoda(literal, plannerContext.getTimeZone()).getMillis(); case NULL: if (!literal.isNull()) { - throw new UnsupportedSQLQueryException("Query has a non-null constant but is of NULL type."); + throw DruidException.user("Query has a non-null constant but is of NULL type.") + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } return null; case TIMESTAMP_WITH_LOCAL_TIME_ZONE: case TIME: case TIME_WITH_LOCAL_TIME_ZONE: default: - throw new UnsupportedSQLQueryException("%s type is not supported", literal.getType().getSqlTypeName()); + throw DruidException.user("%s type is not supported", literal.getType().getSqlTypeName()) + .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) + .build(); } } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java index 007bb4926d78..749f32f9dc09 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java @@ -32,6 +32,7 @@ import org.apache.druid.data.input.impl.LongDimensionSchema; import org.apache.druid.data.input.impl.StringDimensionSchema; import org.apache.druid.data.input.impl.TimestampSpec; +import org.apache.druid.error.DruidException; import org.apache.druid.guice.DruidInjectorBuilder; import org.apache.druid.guice.NestedDataModule; import org.apache.druid.java.util.common.HumanReadableBytes; @@ -68,7 +69,6 @@ import org.apache.druid.segment.virtual.NestedFieldVirtualColumn; import org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory; import org.apache.druid.sql.calcite.filtration.Filtration; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker; import org.apache.druid.sql.calcite.util.TestDataBuilder; import org.apache.druid.timeline.DataSegment; @@ -4172,9 +4172,12 @@ public void testGroupByInvalidPath() + "SUM(cnt) " + "FROM druid.nested GROUP BY 1", (expected) -> { - expected.expect(UnsupportedSQLQueryException.class); + expected.expect(DruidException.class); expected.expectMessage( - "Cannot use [JSON_VALUE_VARCHAR]: [Bad format, '.array.[1]' is not a valid JSONPath path: must start with '$']"); + "Cannot use JSON_VALUE_VARCHAR\n" + + "Error Code: Unsupported operation\n" + + "Cause: Bad format, '.array.[1]' is not a valid JSONPath path: must start with '$'" + ); } ); } From 4bb88a0c708347b9d61d8f5d35b7460749c1f8fc Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Fri, 17 Feb 2023 18:13:05 -0800 Subject: [PATCH 05/17] Refined errors Moved common error codes into DruidException methods Revisions from review comments --- 
.../druid/data/input/impl/JsonNodeReader.java | 2 +- .../apache/druid/error/DruidException.java | 131 ++++++++++++++++-- .../error/StandardRestExceptionEncoder.java | 7 +- .../org/apache/druid/sql/DirectStatement.java | 2 +- .../builtin/ArraySqlAggregator.java | 5 +- .../EarliestLatestAnySqlAggregator.java | 41 +++--- .../aggregation/builtin/MaxSqlAggregator.java | 5 +- .../aggregation/builtin/MinSqlAggregator.java | 5 +- .../builtin/StringSqlAggregator.java | 5 +- .../aggregation/builtin/SumSqlAggregator.java | 5 +- .../NestedDataOperatorConversions.java | 33 ++--- .../calcite/parser/DruidSqlParserUtils.java | 61 +++++--- .../sql/calcite/planner/DruidPlanner.java | 32 ++--- .../sql/calcite/planner/DruidRexExecutor.java | 19 +-- .../sql/calcite/planner/IngestHandler.java | 42 +++--- .../sql/calcite/planner/QueryHandler.java | 27 ++-- .../planner/RelParameterizerShuttle.java | 10 +- .../sql/calcite/rel/DruidJoinQueryRel.java | 5 +- .../calcite/rule/DruidLogicalValuesRule.java | 16 ++- .../druid/sql/calcite/run/SqlEngines.java | 9 +- .../apache/druid/sql/SqlStatementTest.java | 12 +- .../calcite/CalciteNestedDataQueryTest.java | 5 +- .../druid/sql/calcite/CalciteQueryTest.java | 12 +- 23 files changed, 288 insertions(+), 203 deletions(-) diff --git a/processing/src/main/java/org/apache/druid/data/input/impl/JsonNodeReader.java b/processing/src/main/java/org/apache/druid/data/input/impl/JsonNodeReader.java index a6ebb0a91136..b5a61f692929 100644 --- a/processing/src/main/java/org/apache/druid/data/input/impl/JsonNodeReader.java +++ b/processing/src/main/java/org/apache/druid/data/input/impl/JsonNodeReader.java @@ -56,7 +56,7 @@ *

* The input text can be: * 1. a JSON string of an object in a line or multiple lines(such as pretty-printed JSON text) - * 2. multiple JSON object strings concated by white space character(s) + * 2. multiple JSON object strings concatenated by white space character(s) *

* If an input string contains invalid JSON syntax, any valid JSON objects found prior to encountering the invalid * syntax will be successfully parsed, but parsing will not continue after the invalid syntax. diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index e58c15e47941..d5c3213f6030 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -21,6 +21,9 @@ import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; +import org.apache.druid.query.QueryException; + +import javax.annotation.Nullable; import java.util.HashMap; import java.util.LinkedHashMap; @@ -61,6 +64,21 @@ */ public class DruidException extends RuntimeException { + /** + * The {@code ErrorType} is a high-level classification of errors that balances + * the idea of persona and code knowledge. The codes roughly identify who is most + * likely the persona that will resolve the error. In some cases (e.g. {@code USER}), + * the person is clear: the person using Druid. In other cases (e.g. {@code RESOURCE}), + * the target persona is ambiguous: is it the person who submitted the query? The person + * who installed Druid? The system admin? The person who decided how much resource + * the project could afford? + *

+ * Often the code is not sure of who the exact person is, but the code knows about + * the kind of error (e.g. {@code NETWORK}). In this case, it is up to each + * site to determine who is in charge of fixing this particular network error: the user + * (bad HTTP address), admin (forgot to open a port), system admin (a router died), + * hardware vendor (a network card failed), etc. + */ public enum ErrorType { /** @@ -83,7 +101,7 @@ public enum ErrorType * where the fix is either a workaround or a bug fix. Such error should only * be raised for "should never occur" type situations. */ - SYSTEM, + INTERNAL, /** * Error for a resource limit: memory, CPU, slots or so on. The workaround is @@ -113,7 +131,6 @@ public enum ErrorType NETWORK }; - public static final String ERROR_CODE = "Error Code"; public static final String HOST = "Host"; public static class Builder @@ -121,13 +138,18 @@ public static class Builder private final DruidException source; private final ErrorType type; private final String msg; + private String code; private Throwable e; private Map context; - private Builder(ErrorType type, String msg, Object[] args) + private Builder( + final ErrorType type, + final String msg, + @Nullable final Object[] args) { this.source = null; this.type = type; + this.code = QueryException.UNKNOWN_EXCEPTION_ERROR_CODE; this.msg = StringUtils.format(msg, args); } @@ -135,11 +157,18 @@ private Builder(DruidException e) { this.source = e; this.type = e.type; + this.code = e.code; this.msg = e.message(); this.e = e.getCause() == null ? e : e.getCause(); this.context = e.context == null ? null : new HashMap<>(e.context); } + public Builder code(String code) + { + this.code = code; + return this; + } + public Builder cause(Throwable e) { this.e = e; @@ -170,6 +199,7 @@ private DruidException build(boolean logged) e, msg, type, + code, // Used linked hash map to preserve order context == null ? 
null : new LinkedHashMap<>(context), logged || wasLogged() @@ -189,7 +219,7 @@ public DruidException build(Logger logger) } switch (type) { case CONFIG: - case SYSTEM: + case INTERNAL: logger.error(e, e.getMessage()); break; case NETWORK: @@ -211,6 +241,15 @@ public String toString() } private final ErrorType type; + + /** + * Error codes are categories within the top-level codes. They mimic prior Druid + * conventions, although prior codes were very sparse. The code is a string, not + * an enum, because Druid has no clear catalog of such codes at present. + *

+ * For now, error codes are enumerated in {@link org.apache.druid.query.QueryException}. + */ + private final String code; private final Map context; private final boolean logged; @@ -218,12 +257,14 @@ public DruidException( final Throwable e, final String msg, final ErrorType type, + final String code, final Map context, final boolean logged ) { super(msg, e); this.type = type; + this.code = code; this.context = context; this.logged = logged; } @@ -243,15 +284,50 @@ public static DruidException userError(String msg, Object...args) return user(msg, args).build(); } + /** + * User error for an unsupported operation. We assume the problem is that the user + * asked Druid to do something it cannot do, and so the user shouldn't ask. This + * is not an indication that Druid should provide an operation, and it is + * an internal error that it does not. + */ + public static Builder unsupported(String msg, Object...args) + { + return new Builder(ErrorType.USER, msg, args) + .code(QueryException.UNSUPPORTED_OPERATION_ERROR_CODE); + } + + public static DruidException unsupportedError(String msg, Object...args) + { + return unsupported(msg, args).build(); + } + + /** + * SQL query validation failed, most likely due to a problem in the SQL statement + * which the user provided. This is somewhat less specific than the + * {@link #unsupported(String, Object...)} error, which says that validation failed + * because Druid doesn't support something. Use the validation error for cases that + * are most likely because the SQL really is wrong. + */ + public static Builder validation(String msg, Object...args) + { + return new Builder(ErrorType.USER, msg, args) + .code(QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE); + } + + public static DruidException validationError(String msg, Object...args) + { + return validation(msg, args).build(); + } + /** * Build an error that indicates that something went wrong internally * with Druid. 
This is the equivalent of an assertion failure: errors * of this type indicate a bug in the code: there is nothing the user * can do other than request a fix or find a workaround. */ - public static Builder system(String msg, Object...args) + public static Builder internalError(String msg, Object...args) { - return new Builder(ErrorType.SYSTEM, msg, args); + return new Builder(ErrorType.INTERNAL, msg, args); } public static Builder notFound(String msg, Object...args) @@ -261,7 +337,7 @@ public static Builder notFound(String msg, Object...args) public static DruidException unexpected(Exception e) { - return system(e.getMessage()).cause(e).build(); + return internalError(e.getMessage()).cause(e).build(); } /** @@ -323,8 +399,41 @@ public String context(String key) return context.get(key); } + public String code() + { + return code; + } + @Override public String getMessage() + { + StringBuilder buf = new StringBuilder(); + buf.append(type.name()).append(" - "); + buf.append(super.getMessage()); + if (!QueryException.UNSUPPORTED_OPERATION_ERROR_CODE.equals(code)) { + buf.append("; Error Code: [") + .append(code) + .append("]"); + } + if (context != null && context.size() > 0) { + int count = 0; + buf.append("; "); + for (Map.Entry entry : context.entrySet()) { + if (count > 0) { + buf.append(", "); + } + buf.append("\n") + .append(entry.getKey()) + .append(": [") + .append(entry.getValue()) + .append("]"); + count++; + } + } + return buf.toString(); + } + + public String getDisplayMessage() { StringBuilder buf = new StringBuilder(); switch (type) { @@ -334,13 +443,17 @@ public String getMessage() case RESOURCE: buf.append("Resource error: "); break; - case SYSTEM: - buf.append("System error: "); + case INTERNAL: + buf.append("Internal error: "); break; default: break; } buf.append(super.getMessage()); + if (!QueryException.UNSUPPORTED_OPERATION_ERROR_CODE.equals(code)) { + buf.append("\nError Code: ") + .append(code); + } if (context != null && context.size() > 0) { 
for (Map.Entry entry : context.entrySet()) { buf.append("\n") diff --git a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java index 82cea864f35b..1069e33555b9 100644 --- a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java +++ b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java @@ -44,12 +44,9 @@ public ResponseBuilder builder(DruidException e) builder.put("type", errorCode(e)); builder.put("message", e.message()); builder.put("errorMessage", e.getMessage()); + builder.put("errorCode", e.code()); if (e.context() != null && !e.context().isEmpty()) { Map context = new HashMap<>(e.context()); - String errorCode = context.remove(DruidException.ERROR_CODE); - if (errorCode != null) { - builder.put("errorCode", errorCode); - } String host = context.remove(DruidException.HOST); if (host != null) { builder.put("host", host); @@ -79,7 +76,7 @@ private Status status(DruidException e) { switch (e.type()) { case CONFIG: - case SYSTEM: + case INTERNAL: case NETWORK: return Response.Status.INTERNAL_SERVER_ERROR; case TIMEOUT: diff --git a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java index b993eb6f0e69..38b90a0055ce 100644 --- a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java @@ -228,7 +228,7 @@ public ResultSet plan() } catch (RelOptPlanner.CannotPlanException e) { // Not sure if this is even thrown here. 
- throw DruidException.system("Internal error: cannot plan SQL query") + throw DruidException.internalError("Cannot plan SQL query") .cause(e) .build(); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java index 312f5c2f619e..7c91d4f1debe 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java @@ -40,7 +40,6 @@ import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.math.expr.ExpressionType; -import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; @@ -169,9 +168,7 @@ public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); if (type instanceof RowSignatures.ComplexSqlType) { - throw DruidException.user("Cannot use ARRAY_AGG on complex inputs %s", type) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("Cannot use ARRAY_AGG on complex inputs %s", type); } return sqlOperatorBinding.getTypeFactory().createArrayType( type, diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java index 8d2503d53046..e9f21cea173f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java @@ -36,7 +36,6 @@ import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.util.Optionality; import org.apache.druid.error.DruidException; -import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.any.DoubleAnyAggregatorFactory; import org.apache.druid.query.aggregation.any.FloatAnyAggregatorFactory; @@ -91,9 +90,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringFirstAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw DruidException.user("EARLIEST aggregator is not supported for '%s' type", type) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("EARLIEST aggregator is not supported for type %s", type); } } }, @@ -113,9 +110,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringLastAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw DruidException.user("LATEST aggregator is not supported for '%s' type", type) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("LATEST aggregator is not supported for type %s", type); } } }, @@ -134,9 +129,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case STRING: return new StringAnyAggregatorFactory(name, fieldName, maxStringBytes); default: - throw DruidException.user("ANY aggregation is not supported for '%s' type", type) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("ANY aggregation is not supported for type %s", type); } } }; @@ 
-194,10 +187,10 @@ public Aggregation toDruidAggregation( final String aggregatorName = finalizeAggregations ? Calcites.makePrefixedName(name, "a") : name; final ColumnType outputType = Calcites.getColumnTypeForRelDataType(aggregateCall.getType()); if (outputType == null) { - throw DruidException.system( - "Cannot translate output SQL type %s to Druid type for aggregator [%s]", - aggregateCall.getType().getSqlTypeName(), - aggregateCall.getName() + throw DruidException.internalError( + "%s cannot translate output SQL type %s to a Druid type", + aggregateCall.getName(), + aggregateCall.getType().getSqlTypeName() ) .build(); } @@ -225,8 +218,11 @@ public Aggregation toDruidAggregation( maxStringBytes = RexLiteral.intValue(rexNodes.get(1)); } catch (AssertionError ae) { - plannerContext.setPlanningError("The second argument '%s' to function '%s' is not a number", rexNodes.get(1), aggregateCall.getName()); - return null; + throw DruidException.validationError( + "%s, argument 2 must be a number but found [%s]", + aggregateCall.getName(), + rexNodes.get(1) + ); } theAggFactory = aggregatorType.createAggregatorFactory( aggregatorName, @@ -237,14 +233,11 @@ public Aggregation toDruidAggregation( ); break; default: - throw DruidException.user( - "aggregation [%s], Invalid number of arguments %,d to [%s] operator", - aggregatorName, - args.size(), - aggregatorType.name() - ) - .context(DruidException.ERROR_CODE, QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE) - .build(); + throw DruidException.validationError( + "%s expects 1 or 2 arguments but found %d", + aggregateCall.getName(), + args.size() + ); } return Aggregation.create( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java index 5838721981df..862fc74bef44 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java @@ -24,7 +24,6 @@ import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.druid.error.DruidException; import org.apache.druid.math.expr.ExprMacroTable; -import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory; import org.apache.druid.query.aggregation.FloatMaxAggregatorFactory; @@ -72,9 +71,7 @@ private static AggregatorFactory createMaxAggregatorFactory( case DOUBLE: return new DoubleMaxAggregatorFactory(name, fieldName, null, macroTable); default: - throw DruidException.user("Max aggregation is not supported for '%s' type", aggregationType) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("MAX does not support type %s", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java index f9197ccf8b06..6e13463ccede 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java @@ -24,7 +24,6 @@ import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.druid.error.DruidException; import org.apache.druid.math.expr.ExprMacroTable; -import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleMinAggregatorFactory; import org.apache.druid.query.aggregation.FloatMinAggregatorFactory; @@ -68,9 +67,7 @@ private static AggregatorFactory createMinAggregatorFactory( case DOUBLE: return new DoubleMinAggregatorFactory(name, fieldName, null, macroTable); default: - throw 
DruidException.user("MIN aggregator is not supported for '%s' type", aggregationType) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("MIN does not support type %s", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java index 0ac88e1ee812..9ab6df585857 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java @@ -40,7 +40,6 @@ import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.ExprMacroTable; -import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory; import org.apache.druid.query.aggregation.FilteredAggregatorFactory; import org.apache.druid.query.filter.NotDimFilter; @@ -199,9 +198,7 @@ public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); if (type instanceof RowSignatures.ComplexSqlType) { - throw DruidException.user("Cannot use STRING_AGG on complex inputs %s", type) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("Cannot use STRING_AGG on complex input of type %s", type); } return Calcites.createSqlTypeWithNullability( sqlOperatorBinding.getTypeFactory(), diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java index 6cb16f9f451f..97a306e24dd6 100644 --- 
a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java @@ -33,7 +33,6 @@ import org.apache.calcite.util.Optionality; import org.apache.druid.error.DruidException; import org.apache.druid.math.expr.ExprMacroTable; -import org.apache.druid.query.QueryException; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory; import org.apache.druid.query.aggregation.FloatSumAggregatorFactory; @@ -89,9 +88,7 @@ static AggregatorFactory createSumAggregatorFactory( case DOUBLE: return new DoubleSumAggregatorFactory(name, fieldName, null, macroTable); default: - throw DruidException.user("Sum aggregation is not supported for '%s' type", aggregationType) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("SUM is not supported for type %s", aggregationType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java index 2ba83e9721f6..8180644a4a3f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java @@ -46,7 +46,6 @@ import org.apache.druid.math.expr.Expr; import org.apache.druid.math.expr.InputBindings; import org.apache.druid.math.expr.Parser; -import org.apache.druid.query.QueryException; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.segment.nested.NestedPathFinder; @@ -202,13 +201,11 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch 
(IllegalArgumentException iae) { - throw DruidException.user( - "Cannot use %s", - call.getOperator().getName() - ) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .cause(iae) - .build(); + throw DruidException.unsupportedError( + "Cannot use [%s]: [%s]", + call.getOperator().getName(), + iae.getMessage() + ); } final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> @@ -236,7 +233,6 @@ public DruidExpression toDruidExpression( } } - /** * The {@link org.apache.calcite.sql2rel.StandardConvertletTable} converts json_value(.. RETURNING type) into * cast(json_value_any(..), type). @@ -395,13 +391,11 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw DruidException.user( - "Cannot use %s", - call.getOperator().getName() - ) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .cause(iae) - .build(); + throw DruidException.unsupportedError( + "Cannot use [%s]: [%s]", + call.getOperator().getName(), + iae.getMessage() + ); } final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> @@ -695,11 +689,10 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw DruidException.user( - "Cannot use %s", - call.getOperator().getName() + throw DruidException.unsupported( + "JSON path [%s] is not supported", + call.getOperator().getName() ) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) .cause(iae) .build(); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java index ea6b3f6fb508..8746eec81590 100644 --- 
a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java @@ -34,7 +34,6 @@ import org.apache.calcite.sql.SqlTimestampLiteral; import org.apache.calcite.tools.ValidationException; import org.apache.druid.error.DruidException; -import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.java.util.common.granularity.GranularityType; @@ -66,7 +65,6 @@ public class DruidSqlParserUtils { - private static final Logger log = new Logger(DruidSqlParserUtils.class); public static final String ALL = "all"; @@ -231,19 +229,26 @@ public static List validateQueryAndConvertToIntervals( List intervals = filtration.getIntervals(); if (filtration.getDimFilter() != null) { - throw DruidException.userError("Only " + ColumnHolder.TIME_COLUMN_NAME + " column is supported in OVERWRITE WHERE clause"); + throw DruidException.validationError( + "Only %s column is supported in OVERWRITE WHERE clause", + ColumnHolder.TIME_COLUMN_NAME + ); } if (intervals.isEmpty()) { - throw DruidException.userError("Intervals for replace are empty"); + throw DruidException.validationError("Intervals for REPLACE are empty"); } for (Interval interval : intervals) { DateTime intervalStart = interval.getStart(); DateTime intervalEnd = interval.getEnd(); if (!granularity.bucketStart(intervalStart).equals(intervalStart) || !granularity.bucketStart(intervalEnd).equals(intervalEnd)) { - throw DruidException.userError("OVERWRITE WHERE clause contains an interval " + intervals + - " which is not aligned with PARTITIONED BY granularity " + granularity); + throw DruidException.validationError( + "OVERWRITE WHERE clause contains an interval %s" + + " which is not aligned with PARTITIONED BY granularity %s", + intervals, + granularity + ); } } return intervals @@ -325,7 +330,7 @@ public 
static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi { if (!(replaceTimeQuery instanceof SqlBasicCall)) { log.error("Expected SqlBasicCall during parsing, but found " + replaceTimeQuery.getClass().getName()); - throw DruidException.userError("Invalid OVERWRITE WHERE clause"); + throw DruidException.validationError("Invalid OVERWRITE WHERE clause"); } String columnName; SqlBasicCall sqlBasicCall = (SqlBasicCall) replaceTimeQuery; @@ -406,21 +411,26 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi StringComparators.NUMERIC ); default: - throw DruidException.userError("Unsupported operation in OVERWRITE WHERE clause: " + sqlBasicCall.getOperator().getName()); + throw DruidException.validationError( + "Unsupported operation in OVERWRITE WHERE clause: %s", + sqlBasicCall.getOperator().getName() + ); } } /** * Converts a {@link SqlNode} identifier into a string representation * - * @param sqlNode the sql node + * @param sqlNode the SQL node * @return string representing the column name - * @throws ValidationException if the sql node is not an SqlIdentifier + * @throws ValidationException if the SQL node is not an SqlIdentifier */ public static String parseColumnName(SqlNode sqlNode) { if (!(sqlNode instanceof SqlIdentifier)) { - throw DruidException.userError("Expressions must be of the form __time TIMESTAMP"); + throw DruidException.validationError( + "OVERWRITE WHERE expressions must be of the form __time TIMESTAMP" + ); } return ((SqlIdentifier) sqlNode).getSimple(); } @@ -428,15 +438,17 @@ public static String parseColumnName(SqlNode sqlNode) /** * Converts a {@link SqlNode} into a timestamp, taking into account the timezone * - * @param sqlNode the sql node + * @param sqlNode the SQL node * @param timeZone timezone * @return the timestamp string as milliseconds from epoch - * @throws ValidationException if the sql node is not a SqlTimestampLiteral + * @throws ValidationException if the SQL node is not a 
SqlTimestampLiteral */ public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) { if (!(sqlNode instanceof SqlTimestampLiteral)) { - throw DruidException.userError("Expressions must be of the form __time TIMESTAMP"); + throw DruidException.validationError( + "OVERWRITE WHERE expressions must be of the form __time TIMESTAMP" + ); } Timestamp sqlTimestamp = Timestamp.valueOf(((SqlTimestampLiteral) sqlNode).toFormattedString()); @@ -452,15 +464,18 @@ public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone ti public static void throwIfUnsupportedGranularityInPartitionedBy(Granularity granularity) { if (!GranularityType.isStandard(granularity)) { - throw new IAE( - "The granularity specified in PARTITIONED BY is not supported. " - + "Please use an equivalent of these granularities: %s.", - Arrays.stream(GranularityType.values()) - .filter(granularityType -> !granularityType.equals(GranularityType.NONE)) - .map(Enum::name) - .map(StringUtils::toLowerCase) - .collect(Collectors.joining(", ")) - ); + throw DruidException.validation( + "The granularity specified in PARTITIONED BY is not supported." 
+ ) + .context( + "Valid granularities", + Arrays.stream(GranularityType.values()) + .filter(granularityType -> !granularityType.equals(GranularityType.NONE)) + .map(Enum::name) + .map(StringUtils::toLowerCase) + .collect(Collectors.joining(", ")) + ) + .build(); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index ebe908085e26..6e35fc3e6fb4 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -168,10 +168,10 @@ private SqlStatementHandler createHandler(final SqlNode node) if (query.isA(SqlKind.QUERY)) { return new QueryHandler.SelectHandler(handlerContext, query, explain); } - throw DruidException.user("Unsupported SQL statement") - .context("Statement kind", node.getKind()) - .context(DruidException.ERROR_CODE, QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE) - .build(); + throw DruidException.unsupportedError( + "Unsupported SQL statement %s", + node.getKind() + ); } /** @@ -335,9 +335,8 @@ public static DruidException translateException(Exception e) // Anything else. Should not get here. Anything else should already have // been translated to a DruidException unless it is an unexpected exception. 
return DruidException - .system(e.getMessage()) + .internalError(e.getMessage()) .cause(inner) - .context(DruidException.ERROR_CODE, QueryException.UNKNOWN_EXCEPTION_ERROR_CODE) .build(); } } @@ -350,15 +349,14 @@ private static DruidException parseValidationMessage(Exception e, String errorCo Matcher m = p.matcher(msg); DruidException.Builder builder; if (m.matches()) { - builder = DruidException - .user(m.group(3)) - .context("Line", m.group(1)) - .context("Column", m.group(2)); + builder = DruidException.user( + "Line %s, Column %s: %s", m.group(1), m.group(2), m.group(3) + ); } else { builder = DruidException.user(msg).cause(e); } return builder - .context(DruidException.ERROR_CODE, errorCode) + .code(errorCode) .build(); } @@ -374,16 +372,18 @@ private static DruidException parseParserMessage(Exception e) DruidException.Builder builder; if (m.matches()) { String choices = m.group(4).trim().replaceAll("[ .]*\n\\ s+", ", "); - builder = DruidException - .user("Parse error: unexpected token " + m.group(1)) - .context("Line", m.group(2)) - .context("Column", m.group(3)) + builder = DruidException.user( + "Line %s, Column %s: unexpected token %s", + m.group(2), + m.group(3), + m.group(1) + ) .context("Expected", choices); } else { builder = DruidException.user(msg).cause(e); } return builder - .context(DruidException.ERROR_CODE, QueryException.SQL_PARSE_FAILED_ERROR_CODE) + .code(QueryException.SQL_PARSE_FAILED_ERROR_CODE) .build(); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java index 89837a4096f9..1dd15428fc57 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java @@ -31,7 +31,6 @@ import org.apache.druid.math.expr.ExprType; import org.apache.druid.math.expr.InputBindings; import org.apache.druid.math.expr.Parser; -import 
org.apache.druid.query.QueryException; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.expression.Expressions; @@ -90,10 +89,7 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw DruidException.user("Illegal DATE constant") - .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE) - .context("Value", constExp) - .build(); + throw DruidException.validationError("Illegal DATE constant %s", constExp); } literal = rexBuilder.makeDateLiteral( @@ -107,10 +103,7 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw DruidException.user("Illegal TIMESTAMP constant") - .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE) - .context("Value", constExp) - .build(); + throw DruidException.validationError("Illegal TIMESTAMP constant %s", constExp); } literal = Calcites.jodaToCalciteTimestampLiteral( @@ -135,9 +128,7 @@ public void reduce( double exprResultDouble = exprResult.asDouble(); if (Double.isNaN(exprResultDouble) || Double.isInfinite(exprResultDouble)) { String expression = druidExpression.getExpression(); - throw DruidException.user("Expression not supported in SQL") - .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE) - .context("Expression", expression) + throw DruidException.unsupported("Expression not supported in SQL : %s", expression) .context("Evaluates to", Double.toString(exprResultDouble)) .context("Suggestion", StringUtils.format( "You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') or VARCHAR ('CAST(%s as VARCHAR)') or change 
the expression itself", @@ -175,9 +166,7 @@ public void reduce( resultAsBigDecimalList.add(null); } else if (Double.isNaN(doubleVal.doubleValue()) || Double.isInfinite(doubleVal.doubleValue())) { String expression = druidExpression.getExpression(); - throw DruidException.user("Array element not supported in SQL") - .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE) - .context("Array", expression) + throw DruidException.validation("Array element not supported in SQL: %s", expression) .context("Evaluates to", Double.toString(doubleVal.doubleValue())) .context("Suggestion", "You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself") diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index 98667165b119..592044715c90 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -87,8 +87,8 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) SqlNodeList orderByList = sqlOrderBy.orderList; if (!(orderByList == null || orderByList.equals(SqlNodeList.EMPTY))) { String opName = sqlNode.getOperator().getName(); - throw DruidException.userError( - "Cannot have ORDER BY on %s %s statement, use CLUSTERED BY instead.", + throw DruidException.validationError( + "Cannot use ORDER BY on %s %s statement, use CLUSTERED BY instead.", "INSERT".equals(opName) ? 
"an" : "a", opName ); @@ -99,7 +99,7 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) } if (!query.isA(SqlKind.QUERY)) { - throw DruidException.userError("Cannot execute [%s].", query.getKind()); + throw DruidException.validationError("Cannot execute SQL statement %s", query.getKind()); } return query; } @@ -115,7 +115,7 @@ protected String operationName() public void validate() { if (ingestNode().getPartitionedBy() == null) { - throw DruidException.userError( + throw DruidException.validationError( "%s statements must specify PARTITIONED BY clause explicitly", operationName() ); @@ -130,18 +130,17 @@ public void validate() } } catch (JsonProcessingException e) { - throw DruidException.system( - "Unable to serialize partition granularity." - ) - .context("Value", ingestionGranularity) - .build(); + throw DruidException.validationError( + "Invalid partition granularity '%s", + ingestionGranularity + ); } super.validate(); // Check if CTX_SQL_OUTER_LIMIT is specified and fail the query if it is. CTX_SQL_OUTER_LIMIT being provided causes // the number of rows inserted to be limited which is likely to be confusing and unintended. 
if (handlerContext.queryContextMap().get(PlannerContext.CTX_SQL_OUTER_LIMIT) != null) { - throw DruidException.userError( - "%s cannot be provided with %s.", + throw DruidException.validationError( + "Context parameter %s cannot be provided with %s", PlannerContext.CTX_SQL_OUTER_LIMIT, operationName() ); @@ -167,11 +166,14 @@ private String validateAndGetDataSourceForIngest() { final SqlInsert insert = ingestNode(); if (insert.isUpsert()) { - throw DruidException.userError("UPSERT is not supported."); + throw DruidException.unsupportedError("UPSERT is not supported."); } if (insert.getTargetColumnList() != null) { - throw DruidException.userError(operationName() + " with a target column list is not supported."); + throw DruidException.unsupportedError( + "%s with a target column list is not supported", + operationName() + ); } final SqlIdentifier tableIdentifier = (SqlIdentifier) insert.getTargetTable(); @@ -179,7 +181,7 @@ private String validateAndGetDataSourceForIngest() if (tableIdentifier.names.isEmpty()) { // I don't think this can happen, but include a branch for it just in case. - throw DruidException.userError(operationName() + " requires a target table."); + throw DruidException.validationError("%s requires a target table", operationName()); } else if (tableIdentifier.names.size() == 1) { // Unqualified name. 
dataSource = Iterables.getOnlyElement(tableIdentifier.names); @@ -191,7 +193,7 @@ private String validateAndGetDataSourceForIngest() if (tableIdentifier.names.size() == 2 && defaultSchemaName.equals(tableIdentifier.names.get(0))) { dataSource = tableIdentifier.names.get(1); } else { - throw DruidException.userError( + throw DruidException.validationError( "Cannot %s into %s because it is not a Druid datasource.", operationName(), tableIdentifier @@ -320,15 +322,17 @@ protected DruidSqlIngest ingestNode() public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_REPLACE)) { - throw DruidException.userError( - "Cannot execute REPLACE with SQL engine '%s'.", + throw DruidException.validationError( + "Cannot execute REPLACE with SQL engine %s", handlerContext.engine().name() ); } SqlNode replaceTimeQuery = sqlNode.getReplaceTimeQuery(); if (replaceTimeQuery == null) { - throw DruidException.userError("Missing time chunk information in OVERWRITE clause for REPLACE. Use " - + "OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table."); + throw DruidException.validationError( + "Missing time chunk information in OVERWRITE clause for REPLACE. Use " + + "OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." 
+ ); } replaceIntervals = DruidSqlParserUtils.validateQueryAndConvertToIntervals( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index 18b2c4c0cf7b..43ebd5f35a4c 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -55,7 +55,6 @@ import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; import org.apache.druid.error.DruidException; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequences; import org.apache.druid.java.util.common.logger.Logger; @@ -203,15 +202,13 @@ public PlannerResult plan() // Consider BINDABLE convention when necessary. Used for metadata tables. if (!handlerContext.plannerContext().featureAvailable(EngineFeature.ALLOW_BINDABLE_PLAN)) { - throw new ValidationException( - StringUtils.format( - "Cannot query table%s %s with SQL engine '%s'.", - bindableTables.size() != 1 ? "s" : "", - bindableTables.stream() - .map(table -> Joiner.on(".").join(table.getQualifiedName())) - .collect(Collectors.joining(", ")), - handlerContext.engine().name() - ) + throw DruidException.validationError( + "Cannot query table%s %s with SQL engine %s.", + bindableTables.size() != 1 ? 
"s" : "", + bindableTables.stream() + .map(table -> Joiner.on(".").join(table.getQualifiedName())) + .collect(Collectors.joining(", ")), + handlerContext.engine().name() ); } @@ -630,10 +627,12 @@ private DruidException buildSQLPlanningError(RelOptPlanner.CannotPlanException e if (!handlerContext.queryContext().isDebug()) { logger = log.noStackTrace(); } - DruidException.Builder builder = DruidException.system("Unsupported query") + DruidException.Builder builder = DruidException.internalError( + "Unsupported SQL query - failed to convert to a Druid native query" + ) .cause(exception) .context("SQL", handlerContext.plannerContext().getSql()) - .context(DruidException.ERROR_CODE, QueryException.QUERY_UNSUPPORTED_ERROR_CODE); + .code(QueryException.QUERY_UNSUPPORTED_ERROR_CODE); String errorMessage = handlerContext.plannerContext().getPlanningError(); if (null == errorMessage && exception instanceof UnsupportedSQLQueryException) { builder.context("Specific error", errorMessage); @@ -662,8 +661,8 @@ public SelectHandler( public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_SELECT)) { - throw DruidException.userError( - "Cannot execute SELECT with SQL engine '%s'.", + throw DruidException.validationError( + "Cannot execute SELECT with SQL engine %s", handlerContext.engine().name() ); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java index a9345f31ea98..2383c70d658f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java @@ -199,8 +199,9 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa if (plannerContext.getParameters().size() > dynamicParam.getIndex()) { TypedValue param = 
plannerContext.getParameters().get(dynamicParam.getIndex()); if (param == null) { - throw DruidException.userError( - "Parameter at position %d is not bound", dynamicParam.getIndex() + throw DruidException.validationError( + "Parameter at position %d is not bound", + dynamicParam.getIndex() ); } if (param.value == null) { @@ -213,8 +214,9 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa true ); } else { - throw DruidException.userError( - "Parameter at position %d is not bound", dynamicParam.getIndex() + throw DruidException.validationError( + "Parameter at position %d is not bound", + dynamicParam.getIndex() ); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java index f17d0f47aa6e..301a9efab581 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java @@ -45,7 +45,6 @@ import org.apache.druid.query.DataSource; import org.apache.druid.query.JoinDataSource; import org.apache.druid.query.QueryDataSource; -import org.apache.druid.query.QueryException; import org.apache.druid.query.TableDataSource; import org.apache.druid.query.filter.DimFilter; import org.apache.druid.segment.column.RowSignature; @@ -361,9 +360,7 @@ public static JoinType toDruidJoinType(JoinRelType calciteJoinType) case INNER: return JoinType.INNER; default: - throw DruidException.user("Cannot handle joinType '%s'", calciteJoinType) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError("Cannot handle joinType '%s'", calciteJoinType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java index a387acf326df..7840a9820ef3 100644 --- 
a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java @@ -27,7 +27,6 @@ import org.apache.calcite.rex.RexLiteral; import org.apache.druid.error.DruidException; import org.apache.druid.query.InlineDataSource; -import org.apache.druid.query.QueryException; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; @@ -127,18 +126,21 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont return Calcites.calciteDateTimeLiteralToJoda(literal, plannerContext.getTimeZone()).getMillis(); case NULL: if (!literal.isNull()) { - throw DruidException.user("Query has a non-null constant but is of NULL type.") - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError( + "Non-null constant %s for a NULL literal", + literal + ); } return null; case TIMESTAMP_WITH_LOCAL_TIME_ZONE: case TIME: case TIME_WITH_LOCAL_TIME_ZONE: default: - throw DruidException.user("%s type is not supported", literal.getType().getSqlTypeName()) - .context(DruidException.ERROR_CODE, QueryException.UNSUPPORTED_OPERATION_ERROR_CODE) - .build(); + throw DruidException.unsupportedError( + "Literal %s type %s is not supported", + literal, + literal.getType().getSqlTypeName() + ); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java index 53f12952b410..4f89734225e9 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java @@ -42,11 +42,10 @@ public static void validateNoSpecialContextKeys( { for (String contextParameterName : queryContext.keySet()) { if 
(specialContextKeys.contains(contextParameterName)) { - throw DruidException - .user("Cannot execute query with context parameter") - .context("Parameter", contextParameterName) - .context("Value", queryContext.get(contextParameterName)) - .build(); + throw DruidException.validationError( + "Query context parameter '%s' is not allowed", + contextParameterName + ); } } } diff --git a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java index 406855dc4c2c..1220340e2fd2 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java @@ -290,7 +290,7 @@ public void testDirectSyntaxError() // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -310,7 +310,7 @@ public void testDirectValidationError() // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -375,7 +375,7 @@ public void testHttpSyntaxError() // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -395,7 +395,7 @@ public void testHttpValidationError() // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -464,7 +464,7 @@ public void testPrepareSyntaxError() // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -484,7 +484,7 @@ public void testPrepareValidationError() // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java index 749f32f9dc09..30d6c8c3e9e8 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java @@ -4174,10 +4174,7 @@ public void testGroupByInvalidPath() (expected) -> { expected.expect(DruidException.class); expected.expectMessage( - "Cannot use JSON_VALUE_VARCHAR\n" + - "Error Code: Unsupported operation\n" + - "Cause: Bad format, '.array.[1]' is not a valid JSONPath path: must start with '$'" - ); + "Cannot use [JSON_VALUE_VARCHAR]: [Bad format, '.array.[1]' is not a valid JSONPath path: must start with '$']"); } ); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index b64f04401619..74d40ffda34e 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -2908,7 +2908,7 @@ public void testUnionAllTablesColumnCountMismatch() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -3181,7 +3181,7 @@ public void testUnionAllThreeTablesColumnCountMismatch1() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -3207,7 +3207,7 @@ public void testUnionAllThreeTablesColumnCountMismatch2() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -3233,7 +3233,7 @@ public void testUnionAllThreeTablesColumnCountMismatch3() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -5781,7 +5781,7 @@ public void testStringAggQueryOnComplexDatatypes() ); 
Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } @@ -11332,7 +11332,7 @@ public void testTimeExtractWithTooFewArguments() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.context(DruidException.ERROR_CODE) + e.code() ); } } From 7df767cafa1df2d5a377bc6f24a80ee926ebdb38 Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Sat, 18 Feb 2023 15:25:41 -0800 Subject: [PATCH 06/17] Build fixes --- .../calcite/aggregation/builtin/ArraySqlAggregator.java | 1 - .../org/apache/druid/sql/calcite/planner/DruidPlanner.java | 7 +++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java index 7c91d4f1debe..50df9ab83410 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java @@ -35,7 +35,6 @@ import org.apache.calcite.sql.type.SqlReturnTypeInference; import org.apache.calcite.sql.type.SqlTypeFamily; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.ExprMacroTable; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index 6e35fc3e6fb4..33f665f6bd11 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -362,7 +362,8 @@ private static DruidException parseValidationMessage(Exception e, String errorCo private static DruidException 
parseParserMessage(Exception e) { - // Calcite exception that probably includes a position. + // Calcite exception that probably includes a position. The normal parse + // exception is rather cumbersome. Clean it up a bit. String msg = e.getMessage(); Pattern p = Pattern.compile( "Encountered \"(.*)\" at line (\\d+), column (\\d+).\nWas expecting one of:\n(.*)", @@ -371,7 +372,9 @@ private static DruidException parseParserMessage(Exception e) Matcher m = p.matcher(msg); DruidException.Builder builder; if (m.matches()) { - String choices = m.group(4).trim().replaceAll("[ .]*\n\\ s+", ", "); + p = Pattern.compile("[ .]*\n\\ s+"); + m = p.matcher(m.group(4).trim()); + String choices = m.replaceAll(", "); builder = DruidException.user( "Line %s, Column %s: unexpected token %s", m.group(2), From 7d72f3fe4d83f00a8c5a304a0bb0e140996236f5 Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Mon, 20 Feb 2023 13:24:31 -0800 Subject: [PATCH 07/17] Build fixes --- .../apache/druid/error/DruidException.java | 92 ++++++++++++++-- .../org/apache/druid/error/ErrorResponse.java | 104 ++++++++++++++++++ .../error/StandardRestExceptionEncoder.java | 30 +---- .../druid/server/QueryResultPusher.java | 27 +++-- .../EarliestLatestAnySqlAggregator.java | 6 +- .../aggregation/builtin/MaxSqlAggregator.java | 2 +- .../aggregation/builtin/MinSqlAggregator.java | 2 +- .../builtin/StringSqlAggregator.java | 2 +- .../aggregation/builtin/SumSqlAggregator.java | 2 +- .../NestedDataOperatorConversions.java | 6 +- .../calcite/parser/DruidSqlParserUtils.java | 4 +- .../sql/calcite/planner/DruidPlanner.java | 11 +- .../sql/calcite/planner/DruidRexExecutor.java | 2 +- .../sql/calcite/planner/IngestHandler.java | 2 +- .../sql/calcite/planner/QueryHandler.java | 28 ++--- .../sql/calcite/rel/DruidJoinQueryRel.java | 2 +- .../calcite/rule/DruidLogicalValuesRule.java | 4 +- .../apache/druid/sql/SqlStatementTest.java | 8 +- .../druid/sql/http/SqlResourceTest.java | 46 ++++---- 19 files changed, 276 
insertions(+), 104 deletions(-) create mode 100644 processing/src/main/java/org/apache/druid/error/ErrorResponse.java diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index d5c3213f6030..de18259900ba 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -95,6 +95,12 @@ public enum ErrorType */ NOT_FOUND, + /** + * Special case of a user error where the user asked for a feature that + * Druid does not support. + */ + UNSUPPORTED, + /** * Error due to a problem beyond the user's control, such as an assertion * failed, unsupported operation, etc. These indicate problems with the software @@ -128,7 +134,13 @@ public enum ErrorType * Druid-to-external system, etc. Generally the end user cannot fix these errors: * it requires a DevOps person to resolve. */ - NETWORK + NETWORK, + + /** + * Indicates an exception deserialized from a {@link org.apache.druid.query.QueryException} + * which has no error type. + */ + UNKNOWN }; public static final String HOST = "Host"; @@ -142,6 +154,10 @@ public static class Builder private Throwable e; private Map context; + // For backward compatibility with QueryException + private String errorClass; + private String host; + private Builder( final ErrorType type, final String msg, @@ -166,12 +182,18 @@ private Builder(DruidException e) public Builder code(String code) { this.code = code; + if (QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE.equals(code)) { + // Not always right, but close enough. For backward compatibility with + // code that needs the (now deprecated) error class. 
+ this.errorClass = "org.apache.calcite.tools.ValidationException"; + } return this; } public Builder cause(Throwable e) { this.e = e; + this.errorClass = e.getClass().getName(); if (!msg.equals(e.getMessage())) { context("Cause", e.getMessage()); } @@ -188,6 +210,12 @@ public Builder context(String key, Object value) return this; } + public Builder errorClass(String errorClass) + { + this.errorClass = errorClass; + return this; + } + private boolean wasLogged() { return source != null && source.logged; @@ -200,6 +228,8 @@ private DruidException build(boolean logged) msg, type, code, + errorClass, + host, // Used linked hash map to preserve order context == null ? null : new LinkedHashMap<>(context), logged || wasLogged() @@ -253,11 +283,17 @@ public String toString() private final Map context; private final boolean logged; + // For backward compatibility with QueryException + private final String errorClass; + private final String host; + public DruidException( final Throwable e, final String msg, final ErrorType type, final String code, + final String errorClass, + final String host, final Map context, final boolean logged ) @@ -265,6 +301,8 @@ public DruidException( super(msg, e); this.type = type; this.code = code; + this.errorClass = errorClass; + this.host = host; this.context = context; this.logged = logged; } @@ -292,7 +330,7 @@ public static DruidException userError(String msg, Object...args) */ public static Builder unsupported(String msg, Object...args) { - return new Builder(ErrorType.USER, msg, args) + return new Builder(ErrorType.UNSUPPORTED, msg, args) .code(QueryException.UNSUPPORTED_OPERATION_ERROR_CODE); } @@ -301,6 +339,19 @@ public static DruidException unsupportedError(String msg, Object...args) return unsupported(msg, args).build(); } + public static Builder unsupportedSql(String msg, Object...args) + { + return new Builder(ErrorType.UNSUPPORTED, msg, args) + .code(QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE) + // For backward 
compatibility: using text since class is not visible here. + .errorClass("org.apache.calcite.plan.RelOptPlanner$CannotPlanException"); + } + + public static DruidException unsupportedSqlError(String msg, Object...args) + { + return unsupportedSql(msg, args).build(); + } + /** * SQL query validation failed, most likely due to a problem in the SQL statement * which the user provided. This is a somewhat less specific then the @@ -384,6 +435,20 @@ public Builder toBuilder() return new Builder(this); } + public static DruidException fromErrorResponse(ErrorResponse response) + { + return new DruidException( + null, + response.getMessage(), + response.getType() == null ? ErrorType.UNKNOWN : response.getType(), + response.getErrorCode(), + response.getErrorClass(), + response.getHost(), + response.getContext(), + false + ); + } + public ErrorType type() { return type; @@ -408,13 +473,14 @@ public String code() public String getMessage() { StringBuilder buf = new StringBuilder(); - buf.append(type.name()).append(" - "); - buf.append(super.getMessage()); + if (type != ErrorType.USER) { + buf.append(type.name()).append(" - "); + } if (!QueryException.UNSUPPORTED_OPERATION_ERROR_CODE.equals(code)) { - buf.append("; Error Code: [") - .append(code) - .append("]"); + buf.append(code) + .append(" - "); } + buf.append(super.getMessage()); if (context != null && context.size() > 0) { int count = 0; buf.append("; "); @@ -469,4 +535,16 @@ public String message() { return super.getMessage(); } + + public ErrorResponse toErrorResponse() + { + return new ErrorResponse( + code, + message(), + errorClass, + host, + type == ErrorType.UNKNOWN ? 
null : type, + context + ); + } } diff --git a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java new file mode 100644 index 000000000000..c1011f0eed24 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonProperty; + +import javax.annotation.Nullable; + +import java.util.Map; + +/** + * Union of the {@link org.apache.druid.query.QueryException} and + * {@link DruidException} fields. Used in tests to deserialize errors which may + * be in either format. 
+ */ +public class ErrorResponse +{ + private final String msg; + private final String code; + private final String errorClass; + private final String host; + private final DruidException.ErrorType type; + private Map context; + + @JsonCreator + public ErrorResponse( + @JsonProperty("error") @Nullable String errorCode, + @JsonProperty("errorMessage") @Nullable String errorMessage, + @JsonProperty("errorClass") @Nullable String errorClass, + @JsonProperty("host") @Nullable String host, + @JsonProperty("type") @Nullable DruidException.ErrorType type, + @JsonProperty("context") @Nullable Map context + ) + { + this.msg = errorMessage; + this.code = errorCode; + this.errorClass = errorClass; + this.host = host; + this.type = type; + this.context = context; + } + + @Nullable + @JsonProperty("error") + @JsonInclude(Include.NON_NULL) + public String getErrorCode() + { + return code; + } + + @JsonProperty("errorMessage") + public String getMessage() + { + return msg; + } + + @JsonProperty + @JsonInclude(Include.NON_NULL) + public String getErrorClass() + { + return errorClass; + } + + @JsonProperty + @JsonInclude(Include.NON_NULL) + public String getHost() + { + return host; + } + + @JsonProperty + @JsonInclude(Include.NON_EMPTY) + public Map getContext() + { + return context; + } + + @JsonProperty + @JsonInclude(Include.NON_NULL) + public DruidException.ErrorType getType() + { + return type; + } +} diff --git a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java index 1069e33555b9..936ef2fc2099 100644 --- a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java +++ b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java @@ -19,15 +19,11 @@ package org.apache.druid.error; -import com.google.common.collect.ImmutableMap; - +import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import 
javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.Response.Status; -import java.util.HashMap; -import java.util.Map; - public class StandardRestExceptionEncoder implements RestExceptionEncoder { private static final RestExceptionEncoder INSTANCE = new StandardRestExceptionEncoder(); @@ -40,24 +36,10 @@ public static RestExceptionEncoder instance() @Override public ResponseBuilder builder(DruidException e) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - builder.put("type", errorCode(e)); - builder.put("message", e.message()); - builder.put("errorMessage", e.getMessage()); - builder.put("errorCode", e.code()); - if (e.context() != null && !e.context().isEmpty()) { - Map context = new HashMap<>(e.context()); - String host = context.remove(DruidException.HOST); - if (host != null) { - builder.put("host", host); - } - if (!context.isEmpty()) { - builder.put("context", context); - } - } return Response .status(status(e)) - .entity(builder.build()); + .entity(e.toErrorResponse()) + .type(MediaType.APPLICATION_JSON); } @Override @@ -66,11 +48,6 @@ public Response encode(DruidException e) return builder(e).build(); } - private Object errorCode(DruidException e) - { - return e.type().name(); - } - // Temporary status mapping private Status status(DruidException e) { @@ -86,6 +63,7 @@ private Status status(DruidException e) case RESOURCE: return Response.Status.fromStatusCode(429); // No predefined status name case USER: + case UNSUPPORTED: return Response.Status.BAD_REQUEST; default: // Should never occur diff --git a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java index 44d5d3965aef..13b09e23484e 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java +++ b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java @@ -114,10 +114,19 @@ public QueryResultPusher( @Nullable public Response push() { + // Create the results 
writer outside the try/catch block. The block uses + // the results writer on failure. But, if start() fails, we have a null + // resultsWriter and we'll get an NPE. Instead, if start() fails, just + // let any exception bubble up. ResultsWriter resultsWriter = null; try { resultsWriter = start(); - + } + catch (RuntimeException e) { + log.warn(e, "Failed to obtain the results writer for query [%s]", queryId); + throw e; + } + try { final Response.ResponseBuilder startResponse = resultsWriter.start(); if (startResponse != null) { startResponse.header(QueryResource.QUERY_ID_RESPONSE_HEADER, queryId); @@ -170,7 +179,7 @@ public Response push() // returning results before a ForbiddenException gets thrown, that means that we've already leaked stuff // that should not have been leaked. I.e. it means, we haven't validated the authorization early enough. if (response != null && response.isCommitted()) { - log.error(re, "Got a forbidden exception for query[%s] after the response was already committed.", queryId); + log.error(re, "Got a forbidden exception for query [%s] after the response was already committed.", queryId); } throw re; } @@ -179,23 +188,27 @@ public Response push() catch (IOException ioEx) { return handleQueryException(resultsWriter, new QueryInterruptedException(ioEx)); } + catch (Throwable t) { + // May only occur in tests. 
+ return handleQueryException(resultsWriter, new QueryInterruptedException(t)); + } finally { if (accumulator != null) { try { accumulator.close(); } catch (IOException e) { - log.warn(e, "Suppressing exception closing accumulator for query[%s]", queryId); + log.warn(e, "Suppressing exception closing accumulator for query [%s]", queryId); } } if (resultsWriter == null) { - log.warn("resultsWriter was null for query[%s], work was maybe done in start() that shouldn't be.", queryId); + log.warn("resultsWriter was null for query [%s], work was maybe done in start() that shouldn't be.", queryId); } else { try { resultsWriter.close(); } catch (IOException e) { - log.warn(e, "Suppressing exception closing accumulator for query[%s]", queryId); + log.warn(e, "Suppressing exception closing accumulator for query [%s]", queryId); } } if (asyncContext != null) { @@ -280,7 +293,7 @@ private Response handleQueryException(ResultsWriter resultsWriter, QueryExceptio catch (IOException ioException) { log.warn( ioException, - "Suppressing IOException thrown sending error response for query[%s]", + "Suppressing IOException thrown sending error response for query [%s]", queryId ); } @@ -300,7 +313,7 @@ private Response handleDruidException(ResultsWriter resultsWriter, DruidExceptio resultsWriter.recordFailure(e); // This case is always a failure because the error happened mid-stream of sending results back. 
Therefore, - // we do not believe that the response stream was actually useable + // we do not believe that the response stream was actually usable counter.incrementFailed(); return null; } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java index e9f21cea173f..191253cfa694 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java @@ -90,7 +90,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringFirstAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw DruidException.unsupportedError("EARLIEST aggregator is not supported for type %s", type); + throw DruidException.unsupportedSqlError("EARLIEST aggregator is not supported for type %s", type); } } }, @@ -110,7 +110,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringLastAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw DruidException.unsupportedError("LATEST aggregator is not supported for type %s", type); + throw DruidException.unsupportedSqlError("LATEST aggregator is not supported for type %s", type); } } }, @@ -129,7 +129,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case STRING: return new StringAnyAggregatorFactory(name, fieldName, maxStringBytes); default: - throw DruidException.unsupportedError("ANY aggregation is not supported for type %s", type); + throw DruidException.unsupportedSqlError("ANY aggregation is not supported for type %s", type); } } }; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java 
b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java index 862fc74bef44..b727be40e693 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java @@ -71,7 +71,7 @@ private static AggregatorFactory createMaxAggregatorFactory( case DOUBLE: return new DoubleMaxAggregatorFactory(name, fieldName, null, macroTable); default: - throw DruidException.unsupportedError("MAX does not support type %s", aggregationType); + throw DruidException.unsupportedSqlError("MAX does not support type %s", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java index 6e13463ccede..59946af5f506 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java @@ -67,7 +67,7 @@ private static AggregatorFactory createMinAggregatorFactory( case DOUBLE: return new DoubleMinAggregatorFactory(name, fieldName, null, macroTable); default: - throw DruidException.unsupportedError("MIN does not support type %s", aggregationType); + throw DruidException.unsupportedSqlError("MIN does not support type %s", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java index 9ab6df585857..e36fd449f382 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java @@ -198,7 +198,7 @@ public RelDataType inferReturnType(SqlOperatorBinding 
sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); if (type instanceof RowSignatures.ComplexSqlType) { - throw DruidException.unsupportedError("Cannot use STRING_AGG on complex input of type %s", type); + throw DruidException.unsupportedSqlError("Cannot use STRING_AGG on complex input of type %s", type); } return Calcites.createSqlTypeWithNullability( sqlOperatorBinding.getTypeFactory(), diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java index 97a306e24dd6..1911c307add0 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java @@ -88,7 +88,7 @@ static AggregatorFactory createSumAggregatorFactory( case DOUBLE: return new DoubleSumAggregatorFactory(name, fieldName, null, macroTable); default: - throw DruidException.unsupportedError("SUM is not supported for type %s", aggregationType); + throw DruidException.unsupportedSqlError("SUM is not supported for type %s", aggregationType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java index 8180644a4a3f..6cf91ac719dc 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java @@ -201,7 +201,7 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw DruidException.unsupportedError( + throw DruidException.unsupportedSqlError( "Cannot use [%s]: [%s]", call.getOperator().getName(), iae.getMessage() @@ -391,7 +391,7 
@@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw DruidException.unsupportedError( + throw DruidException.unsupportedSqlError( "Cannot use [%s]: [%s]", call.getOperator().getName(), iae.getMessage() @@ -689,7 +689,7 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw DruidException.unsupported( + throw DruidException.unsupportedSql( "JSON path [%s] is not supported", call.getOperator().getName() ) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java index 8746eec81590..8614e735bb73 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java @@ -423,7 +423,7 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi * * @param sqlNode the SQL node * @return string representing the column name - * @throws ValidationException if the SQL node is not an SqlIdentifier + * @throws DruidException if the SQL node is not an SqlIdentifier */ public static String parseColumnName(SqlNode sqlNode) { @@ -441,7 +441,7 @@ public static String parseColumnName(SqlNode sqlNode) * @param sqlNode the SQL node * @param timeZone timezone * @return the timestamp string as milliseconds from epoch - * @throws ValidationException if the SQL node is not a SqlTimestampLiteral + * @throws DruidException if the SQL node is not a SqlTimestampLiteral */ public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) { diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index 33f665f6bd11..467ab0864d56 100644 --- 
a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -168,7 +168,7 @@ private SqlStatementHandler createHandler(final SqlNode node) if (query.isA(SqlKind.QUERY)) { return new QueryHandler.SelectHandler(handlerContext, query, explain); } - throw DruidException.unsupportedError( + throw DruidException.unsupportedSqlError( "Unsupported SQL statement %s", node.getKind() ); @@ -372,11 +372,11 @@ private static DruidException parseParserMessage(Exception e) Matcher m = p.matcher(msg); DruidException.Builder builder; if (m.matches()) { - p = Pattern.compile("[ .]*\n\\ s+"); - m = p.matcher(m.group(4).trim()); - String choices = m.replaceAll(", "); + Pattern p2 = Pattern.compile("[ .]*\n\\ s+"); + Matcher m2 = p2.matcher(m.group(4).trim()); + String choices = m2.replaceAll(", "); builder = DruidException.user( - "Line %s, Column %s: unexpected token %s", + "Line %s, Column %s: unexpected token '%s'", m.group(2), m.group(3), m.group(1) @@ -387,6 +387,7 @@ private static DruidException parseParserMessage(Exception e) } return builder .code(QueryException.SQL_PARSE_FAILED_ERROR_CODE) + .errorClass(SqlParseException.class.getName()) .build(); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java index 1dd15428fc57..6064b6aac155 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java @@ -128,7 +128,7 @@ public void reduce( double exprResultDouble = exprResult.asDouble(); if (Double.isNaN(exprResultDouble) || Double.isInfinite(exprResultDouble)) { String expression = druidExpression.getExpression(); - throw DruidException.unsupported("Expression not supported in SQL : %s", expression) + throw DruidException.unsupportedSql("Expression not 
supported in SQL : %s", expression) .context("Evaluates to", Double.toString(exprResultDouble)) .context("Suggestion", StringUtils.format( "You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') or VARCHAR ('CAST(%s as VARCHAR)') or change the expression itself", diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index 592044715c90..f484cf971211 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -170,7 +170,7 @@ private String validateAndGetDataSourceForIngest() } if (insert.getTargetColumnList() != null) { - throw DruidException.unsupportedError( + throw DruidException.unsupportedSqlError( "%s with a target column list is not supported", operationName() ); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index 43ebd5f35a4c..f34dfe11393a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -57,7 +57,6 @@ import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequences; -import org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.query.Query; import org.apache.druid.query.QueryException; @@ -623,28 +622,23 @@ private RelRoot possiblyWrapRootWithOuterLimitFromContext(RelRoot root) private DruidException buildSQLPlanningError(RelOptPlanner.CannotPlanException exception) { - Logger logger = log; - if (!handlerContext.queryContext().isDebug()) { - logger = log.noStackTrace(); - } - DruidException.Builder builder = 
DruidException.internalError( - "Unsupported SQL query - failed to convert to a Druid native query" - ) - .cause(exception) - .context("SQL", handlerContext.plannerContext().getSql()) - .code(QueryException.QUERY_UNSUPPORTED_ERROR_CODE); String errorMessage = handlerContext.plannerContext().getPlanningError(); if (null == errorMessage && exception instanceof UnsupportedSQLQueryException) { - builder.context("Specific error", errorMessage); + errorMessage = exception.getMessage(); } if (null == errorMessage) { - builder.context("Note", "Please check Broker logs for additional details."); + errorMessage = "Please check Broker logs for additional details."; } else { - // Planning errors are more like hints: it isn't guaranteed that the - // planning error is actually what went wrong. - builder.context("Possible error", errorMessage); + // Planning errors are more like hints: it isn't guaranteed that the planning error is actually what went wrong. + errorMessage = "Possible error: " + errorMessage; } - return builder.build(logger); + // Finally, add the query itself to error message that user will get. + return DruidException + .unsupportedSql("Query not supported. 
%s", errorMessage) + .code(QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE) + .cause(exception) + .context("SQL", handlerContext.plannerContext().getSql()) + .build(); } public static class SelectHandler extends QueryHandler diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java index 301a9efab581..c8a0b8129d47 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java @@ -360,7 +360,7 @@ public static JoinType toDruidJoinType(JoinRelType calciteJoinType) case INNER: return JoinType.INNER; default: - throw DruidException.unsupportedError("Cannot handle joinType '%s'", calciteJoinType); + throw DruidException.unsupportedSqlError("Cannot handle joinType '%s'", calciteJoinType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java index 7840a9820ef3..a168b9ad04df 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java @@ -126,7 +126,7 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont return Calcites.calciteDateTimeLiteralToJoda(literal, plannerContext.getTimeZone()).getMillis(); case NULL: if (!literal.isNull()) { - throw DruidException.unsupportedError( + throw DruidException.unsupportedSqlError( "Non-null constant %s for a NULL literal", literal ); @@ -136,7 +136,7 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont case TIME: case TIME_WITH_LOCAL_TIME_ZONE: default: - throw DruidException.unsupportedError( + throw DruidException.unsupportedSqlError( "Literal %s type %s is not supported", literal, literal.getType().getSqlTypeName() diff --git 
a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java index 1220340e2fd2..0b6650649941 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java @@ -92,7 +92,7 @@ public class SqlStatementTest public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); - private SpecificSegmentsQuerySegmentWalker walker = null; + private SpecificSegmentsQuerySegmentWalker walker; private TestRequestLogger testRequestLogger; private ListeningExecutorService executorService; private SqlStatementFactory sqlStatementFactory; @@ -289,7 +289,7 @@ public void testDirectSyntaxError() catch (DruidException e) { // Expected Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + QueryException.SQL_PARSE_FAILED_ERROR_CODE, e.code() ); } @@ -374,7 +374,7 @@ public void testHttpSyntaxError() catch (DruidException e) { // Expected Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + QueryException.SQL_PARSE_FAILED_ERROR_CODE, e.code() ); } @@ -463,7 +463,7 @@ public void testPrepareSyntaxError() catch (DruidException e) { // Expected Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + QueryException.SQL_PARSE_FAILED_ERROR_CODE, e.code() ); } diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index f825423ba9c4..2be34a5e1018 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -34,6 +34,7 @@ import org.apache.druid.common.exception.AllowedRegexErrorResponseTransformStrategy; import org.apache.druid.common.exception.ErrorResponseTransformStrategy; import org.apache.druid.common.guava.SettableSupplier; +import 
org.apache.druid.error.ErrorResponse; import org.apache.druid.jackson.DefaultObjectMapper; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.NonnullPair; @@ -1338,12 +1339,12 @@ public void testExplainCountStar() throws Exception @Test public void testCannotParse() throws Exception { - QueryException exception = postSyncForException("FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); + ErrorResponse exception = postSyncForException("FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); Assert.assertNotNull(exception); Assert.assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), exception.getErrorCode()); Assert.assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue(exception.getMessage().contains("Encountered \"FROM\" at line 1, column 1.")); + Assert.assertTrue(exception.getMessage().contains("Line 1, Column 1: unexpected token 'FROM'")); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1351,7 +1352,7 @@ public void testCannotParse() throws Exception @Test public void testCannotValidate() throws Exception { - QueryException exception = postSyncForException("SELECT dim4 FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); + ErrorResponse exception = postSyncForException("SELECT dim4 FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); Assert.assertNotNull(exception); Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), exception.getErrorCode()); @@ -1366,11 +1367,11 @@ public void testCannotConvert() throws Exception { // SELECT + ORDER unsupported final SqlQuery unsupportedQuery = createSimpleQueryWithId("id", "SELECT dim1 FROM druid.foo ORDER BY dim1"); - QueryException exception = postSyncForException(unsupportedQuery, Status.BAD_REQUEST.getStatusCode()); + ErrorResponse exception = postSyncForException(unsupportedQuery, Status.BAD_REQUEST.getStatusCode()); Assert.assertTrue((Boolean) 
req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)); Assert.assertNotNull(exception); - Assert.assertEquals("SQL query is unsupported", exception.getErrorCode()); + Assert.assertEquals(QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE, exception.getErrorCode()); Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorClass(), exception.getErrorClass()); Assert.assertTrue( exception.getMessage() @@ -1390,7 +1391,7 @@ public void testCannotConvert() throws Exception public void testCannotConvert_UnsupportedSQLQueryException() throws Exception { // max(string) unsupported - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( "SELECT max(dim1) FROM druid.foo", Status.BAD_REQUEST.getStatusCode() ); @@ -1399,9 +1400,7 @@ public void testCannotConvert_UnsupportedSQLQueryException() throws Exception Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorCode(), exception.getErrorCode()); Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorClass(), exception.getErrorClass()); Assert.assertTrue( - exception.getMessage() - .contains("Query not supported. 
" + - "Possible error: Max aggregation is not supported for 'STRING' type") + exception.getMessage().contains("MAX does not support type STRING") ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); @@ -1441,7 +1440,7 @@ public void testUnsupportedQueryThrowsException() throws Exception { String errorMessage = "This will be supported in Druid 9999"; failOnExecute(errorMessage); - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery( "SELECT ANSWER TO LIFE", ResultFormat.OBJECT, @@ -1537,7 +1536,7 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() String errorMessage = "This will be supported in Druid 9999"; failOnExecute(errorMessage); - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery( "SELECT ANSWER TO LIFE", ResultFormat.OBJECT, @@ -1589,7 +1588,7 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() onExecute = s -> { throw new AssertionError(errorMessage); }; - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery( "SELECT ANSWER TO LIFE", ResultFormat.OBJECT, @@ -1717,7 +1716,7 @@ public void testQueryTimeoutException() throws Exception sqlQueryId ); - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery( "SELECT CAST(__time AS DATE), dim1, dim2, dim3 FROM druid.foo GROUP by __time, dim1, dim2, dim3 ORDER BY dim2 DESC", ResultFormat.OBJECT, @@ -1877,7 +1876,7 @@ public void testQueryContextException() throws Exception public void testQueryContextKeyNotAllowed() throws Exception { Map queryContext = ImmutableMap.of(DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY, "all"); - QueryException exception = postSyncForException( + ErrorResponse exception = postSyncForException( new SqlQuery("SELECT 1337", ResultFormat.OBJECT, false, false, false, 
queryContext, null), Status.BAD_REQUEST.getStatusCode() ); @@ -1886,7 +1885,7 @@ public void testQueryContextKeyNotAllowed() throws Exception Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), exception.getErrorCode()); MatcherAssert.assertThat( exception.getMessage(), - CoreMatchers.containsString("Cannot execute query with context parameter [sqlInsertSegmentGranularity]") + CoreMatchers.containsString("Query context parameter 'sqlInsertSegmentGranularity' is not allowed") ); checkSqlRequestLog(false); } @@ -2018,16 +2017,16 @@ private Response postForSyncResponse(SqlQuery query, MockHttpServletRequest req) return response; } - private QueryException postSyncForException(String s, int expectedStatus) throws IOException + private ErrorResponse postSyncForException(String s, int expectedStatus) throws IOException { return postSyncForException(createSimpleQueryWithId("id", s), expectedStatus); } - private QueryException postSyncForException(SqlQuery query, int expectedStatus) throws IOException + private ErrorResponse postSyncForException(SqlQuery query, int expectedStatus) throws IOException { final Response response = postForSyncResponse(query, req); assertStatusAndCommonHeaders(response, expectedStatus); - return deserializeResponse(response, QueryException.class); + return deserializeResponse(response, ErrorResponse.class); } private T deserializeResponse(Response resp, Class clazz) throws IOException @@ -2037,9 +2036,14 @@ private T deserializeResponse(Response resp, Class clazz) throws IOExcept private byte[] responseToByteArray(Response resp) throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ((StreamingOutput) resp.getEntity()).write(baos); - return baos.toByteArray(); + if (resp.getEntity() instanceof StreamingOutput) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ((StreamingOutput) resp.getEntity()).write(baos); + return baos.toByteArray(); + } else { + String foo = 
JSON_MAPPER.writeValueAsString(resp.getEntity()); + return JSON_MAPPER.writeValueAsBytes(resp.getEntity()); + } } private String getContentType(Response resp) From e43ba32da56d3ffbb4ac0cd39feadee7deb27268 Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Tue, 21 Feb 2023 09:46:54 -0800 Subject: [PATCH 08/17] Revision of DruidException structure Incorporates review feedback --- .../apache/druid/msq/exec/MSQInsertTest.java | 14 +- .../apache/druid/msq/exec/MSQReplaceTest.java | 7 +- .../apache/druid/msq/exec/MSQSelectTest.java | 13 +- .../druid/error/DruidAssertionError.java | 44 ++ .../apache/druid/error/DruidException.java | 544 ++++-------------- .../org/apache/druid/error/ErrorAudience.java | 40 ++ .../org/apache/druid/error/ErrorCategory.java | 120 ++++ .../org/apache/druid/error/ErrorResponse.java | 12 +- .../apache/druid/error/MetricCategory.java | 8 + .../java/org/apache/druid/error/README.md | 383 ++++++++++++ .../org/apache/druid/error/SqlParseError.java | 50 ++ .../druid/error/SqlUnsupportedError.java | 56 ++ .../druid/error/SqlValidationError.java | 59 ++ .../error/StandardRestExceptionEncoder.java | 48 +- .../druid/server/QueryResultPusher.java | 7 +- .../org/apache/druid/sql/DirectStatement.java | 7 +- .../EarliestLatestAnySqlAggregator.java | 26 +- .../aggregation/builtin/MaxSqlAggregator.java | 7 +- .../aggregation/builtin/MinSqlAggregator.java | 5 +- .../builtin/StringSqlAggregator.java | 5 +- .../aggregation/builtin/SumSqlAggregator.java | 5 +- .../NestedDataOperatorConversions.java | 13 +- .../calcite/parser/DruidSqlParserUtils.java | 32 +- .../sql/calcite/planner/DruidPlanner.java | 72 +-- .../sql/calcite/planner/DruidRexExecutor.java | 26 +- .../sql/calcite/planner/IngestHandler.java | 43 +- .../sql/calcite/planner/QueryHandler.java | 22 +- .../planner/RelParameterizerShuttle.java | 11 +- .../sql/calcite/rel/DruidJoinQueryRel.java | 5 +- .../calcite/rule/DruidLogicalValuesRule.java | 11 +- .../druid/sql/calcite/run/SqlEngines.java | 7 +- 
.../apache/druid/sql/SqlStatementTest.java | 28 +- .../sql/calcite/BaseCalciteQueryTest.java | 9 +- .../sql/calcite/CalciteInsertDmlTest.java | 70 +-- .../CalciteMultiValueStringQueryTest.java | 6 +- .../calcite/CalciteNestedDataQueryTest.java | 4 +- .../calcite/CalciteParameterQueryTest.java | 9 +- .../druid/sql/calcite/CalciteQueryTest.java | 58 +- .../sql/calcite/CalciteReplaceDmlTest.java | 43 +- .../sql/calcite/CalciteSelectQueryTest.java | 5 +- .../druid/sql/http/SqlResourceTest.java | 16 +- 41 files changed, 1193 insertions(+), 757 deletions(-) create mode 100644 processing/src/main/java/org/apache/druid/error/DruidAssertionError.java create mode 100644 processing/src/main/java/org/apache/druid/error/ErrorAudience.java create mode 100644 processing/src/main/java/org/apache/druid/error/ErrorCategory.java create mode 100644 processing/src/main/java/org/apache/druid/error/MetricCategory.java create mode 100644 processing/src/main/java/org/apache/druid/error/README.md create mode 100644 processing/src/main/java/org/apache/druid/error/SqlParseError.java create mode 100644 processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java create mode 100644 processing/src/main/java/org/apache/druid/error/SqlValidationError.java diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java index 940798c2b177..31c19a02e11e 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java @@ -24,8 +24,8 @@ import com.google.common.collect.ImmutableSet; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; -import org.apache.druid.error.DruidException; -import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.SqlParseError; +import 
org.apache.druid.error.SqlValidationError; import org.apache.druid.hll.HyperLogLogCollector; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Intervals; @@ -542,7 +542,7 @@ public void testInsertOnFoo1WithMultiValueMeasureGroupBy() .setExpectedDataSource("foo1") .setQueryContext(context) .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "Aggregate expression is illegal in GROUP BY clause")) )) @@ -967,7 +967,7 @@ public void testInsertWrongTypeTimestamp() .setExpectedRowSignature(rowSignature) .setQueryContext(context) .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "Field \"__time\" must be of type TIMESTAMP")) )) @@ -980,7 +980,7 @@ public void testIncorrectInsertQuery() testIngestQuery().setSql( "insert into foo1 select __time, dim1 , count(*) as cnt from foo where dim1 is not null group by 1, 2 clustered by dim1") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlParseError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause")) )) @@ -1098,7 +1098,7 @@ public void testInsertLimitWithPeriodGranularityThrowsException() + "LIMIT 50 " + "PARTITIONED BY MONTH") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) )) @@ -1116,7 +1116,7 @@ public void testInsertOffsetThrowsException() + "OFFSET 10" + "PARTITIONED BY ALL TIME") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have an OFFSET")) )) diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java index c8dd8ed7e0b4..f6854361f958 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.error.DruidException; import org.apache.druid.common.config.NullHandling; import org.apache.druid.indexing.common.actions.RetrieveUsedSegmentsAction; @@ -334,7 +335,7 @@ public void testReplaceIncorrectSyntax() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( 
"Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.")) ) @@ -584,7 +585,7 @@ public void testReplaceLimitWithPeriodGranularityThrowsException() + "PARTITIONED BY MONTH") .setQueryContext(context) .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) )) @@ -602,7 +603,7 @@ public void testReplaceOffsetThrowsException() + "OFFSET 10" + "PARTITIONED BY ALL TIME") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have an OFFSET")) )) diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java index 2c0728389186..c27260247c45 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java @@ -25,7 +25,8 @@ import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.JsonInputFormat; import org.apache.druid.data.input.impl.LocalInputSource; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.frame.util.DurableStorageUtils; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.ISE; @@ -1188,7 +1189,7 @@ public void testIncorrectSelectQuery() 
testSelectQuery() .setSql("select a from ") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Encountered \"from \"")) )) .setQueryContext(context) @@ -1203,7 +1204,7 @@ public void testSelectOnInformationSchemaSource() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "Cannot query table INFORMATION_SCHEMA.SCHEMATA with SQL engine 'msq-task'.")) ) @@ -1219,7 +1220,7 @@ public void testSelectOnSysSource() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "Cannot query table sys.segments with SQL engine 'msq-task'.")) ) @@ -1235,7 +1236,7 @@ public void testSelectOnSysSourceWithJoin() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "Cannot query table sys.segments with SQL engine 'msq-task'.")) ) @@ -1252,7 +1253,7 @@ public void testSelectOnSysSourceContainingWith() .setQueryContext(context) .setExpectedValidationErrorMatcher( CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( "Cannot query table sys.segments with SQL engine 'msq-task'.")) ) diff --git a/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java b/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java 
new file mode 100644 index 000000000000..2e9de7fba9c2 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +public class DruidAssertionError extends DruidException +{ + public DruidAssertionError(String msg, Object...args) + { + super(msg, args); + } + + public DruidAssertionError(Throwable cause, String msg, Object...args) + { + super(cause, msg, args); + } + + public DruidAssertionError(Throwable cause) + { + super(cause, cause.getMessage()); + } + + @Override + public ErrorCategory category() + { + return ErrorCategory.INTERNAL; + } +} diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index de18259900ba..0e514767cffe 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -1,36 +1,20 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - package org.apache.druid.error; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Strings; import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.java.util.common.logger.Logger; -import org.apache.druid.query.QueryException; import javax.annotation.Nullable; +import javax.annotation.concurrent.NotThreadSafe; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; /** * Represents an error condition exposed to the user and/or operator of Druid. + * Each error category is given by a subclass of this class. + *

* Not needed for purely internal exceptions thrown and caught within Druid itself. * There are categories of error that determine the general form of corrective * action, and also determine HTTP (or other API) status codes. @@ -42,484 +26,177 @@ * still logging the full details. Typical usage: *


  * if (something_is_wrong) {
- *   throw DruidException.user("File not found")
- *       .context("File name", theFile.getName())
- *       .context("Directory", theFile.getParent())
- *       .build();
+ *   throw new NotFoundException("File not found")
+ *       .addContext("File name", theFile.getName())
+ *       .addContext("Directory", theFile.getParent());
  * }
  * 
*

- * Exceptions are immutable. In many cases, an error is thrown low in the code, - * bit context is known at a higher level. In this case, the higher code should - * catch the exception, convert back to a builder, add context, and throw the - * new exception. The original call stack is maintained. Example: + * Exceptions are mutable and may not be modified by two threads concurrently. + * However, it is highly unlikely that such concurrent access would occur: that's + * not how exceptions work. Exceptions can be exchanged across threads, as long + * as only one thread at a time mutates the exception. + *

+ * Druid exceptions allow the calling method (or thread) to add context and set + * the host name. It is often easier for a higher-level method to fill in this + * information than to pass the information into every method. For example: *


- * catch (DruidExceptin e) {
- *   throw e.toBuilder().
- *       .context("File name", theFile.getName())
- *       .context("Directory", theFile.getParent())
- *       .build();
+ * void doTheRead(Reader reader) {
+ *   try {
+ *      // read some stuff
+ *   } catch (IOException e) {
+ *     throw new DruidIOException(e);
+ *   }
+ * }
+ *
+ * void outer(File theFile) {
+ *   try (Reader reader = open(theFile)) {
+ *     doTheRead(reader);
+ *   } catch (DruidException e) {
+ *      throw e.addContext("File name", theFile.getName());
+ *   }
  * }
  * 
*/ -public class DruidException extends RuntimeException +@NotThreadSafe +public abstract class DruidException extends RuntimeException { /** - * The {@code ErrorType} is a high-level classification of errors that balances - * the idea of persona and code knowledge. The codes roughly identify who is most - * likely the persona that will resolve the error. In some case (e.g. {@code USER}), - * the person is clear: the person using Druid. In other cases (e.g. {@code RESOURCE}), - * the target persona is amgibuous: is it the person who submitted the query? The person - * who installed Druid? The system admin? The person who decided how much resource - * the project could afford? - *

- * Often the code is not sure of who the exact person is, but the code knows about - * the kind of error (e.g. {@code NETWORK}). In this case, it is up to each - * site to determine who is in charge of fixing this particular network error: the user - * (bad HTTP address), admin (forgot to open a port), system admin (a router died), - * hardware vendor (a network card failed), etc. - */ - public enum ErrorType - { - /** - * General case of an error due to something the user asked to do in an REST - * request. Translates to an HTTP status 400 (BAD_REQUET) for a REST call - * (or the equivalent for other APIs.) - */ - USER, - - /** - * Special case of a user error where a resource is not found and we wish - * to return a 404 (NOT_FOUND) HTTP status (or the equivalent for other - * APIs.) - */ - NOT_FOUND, - - /** - * Special case of a user error where the user asked for a feature that - * Druid does not support. - */ - UNSUPPORTED, - - /** - * Error due to a problem beyond the user's control, such as an assertion - * failed, unsupported operation, etc. These indicate problems with the software - * where the fix is either a workaround or a bug fix. Such error should only - * be raised for "should never occur" type situations. - */ - INTERNAL, - - /** - * Error for a resource limit: memory, CPU, slots or so on. The workaround is - * generally to try later, get more resources, reduce load or otherwise resolve - * the resource pressure issue. - */ - RESOURCE, - - /** - * Similar to RESOURCE, except indicates a timeout, perhaps due to load, due - * to an external system being unavailable, etc. - */ - TIMEOUT, - - /** - * Error in configuration. Indicates that the administrator made a mistake during - * configuration or setup. The solution is for the administrator (not the end user) - * to resolve the issue. - */ - CONFIG, - - /** - * Indicates a network error of some kind: intra-Druid, client-to-Druid, - * Druid-to-external system, etc. 
Generally the end user cannot fix these errors: - * it requires a DevOps person to resolve. - */ - NETWORK, - - /** - * Indicates an exception deserialized from a {@link org.apache.druid.query.QueryException} - * which has no error type. - */ - UNKNOWN - }; - - public static final String HOST = "Host"; - - public static class Builder - { - private final DruidException source; - private final ErrorType type; - private final String msg; - private String code; - private Throwable e; - private Map context; - - // For backward compatibility with QueryException - private String errorClass; - private String host; - - private Builder( - final ErrorType type, - final String msg, - @Nullable final Object[] args) - { - this.source = null; - this.type = type; - this.code = QueryException.UNKNOWN_EXCEPTION_ERROR_CODE; - this.msg = StringUtils.format(msg, args); - } - - private Builder(DruidException e) - { - this.source = e; - this.type = e.type; - this.code = e.code; - this.msg = e.message(); - this.e = e.getCause() == null ? e : e.getCause(); - this.context = e.context == null ? null : new HashMap<>(e.context); - } - - public Builder code(String code) - { - this.code = code; - if (QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE.equals(code)) { - // Not always right, but close enough. For backward compatibility with - // code that needs the (now deprecated) error class. - this.errorClass = "org.apache.calcite.tools.ValidationException"; - } - return this; - } - - public Builder cause(Throwable e) - { - this.e = e; - this.errorClass = e.getClass().getName(); - if (!msg.equals(e.getMessage())) { - context("Cause", e.getMessage()); - } - return this; - } - - public Builder context(String key, Object value) - { - if (context == null) { - // Used linked hash map to preserve order - context = new LinkedHashMap(); - } - context.put(key, value == null ? 
"" : value.toString()); - return this; - } - - public Builder errorClass(String errorClass) - { - this.errorClass = errorClass; - return this; - } - - private boolean wasLogged() - { - return source != null && source.logged; - } - - private DruidException build(boolean logged) - { - return new DruidException( - e, - msg, - type, - code, - errorClass, - host, - // Used linked hash map to preserve order - context == null ? null : new LinkedHashMap<>(context), - logged || wasLogged() - ); - } - - public DruidException build() - { - return build(false); - } - - public DruidException build(Logger logger) - { - DruidException e = build(true); - if (wasLogged()) { - return e; - } - switch (type) { - case CONFIG: - case INTERNAL: - logger.error(e, e.getMessage()); - break; - case NETWORK: - case RESOURCE: - logger.warn(e, e.getMessage()); - break; - default: - logger.info(e, e.getMessage()); - break; - } - return e; - } - - @Override - public String toString() - { - return build().getMessage(); - } - } - - private final ErrorType type; - - /** - * Error codes are categories within the top-level codes. They mimic prior Druid - * conventions, although prior codes were very sparse. The code is a string, not - * an enum, because Druid has no clear catalog of such codes at present. - *

- * For now, error codes are enumerated in {@link org.apache.druid.query.QueryException}. + * The context provides additional information about an exception which may + * be redacted on a managed system. Provide essential information in the + * message itself. */ - private final String code; - private final Map context; - private final boolean logged; - - // For backward compatibility with QueryException - private final String errorClass; - private final String host; - - public DruidException( - final Throwable e, - final String msg, - final ErrorType type, - final String code, - final String errorClass, - final String host, - final Map context, - final boolean logged - ) - { - super(msg, e); - this.type = type; - this.code = code; - this.errorClass = errorClass; - this.host = host; - this.context = context; - this.logged = logged; - } + // Linked hash map to preserve order + private Map context; /** - * Build an error that indicates the user provided incorrect input. - * The user can correct the error by correcting their input (their query, - * REST message, etc.) + * Name of the host on which the error occurred, when the error occurred on + * a host other than the one to which the original request was sent. For example, + * in a query, if the error occurs on a historical, this field names that historical. */ - public static Builder user(String msg, Object...args) - { - return new Builder(ErrorType.USER, msg, args); - } - - public static DruidException userError(String msg, Object...args) - { - return user(msg, args).build(); - } + private String host; /** - * User error for an unsupported operation. We assume the problem is that the user - * asked Druid to do something it cannot do, and so the user shouldn't ask. This - * is not an indication that Druid should provide an operation, and it is - * an internal error that it does not. + * Good errors provide a suggestion to resolve the issue. 
Such suggestions should + * focus on a simple Druid installation where the user is also the admin. More + * advanced deployments may find such helpful suggestions to be off the mark. + * To resolve this conflict, add suggestions separately from the message itself + * so each consumer can decide whether to include it or not. */ - public static Builder unsupported(String msg, Object...args) - { - return new Builder(ErrorType.UNSUPPORTED, msg, args) - .code(QueryException.UNSUPPORTED_OPERATION_ERROR_CODE); - } - - public static DruidException unsupportedError(String msg, Object...args) - { - return unsupported(msg, args).build(); - } - - public static Builder unsupportedSql(String msg, Object...args) - { - return new Builder(ErrorType.UNSUPPORTED, msg, args) - .code(QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE) - // For backward compatibility: using text since class is not visible here. - .errorClass("org.apache.calcite.plan.RelOptPlanner$CannotPlanException"); - } + private String suggestion; - public static DruidException unsupportedSqlError(String msg, Object...args) + public DruidException( + final String msg, + @Nullable final Object...args) { - return unsupportedSql(msg, args).build(); + super(StringUtils.format(msg, args)); } - /** - * SQL query validation failed, most likely due to a problem in the SQL statement - * which the user provided. This is a somewhat less specific then the - * {@link #unsupported(String, Object...)} error, which says that validation failed - * because Druid doesn't support something. Use the validation error for case that - * are mostly likely because the SQL really is wrong. 
- */ - public static Builder validation(String msg, Object...args) + public DruidException( + final Throwable cause, + final String msg, + @Nullable final Object...args) { - return new Builder(ErrorType.USER, msg, args) - .code(QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE); + super(StringUtils.format(msg, args), cause); } - public static DruidException validationError(String msg, Object...args) + public DruidException setHost(String host) { - return validation(msg, args).build(); + this.host = host; + return this; } - /** - * Build an error that indicates that something went wrong internally - * with Druid. This is the equivalent of an assertion failure: errors - * of this type indicate a bug in the code: there is nothing the user - * can do other than request a fix or find a workaround. - */ - public static Builder internalError(String msg, Object...args) + @JsonProperty + public String getErrorClass() { - return new Builder(ErrorType.INTERNAL, msg, args); + String errorClass = errorClass(); + return errorClass == null ? getClass().getName() : errorClass; } - public static Builder notFound(String msg, Object...args) + @JsonProperty + @JsonInclude(JsonInclude.Include.NON_NULL) + public String getHost() { - return new Builder(ErrorType.NOT_FOUND, msg, args); + return host; } - public static DruidException unexpected(Exception e) + public DruidException suggestion(String suggestion) { - return internalError(e.getMessage()).cause(e).build(); + this.suggestion = suggestion; + return this; } - /** - * Build an error that indicates Druid reached some kind of resource limit: - * memory, disk, CPU, etc. Generally the resolution is to reduce load or - * add resources to Druid. 
- */ - public static Builder resourceError(String msg, Object...args) + public String getSuggestion() { - return new Builder(ErrorType.RESOURCE, msg, args); + return suggestion; } - public static Builder timeoutError(String msg, Object...args) + public DruidException addContext(String key, String value) { - return new Builder(ErrorType.TIMEOUT, msg, args); + if (context == null) { + context = new LinkedHashMap<>(); + } + context.put(key, value); + return this; } - /** - * Build an error that indicates a configuration error which generally means - * that Druid won't start until the user corrects a configuration file or - * similar artifact. - */ - public static Builder configError(String msg, Object...args) - { - return new Builder(ErrorType.CONFIG, msg, args); - } + public abstract ErrorCategory category(); - /** - * Network I/O, connection, timeout or other error that indicates a problem - * with the client-to-Druid connection, and internal Druid-to-Druid connection, - * or a Druid-to-External error. - */ - public static Builder networkError(String msg, Object...args) + public String errorClass() { - return new Builder(ErrorType.NETWORK, msg, args); + return getClass().getName(); } /** - * Convert the exception back into a builder, generally so a higher level - * of code can add more context. + * The error code is a summary of the error returned to the user. Multiple errors + * map to the same code: the code is more like a category of errors. Error codes + * must be backward compatible, even if the prior "codes" are awkward. */ - public Builder toBuilder() + @Nullable + @JsonProperty("error") + public String getErrorCode() { - return new Builder(this); - } - - public static DruidException fromErrorResponse(ErrorResponse response) - { - return new DruidException( - null, - response.getMessage(), - response.getType() == null ? 
ErrorType.UNKNOWN : response.getType(), - response.getErrorCode(), - response.getErrorClass(), - response.getHost(), - response.getContext(), - false - ); - } - - public ErrorType type() - { - return type; - } - - public Map context() - { - return context; + return category().userText(); } - public String context(String key) - { - return context.get(key); - } - - public String code() + public String message() { - return code; + return super.getMessage(); } + @JsonProperty("errorMessage") @Override public String getMessage() { StringBuilder buf = new StringBuilder(); - if (type != ErrorType.USER) { - buf.append(type.name()).append(" - "); - } - if (!QueryException.UNSUPPORTED_OPERATION_ERROR_CODE.equals(code)) { - buf.append(code) - .append(" - "); - } buf.append(super.getMessage()); + String sep = "; "; if (context != null && context.size() > 0) { - int count = 0; - buf.append("; "); for (Map.Entry entry : context.entrySet()) { - if (count > 0) { - buf.append(", "); - } + buf.append(sep); + sep = ", "; buf.append("\n") .append(entry.getKey()) .append(": [") .append(entry.getValue()) .append("]"); - count++; } } + if (!Strings.isNullOrEmpty(host)) { + buf.append(sep).append("Host: ").append(host); + } return buf.toString(); } public String getDisplayMessage() { StringBuilder buf = new StringBuilder(); - switch (type) { - case CONFIG: - buf.append("Configuration error: "); - break; - case RESOURCE: - buf.append("Resource error: "); - break; - case INTERNAL: - buf.append("Internal error: "); - break; - default: - break; + String prefix = category().prefix(); + if (!Strings.isNullOrEmpty(prefix)) { + buf.append(prefix).append(" - "); } buf.append(super.getMessage()); - if (!QueryException.UNSUPPORTED_OPERATION_ERROR_CODE.equals(code)) { - buf.append("\nError Code: ") - .append(code); - } + buf.append("\nError Code: ").append(category().userText()); if (context != null && context.size() > 0) { for (Map.Entry entry : context.entrySet()) { buf.append("\n") @@ -528,22 
+205,29 @@ public String getDisplayMessage() .append(entry.getValue()); } } + if (!Strings.isNullOrEmpty(host)) { + buf.append("\nHost: ").append(host); + } + if (!Strings.isNullOrEmpty(suggestion)) { + buf.append("\nSuggestion: ").append(suggestion); + } return buf.toString(); } - public String message() + @JsonProperty + @JsonInclude(JsonInclude.Include.NON_EMPTY) + public Map getContext() { - return super.getMessage(); + return context; } public ErrorResponse toErrorResponse() { return new ErrorResponse( - code, + category().userText(), message(), - errorClass, + errorClass(), host, - type == ErrorType.UNKNOWN ? null : type, context ); } diff --git a/processing/src/main/java/org/apache/druid/error/ErrorAudience.java b/processing/src/main/java/org/apache/druid/error/ErrorAudience.java new file mode 100644 index 000000000000..698d1256f754 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/ErrorAudience.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +/** + * The set of persona (audiences) for Druid exceptions. 
The audience is + * not a technical factor: it is merely a way to encourage developers to + * think about who can act on an error message. All errors go to the user + * who submitted the request, but perhaps in a simplified, redacted form. + * Such messages also target the actual audience: the Druid admin, the + * cluster admin, a Druid developer, etc. + *

+ * Sometimes the target audience is not known, or is ambiguous. In that + * case, just use {@link ErrorAudience#VARIOUS}. + */ +public enum ErrorAudience +{ + USER, + DRUID_ADMIN, + CLUSTER_ADMIN, + DRUID_DEVELOPER, + VARIOUS +} diff --git a/processing/src/main/java/org/apache/druid/error/ErrorCategory.java b/processing/src/main/java/org/apache/druid/error/ErrorCategory.java new file mode 100644 index 000000000000..541c6c2fb57f --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/ErrorCategory.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +import org.apache.druid.query.QueryException; + +import java.net.HttpURLConnection; + +/** + * The error category is a combination of the "persona" for the error, and the + * kind of error. The "persona" is the audience for the error: who can fix the + * problem. Often the persona is clear: it is the end user if, say, the user + * asks us to do something that doesn't make sense. (Data source does not exist, + * SQL with a syntax error, etc.) Other times, the persona is ambiguous: who is + * responsible for a network error? + *

+ * The persona is not fine enough grain for all needs. So, within a persona, there + * can be finer-grain functional areas, such as the various kinds of SQL errors. + *

+ * Different kinds of errors require different HTTP status codes for the REST API. + *

+ * To add structure to this confusion of factors, we define a set of error categories: + * one for each combination of functional area, user and HTTP status. The categories + * are more an art than a science: they are influenced by the need of the consumers + * of errors: especially managed Druid installations. + */ +public enum ErrorCategory +{ + SQL_PARSE( + QueryException.SQL_PARSE_FAILED_ERROR_CODE, + HttpURLConnection.HTTP_BAD_REQUEST, + ErrorAudience.USER, + "", + MetricCategory.FAILED + ), + SQL_VALIDATION( + QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, + HttpURLConnection.HTTP_BAD_REQUEST, + ErrorAudience.USER, + "", + MetricCategory.FAILED + ), + SQL_UNSUPPORTED( + QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE, + HttpURLConnection.HTTP_BAD_REQUEST, + ErrorAudience.USER, + "", + MetricCategory.FAILED + ), + INTERNAL( + QueryException.UNSUPPORTED_OPERATION_ERROR_CODE, + HttpURLConnection.HTTP_BAD_REQUEST, + ErrorAudience.DRUID_DEVELOPER, + "Internal error", + MetricCategory.FAILED + ); + + private final String userText; + private final int httpStatus; + private final ErrorAudience audience; + private final String messagePrefix; + private final MetricCategory metricCategory; + + ErrorCategory( + String userText, + int httpStatus, + ErrorAudience audience, + String messagePrefix, + MetricCategory metricCategory + ) + { + this.userText = userText; + this.httpStatus = httpStatus; + this.audience = audience; + this.messagePrefix = messagePrefix; + this.metricCategory = metricCategory; + } + + public String userText() + { + return userText; + } + + public int httpStatus() + { + return httpStatus; + } + + public ErrorAudience audience() + { + return audience; + } + + public String prefix() + { + return messagePrefix; + } + + public MetricCategory metricCategory() + { + return metricCategory; + } +} diff --git a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java index 
c1011f0eed24..7e80c2334c29 100644 --- a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java +++ b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java @@ -30,7 +30,7 @@ /** * Union of the {@link org.apache.druid.query.QueryException} and - * {@link DruidException} fields. Used in tests to deserialize errors which may + * {@link DruidExceptionV1} fields. Used in tests to deserialize errors which may * be in either format. */ public class ErrorResponse @@ -39,7 +39,6 @@ public class ErrorResponse private final String code; private final String errorClass; private final String host; - private final DruidException.ErrorType type; private Map context; @JsonCreator @@ -48,7 +47,6 @@ public ErrorResponse( @JsonProperty("errorMessage") @Nullable String errorMessage, @JsonProperty("errorClass") @Nullable String errorClass, @JsonProperty("host") @Nullable String host, - @JsonProperty("type") @Nullable DruidException.ErrorType type, @JsonProperty("context") @Nullable Map context ) { @@ -56,7 +54,6 @@ public ErrorResponse( this.code = errorCode; this.errorClass = errorClass; this.host = host; - this.type = type; this.context = context; } @@ -94,11 +91,4 @@ public Map getContext() { return context; } - - @JsonProperty - @JsonInclude(Include.NON_NULL) - public DruidException.ErrorType getType() - { - return type; - } } diff --git a/processing/src/main/java/org/apache/druid/error/MetricCategory.java b/processing/src/main/java/org/apache/druid/error/MetricCategory.java new file mode 100644 index 000000000000..e8978be5a8e2 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/MetricCategory.java @@ -0,0 +1,8 @@ +package org.apache.druid.error; + +public enum MetricCategory +{ + INTERRUPTED, + TIME_OUT, + FAILED +} diff --git a/processing/src/main/java/org/apache/druid/error/README.md b/processing/src/main/java/org/apache/druid/error/README.md new file mode 100644 index 000000000000..6dc0d96d6722 --- /dev/null +++ 
b/processing/src/main/java/org/apache/druid/error/README.md @@ -0,0 +1,383 @@ +# Guide to Druid Error Messages + +Errors in Druid are complex. Errors come from both Druid code and from libraries. +The audience for errors varies depending on the type of error. Managed systems +often want to redact sensitive information, while developers need full details. +Each subsystem within Druid has evolve to use its own error handling methodology. +The goal of this note is to explain how we wish to handle errors moving forward. + +## Requirements + +Druid has a robust set of error handling requirements that, taken together drive +the error handling implementation explained below. + +### Audiences + +Errors must address a number of audiences. While most errors are returned to the +end user, it may sometimes be the case that the end user can’t fix the issues. +For example, the user can fix errors in a SQL statement, but cannot fix +configuration issues. Still, the user is always the first point of contact +for errors. + +When the error is something that the user cannot fix, we must strike a balance: +provide the user enough information to request the proper help, but not to +reveal internal details that the user does not need, or that the user should +not see. At the same time, if someone other than the user has to fix the error +(a system administrator, a developer, etc.), then that second person does need +the details. + +This split audience drives a large part of the error handling system design. + +### Managed Deployments + +Druid runs in all sizes of deployments. Developers run Druid on a single machine, +and must fix issues arising from code changes. Developers, presumably, have a +strong understanding of Druid internals. New users will download Druid to run on +a laptop, but such users have little knowledge of Druid — they are just getting +started. As Druid moves into production, a TechOps team may run Druid, while a +different set of users load data and issue queries. 
In a fully managed environment, +users are in different organizations than the people who manage Druid. + +This complexity makes clear that the audiences above may not have any contact +with one another. Further, a fully managed environment wants to restrict the +information that “leaks” to the end user. While a developer wants the details, +a user in another company should see a “sanitized” message stripped of internal +details. + +This requirement means that Druid errors must be flexible. It should be possible +to both log the details (for the people running Druid), while meaningfully redact +sensitive information for end users in remote organizations. + +### Error Categorization + +To help with the above requirements, we need to categorize errors. Druid is +complex: there are hundreds (if not thousands) of things that could go wrong, +ranging from bad user input to incorrect configuration, a badly configured network, +or buggy code. We’ve noted that we want to identify the audience for an error. +Sometimes that is easy: the user is responsible for the text of a SQL message. +Other times, it is hard: who is responsible for a network timeout? + +We’ve also noted that managed environments want to redact information. Doing so +error-by-error is an impossible task. Doing so by category is more practical. + +Druid has the concept of an “error code” which is one level of abstraction above +the detailed error message. We wish to generalize that concept to categorize all +errors. Creating the categories is a non-trivial task: it requires balancing the +audience (when known) with the functional area. For example both timeouts and bad +configuration might be of interest to an “admin”. In larger shops, the Druid admin +is distinct from the network admin. Thus, it might make sense to have a “network” +category distinct from a “config” category. And so on. + +Each category may need to include unique information. SQL errors should include +the line number of the error. 
I/O errors the identify of the resource that failed. +Config errors the name of the config variable. And so on. + +Error sanitization systems may use the category to aid in redacting information. +“User” errors might be provided as-is, while “system” errors might be redacted to +a generic “internal error: please contact support for assistance.” In such cases, +the log file would provide “support” with the details. + +### Error Destinations + +The above sections hint that errors flow to multiple destinations. The end user +(via an API response) is the most obvious destination. Errors also flow to logs, +and from there to many forms of log aggregation systems. As noted, each destination +may receive a different “view” of the error. End users get a simplified, user-focused +view. Developers get the full details, including stack trace. Administrators may +get just “important” errors, and just the description, shorn of stack trace. And +so on. + +Druid primarily uses a REST API for its messages. However, each Druid subsystem +has evolved its own way to return errors. The query endpoints use the +`QueryException` format, other endpoints use a variety of ad-hoc formats: +some use plain text, others use ad-hoc JSON, etc. + +As Druid evolves, we have added multiple query API protocols: JDBC, gRPC, +etc. Each protocol has its own way to format errors, often not as a REST response. + +This means that Druid exceptions don’t have just one format: they must allow +each destination to apply a format appropriate for that destination. + +### Forwarding Remote Errors + +Druid is a distributed system. Queries run across tiers. The “live” query +system uses scatter/gather in which queries run on data nodes. MSQ runs +across multiple stages. In these cases, a remote node may raise an error +which must be returned to a different node, and then forwarded to the user. +Care must be taken to preserve the error as created on the remote node. 
In +particular, stack traces should be from the remote node, not the receiving node. + +Errors are sent across a REST API. As such, Druid exceptions must be +serializable, in some form which allows recovering the exception on the +receiver (Broker, MSQ controller) side. + +By contrast, when errors are returned to the end user, we do not expect that +the user will deserialize the errors using Druid’s error classes. Most clients +don’t have visibility to Druid’s code. Thus, errors returned to the user +should have a standard format so that a single client-side class can +deserialize any Druid exception. + +## Implementation + +With the requirements out of the way, we can now discuss the implementation +that meets these requirements. + +### `DruidException` and its Subclasses + +Most exceptions raised within Druid code should use a subclass of the +`DruidException` class. Use this class when the error is to be returned to the +user (and, perhaps, logged.) Use other exceptions when the goal is to throw an +exception caught and handled by some other bit of code, and which is not +returned to the user. + +Create a subclass of `DruidException`: + +* For each error category. +* Within a category when the error must contain specific additional fields. +* When the class name, when in logs, provides useful information to developers. + +These rules provide a three-level hierarchy: + +```text +DruidException + Exception + Exception +``` + +### Special Fields + +Errors include a number of specialized fields that assist with the requirements +above. + +* `host`: When an error occurs on a data node, this field indicates the + identity of that node. When the error occurs on the node that received the + user response (e.g. the Broker), the field is `null`. +* `suggestion`: Provides suggested corrective action, which may only be valid + in the case of a simple Druid deployment. 
For example, “Increase the value
+  of the druid.something.memory config variable.” Managed systems may omit
+  this text.
+* `code`: An error code that identifies the category of error. Categories are
+  grouped by target audience: some are for the user (SQL syntax, SQL validation,
+  etc.) Some are for the admin (OOM, resource issues.) Some are ambiguous
+  (network timeouts.) The code allows managed systems to do wholesale redactions.
+
+### Context
+
+The context is the “get out of jail free” card. The context allows us to add as
+much detail to an error as wanted, without running the risk of exposing
+sensitive information. In a managed system, the context may be hidden from the
+user, but still logged. In a development system, the context gives the developer
+the information needed to identify a problem.
+
+Context should include secondary information that can safely be hidden. Primary
+information (such as the name of the column that can’t be found) should be in
+the message itself.
+
+### Query Endpoint Errors
+
+Errors returned from `/sql` or `/sql/task` have a format defined by `QueryException`:
+
+```json
+{
+  "error": "",
+  "errorClass": "",
+  "errorMessage": "",
+  "host": ""
+}
+```
+
+The `host` is set only for errors that occurred on data nodes, but not when the error
+occurred on the Broker (in, say, SQL validation.)
+
+The `error` is an ad-hoc set of codes defined in `QueryException`, but is neither
+exhaustive nor unique: some errors could fall into multiple error codes.
+
+Per Druid's compatibility rules, we can add new fields to the above format, but we
+cannot remove existing fields or change their meaning. This is particularly unfortunate
+for the `errorClass` field since it exposes the specific class name used to throw the
+exception: something that will change with the `DruidException` system.
+
+### Data Node Errors
+
+Data nodes (Historical, Peon, Indexer) use the same format as the query endpoint. 
Such
+errors are deserialized into the `QueryException` class. Thus, `QueryException` has a
+JSON serialization coupled tightly to both our internal Broker-to-data-node API, and the
+external `/sql` API. `DruidException` must fit into this internal API without causing
+compatibility issues during rolling upgrades. That is, the wire format must not change.
+Short-term, this may mean that we discard information when returning errors from the
+data node so that the Broker does not fail due to unexpected fields. This restriction
+limits our freedom in crafting good error messages on data nodes.
+
+### MSQ Errors
+
+MSQ introduced a well-organized system to report errors from MSQ tasks to the user by
+way of an Overlord task report. The system is unique to the MSQ environment and is not
+general enough to handle non-MSQ cases. We do not want to modify the MSQ system. Instead,
+we want to ensure that the `DruidException` plays well with the MSQ system.
+
+#### Quick Overview of the MSQ Fault System
+
+A quick review of the MSQ code suggests that `DruidException` tries to solve
+the same problems as the MSQ error system, though in perhaps a more general way.
+
+MSQ apparently has a fault system separate from exceptions. MSQ splits errors
+into two parts: `MSQFault`, which is JSON-serializable, and `MSQException`, which
+is not. There are many subclasses of `MSQFault` which are not also subclasses of
+exceptions.
+
+`MSQFault` is part of the `MSQErrorReport`. It seems that `MSQFault` is designed
+to capture the fields that are JSON serialized into reports, while `MSQException`
+is something that can unwind the stack. This split means we don't have to add
+JSON serialization to our exception classes. This is wise since, as we'll see
+later, the REST JSON format differs a bit from the MSQ format. A single class
+cannot have two distinct JSON serializations. By creating the fault class, MSQ
+can control its own specialized JSON format. 
+
+Now, let's compare the MSQ system with the proposed Druid exception system.
+
+The `MSQFault` interface has many subclasses: apparently one for each kind of
+error. Each class includes fields for any error-specific context. JSON
+serialization places those fields as top-level context in the serialized
+error. Example:
+
+```json
+{
+  "errorCode": "",
+  "errorMessage": "",
+  "": "", ...
+}
+```
+
+#### Integration with `DruidException`
+
+In an MSQ task, the MSQ system is responsible for returning errors to the
+user by way of the Overlord task report. Unlike the "classic" query system
+(and unlike other REST services), MSQ does not directly return an error to
+the user. Instead, MSQ adds the error to the report, then does substantial
+work to shut down workers, wrap up the Overlord task, etc.
+
+As a result, we never expect that an `MSQFault` will need to map to a
+`DruidException`. We do, however, expect the need to go the other way. MSQ
+reuses substantial portions of the native query codebase. That code doesn't
+know if it is running in a historical (where it would just throw a
+`DruidException` and exit) or in MSQ (where it has to play well in the MSQ
+system.) So, we allow that code to use the `DruidException` system. It is up
+to the MSQ worker to translate the `DruidException` into a suitable `MSQFault`,
+which is then handled via the MSQ error system.
+
+For example, a `DruidException` may provide fields such as the S3 bucket on
+which an I/O error occurred. MSQ can map that to a matching `MSQFault` so that
+the information appears as a field in the JSON message.
+
+### REST Endpoint Errors
+
+A section above discussed the form of errors from the `/sql` endpoint. Druid has
+hundreds of other REST endpoints, with many ad-hoc error solutions. We propose to
+unify error reporting to use the (enhanced) `/sql` format. 
That is, we use the
+`DruidException` everywhere in our code, and we map those exceptions to REST
+responses the same way for every REST API (unless there is some special reason
+not to.)
+
+### Third-Party Exceptions
+
+Druid uses libraries (including the Java library) that throw their own exceptions.
+The only workable approach is:
+
+* Catch such exceptions as close to the cause as possible, then translate the error
+  to a `DruidException`, providing Druid-specific context. The original exception is
+  attached as a cause, so developers can track down the underlying issue.
+* Provide a Servlet filter that catches "stray" exceptions and returns a generic error
+  message. These cases indicate gaps in the code where we failed to properly catch and
+  handle an exception. Managed systems can't know if the non-Druid exception contains
+  sensitive information. So, report the error as something like "An internal error
+  occurred. See the logs for details", associated with a unique error category so that
+  managed services can replace the wording.
+
+### Other APIs
+
+Druid occasionally offers non-REST APIs: JDBC, gRPC, etc. For these cases, an API-specific
+mapping from the `DruidException` to the specialized API can handle the needs of that API.
+
+In an ideal world, `DruidException` would be independent of all APIs, and the REST API
+would do its own mapping. However, since REST is standard in Druid, we allow `DruidException`
+to serialize to and from Druid's REST API JSON.
+
+### JSON Deserialization
+
+The implementation envisions a large number of `DruidException` subclasses: one per
+category, with finer grain subclasses. The JSON format given above was designed based
+on a single class: `QueryException`. There is no `type` field that Jackson could use to
+recover the subclass.
+
+When running a query, the data node will raise a specific error class, then serialize it
+in the generic format. 
The Broker does not have sufficient information to recover the +original class. Instead, the Broker deserializes exceptions as a `GenericException` class. +The result can be thrown, and will re-serialize to the same format as that sent by the +data node, but it loses the ability to parse exceptions based on the exception class. + +### Mutable Exception Fields + +Druid prefers that class fields be immutable because such an approach reduces risk in +a multi-threaded system. In an ideal world, `DruidException` fields would also be +immutable, with a "builder" class to gather values. Such a solution is workable only +if we have one (or a very few) exception classes. The design here, however, follows +MSQ practice and envisions many such classes. Creating a builder per class would be +tedious. + +Instead, we allow certain `DruidException` fields to be mutable so that they can be +set after the exception is created. Mutable fields include: + +* `host` (set at the top level of the data node) +* `context` (to allow callers to add information to an exception as it bubbles up + the call stack). +* `suggestion` (to allow a higher-level of code to offer a suggestion when the + code that throws the exception doesn't have sufficient context.) + +## Guidelines + +With the above requirements and design in mind, we can identify some guidelines +when developers write error messages. + +### Wording + +Word error messages so that they speak to the end user who will receive the +error as a response to a request. When the error is a “user error” this is +simple. Explain the problem in user terms. That is, rather than “key not found”, +say something like “If you set the X context parameter, you must also set the Y +parameter.” + +The task is harder when the error is one only an admin or developer can solve. +We want to provide the information that audience needs to solve the problem. +But, we must provide that information only in logs. 
For example, if we hit an
+assertion error, there is not much the user can do. But, a developer wants to
+know where the error triggered and why. For this, provide a generic message to
+the user “Druid internal error.” Druid might add “See logs for details.” A
+managed service would say, “Contact support.” However, the log should provide
+the full details, including the stack trace, and the value that caused the issue.
+
+Thus, some errors must be constructed with two sets of information: the bland
+user message, and the details for developers. Use the context to help.
+
+### Interpolated Values
+
+Errors will often include interpolated values. Example: “Table not
+found”. Druid has a long-standing convention of always enclosing interpolated
+values in square brackets: “Table [foo] not found.” While this format is not
+standard English, it is standard Druid, and all error messages must follow
+this form.
+
+### Sensitive Values
+
+Error messages want to be as helpful as possible by providing all the details
+that would be needed to resolve the issue. This is ideal during development,
+or in a single-machine deployment. But, in doing so, a message may leak
+sensitive information when run in a managed service.
+
+Errors should thus divide information into two “pools.” The error message itself
+should contain only that information which is suitable for a managed service
+user. Information which might be considered sensitive should reside in the
+context. A managed service can strip context values that a user cannot see.
+
+Another alternative is for a managed service to redact an entire category of
+errors, as noted above. Thus, errors should be assigned categories that enable
+efficient redaction policies. 
diff --git a/processing/src/main/java/org/apache/druid/error/SqlParseError.java b/processing/src/main/java/org/apache/druid/error/SqlParseError.java new file mode 100644 index 000000000000..dae37426f93f --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/SqlParseError.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +/** + * SQL query parse failed. + */ +public class SqlParseError extends DruidException +{ + public SqlParseError(String msg, Object...args) + { + super(msg, args); + } + + public SqlParseError(Throwable cause, String msg, Object...args) + { + super(cause, msg, args); + } + + @Override + public ErrorCategory category() + { + return ErrorCategory.SQL_PARSE; + } + + @Override + public String errorClass() + { + // For backward compatibility. 
+ // Calcite classes not visible here, so using a string + return "org.apache.calcite.sql.parser.SqlParseException"; + } +} diff --git a/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java b/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java new file mode 100644 index 000000000000..49b6dde91f56 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +/** + * SQL query validation failed, because a SQL statement asked Druid to do + * something which it does not support. This message indicates that the + * unsupported thing is by design, not because we've not gotten to it yet. + * For example, asking for `MAX(VARCHAR)` is not supported because it does + * not make sense. Use a different exception if the error is due to something + * that Druid should support, but doesn't yet. + * + * @see {@link SqlValidationError} for the general validation error case. 
+ */ +public class SqlUnsupportedError extends DruidException +{ + public SqlUnsupportedError(String msg, Object...args) + { + super(msg, args); + } + + public SqlUnsupportedError(Throwable cause, String msg, Object...args) + { + super(cause, msg, args); + } + + @Override + public ErrorCategory category() + { + return ErrorCategory.SQL_UNSUPPORTED; + } + + @Override + public String errorClass() + { + // For backward compatibility: using text since class is not visible here. + return "org.apache.calcite.plan.RelOptPlanner$CannotPlanException"; + } +} diff --git a/processing/src/main/java/org/apache/druid/error/SqlValidationError.java b/processing/src/main/java/org/apache/druid/error/SqlValidationError.java new file mode 100644 index 000000000000..ded7d547e443 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/SqlValidationError.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +/** + * SQL query validation failed, most likely due to a problem in the SQL statement + * which the user provided. + * + * @see {@link SqlUnsupportedError} for the special case + * in which the SQL asked to do something Druid does not support. 
+ */ +public class SqlValidationError extends DruidException +{ + public SqlValidationError(String msg, Object...args) + { + super(msg, args); + } + + public SqlValidationError(Throwable cause, String msg, Object...args) + { + super(cause, msg, args); + } + + public SqlValidationError(Throwable cause) + { + super(cause, cause.getMessage()); + } + + @Override + public ErrorCategory category() + { + return ErrorCategory.SQL_VALIDATION; + } + + @Override + public String errorClass() + { + // For backward compatibility. + // Using string because the class is not visible here. + return "org.apache.calcite.tools.ValidationException"; + } +} diff --git a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java index 936ef2fc2099..542131110bfa 100644 --- a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java +++ b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java @@ -37,7 +37,7 @@ public static RestExceptionEncoder instance() public ResponseBuilder builder(DruidException e) { return Response - .status(status(e)) + .status(Response.Status.fromStatusCode(e.category().httpStatus())) .entity(e.toErrorResponse()) .type(MediaType.APPLICATION_JSON); } @@ -47,27 +47,27 @@ public Response encode(DruidException e) { return builder(e).build(); } - - // Temporary status mapping - private Status status(DruidException e) - { - switch (e.type()) { - case CONFIG: - case INTERNAL: - case NETWORK: - return Response.Status.INTERNAL_SERVER_ERROR; - case TIMEOUT: - return Response.Status.fromStatusCode(504); // No predefined status name - case NOT_FOUND: - return Response.Status.NOT_FOUND; - case RESOURCE: - return Response.Status.fromStatusCode(429); // No predefined status name - case USER: - case UNSUPPORTED: - return Response.Status.BAD_REQUEST; - default: - // Should never occur - return Response.Status.INTERNAL_SERVER_ERROR; - } - } + 
// + // // Temporary status mapping + // private Status status(DruidExceptionV1 e) + // { + // switch (e.type()) { + // case CONFIG: + // case INTERNAL: + // case NETWORK: + // return Response.Status.INTERNAL_SERVER_ERROR; + // case TIMEOUT: + // return Response.Status.fromStatusCode(504); // No predefined status name + // case NOT_FOUND: + // return Response.Status.NOT_FOUND; + // case RESOURCE: + // return Response.Status.fromStatusCode(429); // No predefined status name + // case USER: + // case UNSUPPORTED: + // return Response.Status.BAD_REQUEST; + // default: + // // Should never occur + // return Response.Status.INTERNAL_SERVER_ERROR; + // } + // } } diff --git a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java index 13b09e23484e..4ce171068085 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java +++ b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java @@ -24,6 +24,7 @@ import com.google.common.io.CountingOutputStream; import org.apache.druid.client.DirectDruidClient; import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.StandardRestExceptionEncoder; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.RE; @@ -318,11 +319,11 @@ private Response handleDruidException(ResultsWriter resultsWriter, DruidExceptio return null; } - switch (e.type()) { - case RESOURCE: + switch (e.category().metricCategory()) { + case INTERRUPTED: counter.incrementInterrupted(); break; - case NETWORK: + case TIME_OUT: counter.incrementTimedOut(); break; default: diff --git a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java index 38b90a0055ce..96b242ea4977 100644 --- a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java +++ 
b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java @@ -21,7 +21,8 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.calcite.plan.RelOptPlanner; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidAssertionError; +import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; @@ -228,9 +229,7 @@ public ResultSet plan() } catch (RelOptPlanner.CannotPlanException e) { // Not sure if this is even thrown here. - throw DruidException.internalError("Cannot plan SQL query") - .cause(e) - .build(); + throw new DruidAssertionError(e, "Cannot plan SQL query"); } catch (RuntimeException e) { state = State.FAILED; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java index 191253cfa694..58d2ac1cc710 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java @@ -35,7 +35,10 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidAssertionError; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.any.DoubleAnyAggregatorFactory; import org.apache.druid.query.aggregation.any.FloatAnyAggregatorFactory; @@ -90,7 +93,7 @@ AggregatorFactory createAggregatorFactory(String name, String 
fieldName, String case COMPLEX: return new StringFirstAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw DruidException.unsupportedSqlError("EARLIEST aggregator is not supported for type %s", type); + throw new SqlUnsupportedError("EARLIEST aggregator is not supported for type [%s]", type); } } }, @@ -110,7 +113,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringLastAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw DruidException.unsupportedSqlError("LATEST aggregator is not supported for type %s", type); + throw new SqlUnsupportedError("LATEST aggregator is not supported for type [%s]", type); } } }, @@ -129,7 +132,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case STRING: return new StringAnyAggregatorFactory(name, fieldName, maxStringBytes); default: - throw DruidException.unsupportedSqlError("ANY aggregation is not supported for type %s", type); + throw new SqlUnsupportedError("ANY aggregation is not supported for type [%s]", type); } } }; @@ -187,12 +190,11 @@ public Aggregation toDruidAggregation( final String aggregatorName = finalizeAggregations ? 
Calcites.makePrefixedName(name, "a") : name; final ColumnType outputType = Calcites.getColumnTypeForRelDataType(aggregateCall.getType()); if (outputType == null) { - throw DruidException.internalError( - "%s cannot translate output SQL type %s to a Druid type", + throw new DruidAssertionError( + "[%s] cannot translate output SQL type [%s] to a Druid type", aggregateCall.getName(), aggregateCall.getType().getSqlTypeName() - ) - .build(); + ); } final String fieldName = getColumnName(plannerContext, virtualColumnRegistry, args.get(0), rexNodes.get(0)); @@ -218,8 +220,8 @@ public Aggregation toDruidAggregation( maxStringBytes = RexLiteral.intValue(rexNodes.get(1)); } catch (AssertionError ae) { - throw DruidException.validationError( - "%s, argument 2 must be a number but found [%s]", + throw new SqlValidationError( + "[%s], argument 2 must be a number but found [%s]", aggregateCall.getName(), rexNodes.get(1) ); @@ -233,8 +235,8 @@ public Aggregation toDruidAggregation( ); break; default: - throw DruidException.validationError( - "%s expects 1 or 2 arguments but found %d", + throw new SqlValidationError( + "[%s] expects 1 or 2 arguments but found [%d]", aggregateCall.getName(), args.size() ); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java index b727be40e693..01cfe3f463e9 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java @@ -22,7 +22,8 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.math.expr.ExprMacroTable; 
import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory; @@ -71,7 +72,9 @@ private static AggregatorFactory createMaxAggregatorFactory( case DOUBLE: return new DoubleMaxAggregatorFactory(name, fieldName, null, macroTable); default: - throw DruidException.unsupportedSqlError("MAX does not support type %s", aggregationType); + // This error refers to the Druid type. But, we're in SQL validation. + // It should refer to the SQL type. + throw new SqlUnsupportedError("MAX does not support type [%s]", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java index 59946af5f506..f48a0cd36422 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java @@ -22,7 +22,8 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleMinAggregatorFactory; @@ -67,7 +68,7 @@ private static AggregatorFactory createMinAggregatorFactory( case DOUBLE: return new DoubleMinAggregatorFactory(name, fieldName, null, macroTable); default: - throw DruidException.unsupportedSqlError("MIN does not support type %s", aggregationType); + throw new SqlUnsupportedError("MIN does not support type [%s]", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java 
b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java index e36fd449f382..408694ebd1f2 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java @@ -36,7 +36,8 @@ import org.apache.calcite.sql.type.SqlTypeFamily; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.ExprMacroTable; @@ -198,7 +199,7 @@ public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); if (type instanceof RowSignatures.ComplexSqlType) { - throw DruidException.unsupportedSqlError("Cannot use STRING_AGG on complex input of type %s", type); + throw new SqlUnsupportedError("Cannot use STRING_AGG on complex input of type [%s]", type); } return Calcites.createSqlTypeWithNullability( sqlOperatorBinding.getTypeFactory(), diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java index 1911c307add0..15f987458375 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java @@ -31,7 +31,8 @@ import org.apache.calcite.sql.type.OperandTypes; import org.apache.calcite.sql.type.ReturnTypes; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import 
org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory; @@ -88,7 +89,7 @@ static AggregatorFactory createSumAggregatorFactory( case DOUBLE: return new DoubleSumAggregatorFactory(name, fieldName, null, macroTable); default: - throw DruidException.unsupportedSqlError("SUM is not supported for type %s", aggregationType); + throw new SqlUnsupportedError("SUM is not supported for type [%s]", aggregationType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java index 6cf91ac719dc..c33489defcec 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java @@ -40,6 +40,8 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeTransforms; import org.apache.calcite.sql2rel.SqlRexConvertlet; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; @@ -201,7 +203,7 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw DruidException.unsupportedSqlError( + throw new SqlUnsupportedError( "Cannot use [%s]: [%s]", call.getOperator().getName(), iae.getMessage() @@ -391,7 +393,7 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw DruidException.unsupportedSqlError( + throw new SqlUnsupportedError( 
"Cannot use [%s]: [%s]", call.getOperator().getName(), iae.getMessage() @@ -689,12 +691,11 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw DruidException.unsupportedSql( + throw new SqlUnsupportedError( + iae, "JSON path [%s] is not supported", call.getOperator().getName() - ) - .cause(iae) - .build(); + ); } final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java index 8614e735bb73..d0ec9739b19b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java @@ -34,6 +34,7 @@ import org.apache.calcite.sql.SqlTimestampLiteral; import org.apache.calcite.tools.ValidationException; import org.apache.druid.error.DruidException; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.java.util.common.granularity.GranularityType; @@ -105,7 +106,7 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws ParseException { - final String genericParseFailedMessageFormatString = "Encountered %s after PARTITIONED BY. " + final String genericParseFailedMessageFormatString = "Encountered [%s] after PARTITIONED BY. 
" + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or %s function"; if (!(sqlNode instanceof SqlCall)) { @@ -229,23 +230,23 @@ public static List validateQueryAndConvertToIntervals( List intervals = filtration.getIntervals(); if (filtration.getDimFilter() != null) { - throw DruidException.validationError( + throw new SqlValidationError( "Only %s column is supported in OVERWRITE WHERE clause", ColumnHolder.TIME_COLUMN_NAME ); } if (intervals.isEmpty()) { - throw DruidException.validationError("Intervals for REPLACE are empty"); + throw new SqlValidationError("Intervals for REPLACE are empty"); } for (Interval interval : intervals) { DateTime intervalStart = interval.getStart(); DateTime intervalEnd = interval.getEnd(); if (!granularity.bucketStart(intervalStart).equals(intervalStart) || !granularity.bucketStart(intervalEnd).equals(intervalEnd)) { - throw DruidException.validationError( - "OVERWRITE WHERE clause contains an interval %s" + - " which is not aligned with PARTITIONED BY granularity %s", + throw new SqlValidationError( + "OVERWRITE WHERE clause contains an interval [%s]" + + " which is not aligned with PARTITIONED BY granularity [%s]", intervals, granularity ); @@ -330,7 +331,7 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi { if (!(replaceTimeQuery instanceof SqlBasicCall)) { log.error("Expected SqlBasicCall during parsing, but found " + replaceTimeQuery.getClass().getName()); - throw DruidException.validationError("Invalid OVERWRITE WHERE clause"); + throw new SqlValidationError("Invalid OVERWRITE WHERE clause"); } String columnName; SqlBasicCall sqlBasicCall = (SqlBasicCall) replaceTimeQuery; @@ -411,8 +412,8 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi StringComparators.NUMERIC ); default: - throw DruidException.validationError( - "Unsupported operation in OVERWRITE WHERE clause: %s", + throw new SqlValidationError( + "Unsupported operation in OVERWRITE WHERE 
clause: [%s]", sqlBasicCall.getOperator().getName() ); } @@ -428,7 +429,7 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi public static String parseColumnName(SqlNode sqlNode) { if (!(sqlNode instanceof SqlIdentifier)) { - throw DruidException.validationError( + throw new SqlValidationError( "OVERWRITE WHERE expressions must be of the form __time TIMESTAMP" ); } @@ -446,7 +447,7 @@ public static String parseColumnName(SqlNode sqlNode) public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) { if (!(sqlNode instanceof SqlTimestampLiteral)) { - throw DruidException.validationError( + throw new SqlValidationError( "OVERWRITE WHERE expressions must be of the form __time TIMESTAMP" ); } @@ -464,18 +465,17 @@ public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone ti public static void throwIfUnsupportedGranularityInPartitionedBy(Granularity granularity) { if (!GranularityType.isStandard(granularity)) { - throw DruidException.validation( + throw new SqlValidationError( "The granularity specified in PARTITIONED BY is not supported." 
) - .context( - "Valid granularities", + .suggestion( + "Valid granularities: " + Arrays.stream(GranularityType.values()) .filter(granularityType -> !granularityType.equals(GranularityType.NONE)) .map(Enum::name) .map(StringUtils::toLowerCase) .collect(Collectors.joining(", ")) - ) - .build(); + ); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index 467ab0864d56..e112f8a8cd94 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -29,7 +29,13 @@ import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.tools.FrameworkConfig; import org.apache.calcite.tools.ValidationException; +import org.apache.druid.error.DruidAssertionError; import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlParseError; +import org.apache.druid.error.SqlUnsupportedError; +import org.apache.druid.error.SqlValidationError; +import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.query.QueryContext; import org.apache.druid.query.QueryException; import org.apache.druid.server.security.Access; @@ -168,8 +174,8 @@ private SqlStatementHandler createHandler(final SqlNode node) if (query.isA(SqlKind.QUERY)) { return new QueryHandler.SelectHandler(handlerContext, query, explain); } - throw DruidException.unsupportedSqlError( - "Unsupported SQL statement %s", + throw new SqlUnsupportedError( + "Unsupported SQL statement [%s]", node.getKind() ); } @@ -323,71 +329,65 @@ public static DruidException translateException(Exception e) return inner; } catch (ValidationException inner) { - return parseValidationMessage(inner, QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE); + return parseValidationMessage(inner, false); } catch (SqlParseException inner) { return 
parseParserMessage(inner); } catch (RelOptPlanner.CannotPlanException inner) { - return parseValidationMessage(inner, QueryException.QUERY_UNSUPPORTED_ERROR_CODE); + return parseValidationMessage(inner, true); } catch (Exception inner) { // Anything else. Should not get here. Anything else should already have // been translated to a DruidException unless it is an unexpected exception. - return DruidException - .internalError(e.getMessage()) - .cause(inner) - .build(); + return new DruidAssertionError(inner, e.getMessage()); } } - private static DruidException parseValidationMessage(Exception e, String errorCode) + private static DruidException parseValidationMessage(Exception e, boolean unsupported) { // Calcite exception that probably includes a position. String msg = e.getMessage(); Pattern p = Pattern.compile("(?:org\\..*: )From line (\\d+), column (\\d+) to line \\d+, column \\d+: (.*)$"); Matcher m = p.matcher(msg); - DruidException.Builder builder; + Exception cause; + String errorMsg; if (m.matches()) { - builder = DruidException.user( - "Line %s, Column %s: %s", m.group(1), m.group(2), m.group(3) - ); + cause = null; + errorMsg = StringUtils.format("Line [%s], Column [%s]: %s", m.group(1), m.group(2), m.group(3)); } else { - builder = DruidException.user(msg).cause(e); + cause = e; + errorMsg = msg; + } + if (unsupported) { + return new SqlUnsupportedError(cause, errorMsg); + } else { + return new SqlValidationError(cause, errorMsg); } - return builder - .code(errorCode) - .build(); } private static DruidException parseParserMessage(Exception e) { // Calcite exception that probably includes a position. The normal parse // exception is rather cumbersome. Clean it up a bit. 
- String msg = e.getMessage(); + final String msg = e.getMessage(); Pattern p = Pattern.compile( "Encountered \"(.*)\" at line (\\d+), column (\\d+).\nWas expecting one of:\n(.*)", Pattern.MULTILINE | Pattern.DOTALL ); Matcher m = p.matcher(msg); - DruidException.Builder builder; - if (m.matches()) { - Pattern p2 = Pattern.compile("[ .]*\n\\ s+"); - Matcher m2 = p2.matcher(m.group(4).trim()); - String choices = m2.replaceAll(", "); - builder = DruidException.user( - "Line %s, Column %s: unexpected token '%s'", - m.group(2), - m.group(3), - m.group(1) - ) - .context("Expected", choices); - } else { - builder = DruidException.user(msg).cause(e); + if (!m.matches()) { + return new SqlParseError(e, msg); } - return builder - .code(QueryException.SQL_PARSE_FAILED_ERROR_CODE) - .errorClass(SqlParseException.class.getName()) - .build(); + Pattern p2 = Pattern.compile("[ .]*\n\\ s+"); + Matcher m2 = p2.matcher(m.group(4).trim()); + String choices = m2.replaceAll(", "); + return new SqlParseError( + "Line [%s], Column [%s]: unexpected token [%s]", + m.group(2), + m.group(3), + m.group(1) + ) + .suggestion("Expected one of " + choices); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java index 6064b6aac155..7b0f5233e6d7 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java @@ -23,7 +23,9 @@ import org.apache.calcite.rex.RexExecutor; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.StringUtils; import 
org.apache.druid.math.expr.Expr; @@ -89,7 +91,7 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw DruidException.validationError("Illegal DATE constant %s", constExp); + throw new SqlValidationError("Illegal DATE constant [%s]", constExp); } literal = rexBuilder.makeDateLiteral( @@ -103,7 +105,7 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw DruidException.validationError("Illegal TIMESTAMP constant %s", constExp); + throw new SqlValidationError("Illegal TIMESTAMP constant [%s]", constExp); } literal = Calcites.jodaToCalciteTimestampLiteral( @@ -128,14 +130,13 @@ public void reduce( double exprResultDouble = exprResult.asDouble(); if (Double.isNaN(exprResultDouble) || Double.isInfinite(exprResultDouble)) { String expression = druidExpression.getExpression(); - throw DruidException.unsupportedSql("Expression not supported in SQL : %s", expression) - .context("Evaluates to", Double.toString(exprResultDouble)) - .context("Suggestion", StringUtils.format( + throw new SqlUnsupportedError("Expression not supported in SQL : [%s]", expression) + .addContext("Evaluates to", Double.toString(exprResultDouble)) + .addContext("Suggestion", StringUtils.format( "You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') or VARCHAR ('CAST(%s as VARCHAR)') or change the expression itself", expression, expression - )) - .build(); + )); } bigDecimal = BigDecimal.valueOf(exprResult.asDouble()); } @@ -166,11 +167,10 @@ public void reduce( resultAsBigDecimalList.add(null); } else if (Double.isNaN(doubleVal.doubleValue()) || Double.isInfinite(doubleVal.doubleValue())) { String expression = druidExpression.getExpression(); - throw 
DruidException.validation("Array element not supported in SQL: %s", expression) - .context("Evaluates to", Double.toString(doubleVal.doubleValue())) - .context("Suggestion", - "You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself") - .build(); + throw new SqlValidationError("Array element not supported in SQL: [%s]", expression) + .addContext("Evaluates to", Double.toString(doubleVal.doubleValue())) + .suggestion( + "You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself"); } else { resultAsBigDecimalList.add(BigDecimal.valueOf(doubleVal.doubleValue())); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index f484cf971211..e3f8bc287384 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -36,7 +36,8 @@ import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; import org.apache.druid.common.utils.IdUtils; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.SqlUnsupportedError; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; @@ -87,8 +88,8 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) SqlNodeList orderByList = sqlOrderBy.orderList; if (!(orderByList == null || orderByList.equals(SqlNodeList.EMPTY))) { String opName = sqlNode.getOperator().getName(); - throw DruidException.validationError( - "Cannot use ORDER BY on %s %s statement, use CLUSTERED BY instead.", + throw new SqlValidationError( + "Cannot use ORDER BY on %s [%s] statement, use CLUSTERED BY instead.", "INSERT".equals(opName) ? 
"an" : "a", opName ); @@ -99,7 +100,7 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) } if (!query.isA(SqlKind.QUERY)) { - throw DruidException.validationError("Cannot execute SQL statement %s", query.getKind()); + throw new SqlValidationError("Cannot execute SQL statement [%s]", query.getKind()); } return query; } @@ -115,8 +116,8 @@ protected String operationName() public void validate() { if (ingestNode().getPartitionedBy() == null) { - throw DruidException.validationError( - "%s statements must specify PARTITIONED BY clause explicitly", + throw new SqlValidationError( + "[%s] statements must specify PARTITIONED BY clause explicitly", operationName() ); } @@ -130,8 +131,8 @@ public void validate() } } catch (JsonProcessingException e) { - throw DruidException.validationError( - "Invalid partition granularity '%s", + throw new SqlValidationError( + "Invalid partition granularity [%s]", ingestionGranularity ); } @@ -139,8 +140,8 @@ public void validate() // Check if CTX_SQL_OUTER_LIMIT is specified and fail the query if it is. CTX_SQL_OUTER_LIMIT being provided causes // the number of rows inserted to be limited which is likely to be confusing and unintended. 
if (handlerContext.queryContextMap().get(PlannerContext.CTX_SQL_OUTER_LIMIT) != null) { - throw DruidException.validationError( - "Context parameter %s cannot be provided with %s", + throw new SqlValidationError( + "Context parameter [%s] cannot be provided with [%s]", PlannerContext.CTX_SQL_OUTER_LIMIT, operationName() ); @@ -166,12 +167,12 @@ private String validateAndGetDataSourceForIngest() { final SqlInsert insert = ingestNode(); if (insert.isUpsert()) { - throw DruidException.unsupportedError("UPSERT is not supported."); + throw new SqlUnsupportedError("UPSERT is not supported."); } if (insert.getTargetColumnList() != null) { - throw DruidException.unsupportedSqlError( - "%s with a target column list is not supported", + throw new SqlUnsupportedError( + "[%s] with a target column list is not supported", operationName() ); } @@ -181,7 +182,7 @@ private String validateAndGetDataSourceForIngest() if (tableIdentifier.names.isEmpty()) { // I don't think this can happen, but include a branch for it just in case. - throw DruidException.validationError("%s requires a target table", operationName()); + throw new SqlValidationError("[%s] requires a target table", operationName()); } else if (tableIdentifier.names.size() == 1) { // Unqualified name. 
dataSource = Iterables.getOnlyElement(tableIdentifier.names); @@ -193,8 +194,8 @@ private String validateAndGetDataSourceForIngest() if (tableIdentifier.names.size() == 2 && defaultSchemaName.equals(tableIdentifier.names.get(0))) { dataSource = tableIdentifier.names.get(1); } else { - throw DruidException.validationError( - "Cannot %s into %s because it is not a Druid datasource.", + throw new SqlValidationError( + "Cannot [%s] into [%s] because it is not a Druid datasource.", operationName(), tableIdentifier ); @@ -205,7 +206,7 @@ private String validateAndGetDataSourceForIngest() IdUtils.validateId(operationName() + " dataSource", dataSource); } catch (IllegalArgumentException e) { - throw DruidException.unexpected(e); + throw new SqlValidationError(e); } return dataSource; @@ -268,7 +269,7 @@ protected DruidSqlIngest ingestNode() public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_INSERT)) { - throw DruidException.userError( + throw new SqlUnsupportedError( "Cannot execute INSERT with SQL engine '%s'.", handlerContext.engine().name() ); @@ -322,14 +323,14 @@ protected DruidSqlIngest ingestNode() public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_REPLACE)) { - throw DruidException.validationError( - "Cannot execute REPLACE with SQL engine %s", + throw new SqlValidationError( + "Cannot execute REPLACE with SQL engine [%s]", handlerContext.engine().name() ); } SqlNode replaceTimeQuery = sqlNode.getReplaceTimeQuery(); if (replaceTimeQuery == null) { - throw DruidException.validationError( + throw new SqlValidationError( "Missing time chunk information in OVERWRITE clause for REPLACE. Use " + "OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." 
); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index f34dfe11393a..7f9edb469f31 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -55,6 +55,9 @@ import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequences; import org.apache.druid.java.util.emitter.EmittingLogger; @@ -201,8 +204,8 @@ public PlannerResult plan() // Consider BINDABLE convention when necessary. Used for metadata tables. if (!handlerContext.plannerContext().featureAvailable(EngineFeature.ALLOW_BINDABLE_PLAN)) { - throw DruidException.validationError( - "Cannot query table%s %s with SQL engine %s.", + throw new SqlValidationError( + "Cannot query table%s [%s] with SQL engine [%s]", bindableTables.size() != 1 ? "s" : "", bindableTables.stream() .map(table -> Joiner.on(".").join(table.getQualifiedName())) @@ -625,20 +628,15 @@ private DruidException buildSQLPlanningError(RelOptPlanner.CannotPlanException e String errorMessage = handlerContext.plannerContext().getPlanningError(); if (null == errorMessage && exception instanceof UnsupportedSQLQueryException) { errorMessage = exception.getMessage(); - } - if (null == errorMessage) { + } else if (null == errorMessage) { errorMessage = "Please check Broker logs for additional details."; } else { // Planning errors are more like hints: it isn't guaranteed that the planning error is actually what went wrong. 
errorMessage = "Possible error: " + errorMessage; } // Finally, add the query itself to error message that user will get. - return DruidException - .unsupportedSql("Query not supported. %s", errorMessage) - .code(QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE) - .cause(exception) - .context("SQL", handlerContext.plannerContext().getSql()) - .build(); + return new SqlUnsupportedError(exception, "Query not supported. %s", errorMessage) + .addContext("SQL", handlerContext.plannerContext().getSql()); } public static class SelectHandler extends QueryHandler @@ -655,8 +653,8 @@ public SelectHandler( public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_SELECT)) { - throw DruidException.validationError( - "Cannot execute SELECT with SQL engine %s", + throw new SqlValidationError( + "Cannot execute SELECT with SQL engine [%s]", handlerContext.engine().name() ); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java index 2383c70d658f..3a07c1787438 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java @@ -43,7 +43,8 @@ import org.apache.calcite.rex.RexNode; import org.apache.calcite.rex.RexShuttle; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlValidationError; /** * Traverse {@link RelNode} tree and replaces all {@link RexDynamicParam} with {@link org.apache.calcite.rex.RexLiteral} @@ -199,8 +200,8 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa if (plannerContext.getParameters().size() > dynamicParam.getIndex()) { TypedValue param = plannerContext.getParameters().get(dynamicParam.getIndex()); if (param == 
null) { - throw DruidException.validationError( - "Parameter at position %d is not bound", + throw new SqlValidationError( + "Parameter at position [%d] is not bound", dynamicParam.getIndex() ); } @@ -214,8 +215,8 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa true ); } else { - throw DruidException.validationError( - "Parameter at position %d is not bound", + throw new SqlValidationError( + "Parameter at position [%d] is not bound", dynamicParam.getIndex() ); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java index c8a0b8129d47..173cd126c7cb 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java @@ -38,7 +38,8 @@ import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlKind; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.StringUtils; @@ -360,7 +361,7 @@ public static JoinType toDruidJoinType(JoinRelType calciteJoinType) case INNER: return JoinType.INNER; default: - throw DruidException.unsupportedSqlError("Cannot handle joinType '%s'", calciteJoinType); + throw new SqlUnsupportedError("Cannot handle joinType [%s]", calciteJoinType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java index a168b9ad04df..3a6587db6903 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java @@ -25,7 +25,8 @@ import org.apache.calcite.plan.RelOptRuleCall; import org.apache.calcite.rel.logical.LogicalValues; import org.apache.calcite.rex.RexLiteral; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.query.InlineDataSource; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.planner.Calcites; @@ -126,8 +127,8 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont return Calcites.calciteDateTimeLiteralToJoda(literal, plannerContext.getTimeZone()).getMillis(); case NULL: if (!literal.isNull()) { - throw DruidException.unsupportedSqlError( - "Non-null constant %s for a NULL literal", + throw new SqlUnsupportedError( + "Non-null constant [%s] for a NULL literal", literal ); } @@ -136,8 +137,8 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont case TIME: case TIME_WITH_LOCAL_TIME_ZONE: default: - throw DruidException.unsupportedSqlError( - "Literal %s type %s is not supported", + throw new SqlUnsupportedError( + "Literal [%s] type [%s] is not supported", literal, literal.getType().getSqlTypeName() ); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java index 4f89734225e9..a40c23d4e64d 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java @@ -20,7 +20,8 @@ package org.apache.druid.sql.calcite.run; import org.apache.calcite.tools.ValidationException; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlValidationError; import java.util.Map; import java.util.Set; @@ -42,8 +43,8 @@ public static void 
validateNoSpecialContextKeys( { for (String contextParameterName : queryContext.keySet()) { if (specialContextKeys.contains(contextParameterName)) { - throw DruidException.validationError( - "Query context parameter '%s' is not allowed", + throw new SqlValidationError( + "Query context parameter [%s] is not allowed", contextParameterName ); } diff --git a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java index 0b6650649941..024467e48662 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java @@ -25,7 +25,9 @@ import com.google.common.util.concurrent.MoreExecutors; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlParseError; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.concurrent.Execs; import org.apache.druid.java.util.common.guava.LazySequence; @@ -286,11 +288,11 @@ public void testDirectSyntaxError() stmt.execute(); fail(); } - catch (DruidException e) { + catch (SqlParseError e) { // Expected Assert.assertEquals( QueryException.SQL_PARSE_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -306,11 +308,11 @@ public void testDirectValidationError() stmt.execute(); fail(); } - catch (DruidException e) { + catch (SqlValidationError e) { // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -371,11 +373,11 @@ public void testHttpSyntaxError() stmt.execute(); fail(); } - catch (DruidException e) { + catch (SqlParseError e) { // Expected Assert.assertEquals( QueryException.SQL_PARSE_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -391,11 +393,11 @@ public 
void testHttpValidationError() stmt.execute(); fail(); } - catch (DruidException e) { + catch (SqlValidationError e) { // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -460,11 +462,11 @@ public void testPrepareSyntaxError() stmt.prepare(); fail(); } - catch (DruidException e) { + catch (SqlParseError e) { // Expected Assert.assertEquals( QueryException.SQL_PARSE_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -480,11 +482,11 @@ public void testPrepareValidationError() stmt.prepare(); fail(); } - catch (DruidException e) { + catch (SqlValidationError e) { // Expected Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java index 8fc797a49818..1987aef421d0 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java @@ -641,18 +641,13 @@ public void assertQueryIsUnplannable(final PlannerConfig plannerConfig, final St catch (DruidException e) { Assert.assertEquals( sql, - "Unsupported query", + StringUtils.format("Query not supported. 
Possible error: %s", expectedError), e.message() ); Assert.assertEquals( sql, sql, - e.context("SQL") - ); - Assert.assertEquals( - sql, - expectedError, - e.context("Possible error") + e.getContext().get("SQL") ); } catch (Exception e) { diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index b44a88b9f25d..850da29c15ae 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -26,7 +26,9 @@ import org.apache.druid.data.input.InputSource; import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.InlineInputSource; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.SqlParseError; +import org.apache.druid.error.SqlUnsupportedError; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.granularity.Granularity; @@ -199,7 +201,7 @@ public void testInsertIntoInvalidDataSourceName() { testIngestionQuery() .sql("INSERT INTO \"in/valid\" SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "INSERT dataSource cannot contain the '/' character.") + .expectValidationError(SqlValidationError.class, "INSERT dataSource cannot contain the '/' character.") .verify(); } @@ -208,7 +210,7 @@ public void testInsertUsingColumnList() { testIngestionQuery() .sql("INSERT INTO dst (foo, bar) SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "INSERT with a target column list is not supported.") + .expectValidationError(SqlUnsupportedError.class, "[INSERT] with a target column list is not supported") .verify(); } @@ -217,7 +219,7 @@ public void testUpsert() { 
testIngestionQuery() .sql("UPSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "UPSERT is not supported.") + .expectValidationError(SqlUnsupportedError.class, "UPSERT is not supported.") .verify(); } @@ -229,8 +231,8 @@ public void testSelectFromSystemTable() testIngestionQuery() .sql("INSERT INTO dst SELECT * FROM INFORMATION_SCHEMA.COLUMNS PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, - "Cannot query table INFORMATION_SCHEMA.COLUMNS with SQL engine 'ingestion-test'." + SqlValidationError.class, + "Cannot query table [INFORMATION_SCHEMA.COLUMNS] with SQL engine [ingestion-test]" ) .verify(); } @@ -241,8 +243,8 @@ public void testInsertIntoSystemTable() testIngestionQuery() .sql("INSERT INTO INFORMATION_SCHEMA.COLUMNS SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, - "Cannot INSERT into INFORMATION_SCHEMA.COLUMNS because it is not a Druid datasource." + SqlValidationError.class, + "Cannot [INSERT] into [INFORMATION_SCHEMA.COLUMNS] because it is not a Druid datasource." ) .verify(); } @@ -253,8 +255,8 @@ public void testInsertIntoView() testIngestionQuery() .sql("INSERT INTO view.aview SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, - "Cannot INSERT into view.aview because it is not a Druid datasource." + SqlValidationError.class, + "Cannot [INSERT] into [view.aview] because it is not a Druid datasource." ) .verify(); } @@ -283,8 +285,8 @@ public void testInsertIntoNonexistentSchema() testIngestionQuery() .sql("INSERT INTO nonexistent.dst SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, - "Cannot INSERT into nonexistent.dst because it is not a Druid datasource." + SqlValidationError.class, + "Cannot [INSERT] into [nonexistent.dst] because it is not a Druid datasource." 
) .verify(); } @@ -820,7 +822,7 @@ public void testInsertWithoutPartitionedByWithClusteredBy() + "CLUSTERED BY 2, dim1 DESC, CEIL(m2)" ) .expectValidationError( - DruidException.class, + SqlParseError.class, "CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause" ) .verify(); @@ -901,9 +903,9 @@ public void testInsertWithClusteredByAndOrderBy() ); Assert.fail("Exception should be thrown"); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertEquals( - "Cannot have ORDER BY on an INSERT statement, use CLUSTERED BY instead.", + "Cannot use ORDER BY on an [INSERT] statement, use CLUSTERED BY instead.", e.getMessage() ); } @@ -922,9 +924,9 @@ public void testInsertWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (DruidException e) { + catch (SqlParseError e) { Assert.assertEquals( - "Encountered 'invalid_granularity' after PARTITIONED BY. Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", + "Encountered ['invalid_granularity'] after PARTITIONED BY. 
Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", e.getMessage() ); } @@ -945,9 +947,9 @@ public void testInsertWithOrderBy() ); Assert.fail("Exception should be thrown"); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertEquals( - "Cannot have ORDER BY on an INSERT statement, use CLUSTERED BY instead.", + "Cannot use ORDER BY on an [INSERT] statement, use CLUSTERED BY instead.", e.getMessage() ); } @@ -959,8 +961,8 @@ public void testInsertWithOrderBy() @Test public void testInsertWithoutPartitionedBy() { - DruidException e = Assert.assertThrows( - DruidException.class, + SqlValidationError e = Assert.assertThrows( + SqlValidationError.class, () -> testQuery( StringUtils.format("INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)), @@ -968,7 +970,7 @@ public void testInsertWithoutPartitionedBy() ImmutableList.of() ) ); - Assert.assertEquals("INSERT statements must specify PARTITIONED BY clause explicitly", e.getMessage()); + Assert.assertEquals("[INSERT] statements must specify PARTITIONED BY clause explicitly", e.getMessage()); didTest = true; } @@ -1192,7 +1194,7 @@ public void testSurfaceErrorsWhenInsertingThroughIncorrectSelectStatment() { assertQueryIsUnplannable( "INSERT INTO druid.dst SELECT dim2, dim1, m1 FROM foo2 UNION SELECT dim1, dim2, m1 FROM foo PARTITIONED BY ALL TIME", - "Possible error: SQL requires 'UNION' but only 'UNION ALL' is supported." + "SQL requires 'UNION' but only 'UNION ALL' is supported." ); // Not using testIngestionQuery, so must set didTest manually to satisfy the check in tearDown. 
@@ -1312,8 +1314,8 @@ public void testInsertWithInvalidSelectStatement() .sql("INSERT INTO t SELECT channel, added as count FROM foo PARTITIONED BY ALL") // count is a keyword .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Encountered \"as count\"")) + CoreMatchers.instanceOf(SqlParseError.class), + ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Line [1], Column [37]: unexpected token [as count]")) ) ) .verify(); @@ -1325,7 +1327,7 @@ public void testInsertWithUnnamedColumnInSelectStatement() testIngestionQuery() .sql("INSERT INTO t SELECT dim1, dim2 || '-lol' FROM foo PARTITIONED BY ALL") .expectValidationError( - DruidException.class, + SqlValidationError.class, IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); @@ -1337,7 +1339,7 @@ public void testInsertWithInvalidColumnNameInIngest() testIngestionQuery() .sql("INSERT INTO t SELECT __time, dim1 AS EXPR$0 FROM foo PARTITIONED BY ALL") .expectValidationError( - DruidException.class, + SqlValidationError.class, IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); @@ -1351,7 +1353,7 @@ public void testInsertWithUnnamedColumnInNestedSelectStatement() + "SELECT __time, * FROM " + "(SELECT __time, LOWER(dim1) FROM foo) PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, + SqlValidationError.class, IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); @@ -1364,11 +1366,9 @@ public void testInsertQueryWithInvalidGranularity() .sql("insert into foo1 select __time, dim1 FROM foo partitioned by time_floor(__time, 'PT2H')") .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlParseError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "The granularity specified in PARTITIONED BY is not supported. 
" - + "Please use an equivalent of these granularities: second, minute, five_minute, ten_minute, " - + "fifteen_minute, thirty_minute, hour, six_hour, eight_hour, day, week, month, quarter, year, all.")) + "The granularity specified in PARTITIONED BY is not supported.")) ) ) .verify(); @@ -1391,7 +1391,7 @@ public void testInsertOnExternalDataSourceWithIncompatibleTimeColumnSignature() ) .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "EXTERN function with __time column can be used when __time column is of type long")) ) @@ -1409,8 +1409,8 @@ public void testInsertWithSqlOuterLimit() .context(context) .sql("INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, - "sqlOuterLimit cannot be provided with INSERT." + SqlValidationError.class, + "Context parameter [sqlOuterLimit] cannot be provided with [INSERT]" ) .verify(); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java index 8c126d0b7fd0..014dc7c2926a 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java @@ -22,7 +22,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.math.expr.ExpressionProcessing; @@ -1797,7 +1797,7 @@ public void testMultiValueToArrayMoreArgs() testQueryThrows( "SELECT 
MV_TO_ARRAY(dim3,dim3) FROM druid.numfoo", exception -> { - exception.expect(DruidException.class); + exception.expect(SqlValidationError.class); exception.expectMessage("Invalid number of arguments to function"); } ); @@ -1809,7 +1809,7 @@ public void testMultiValueToArrayNoArgs() testQueryThrows( "SELECT MV_TO_ARRAY() FROM druid.numfoo", exception -> { - exception.expect(DruidException.class); + exception.expect(SqlValidationError.class); exception.expectMessage("Invalid number of arguments to function"); } ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java index 30d6c8c3e9e8..eefd2bc7eb53 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java @@ -32,7 +32,7 @@ import org.apache.druid.data.input.impl.LongDimensionSchema; import org.apache.druid.data.input.impl.StringDimensionSchema; import org.apache.druid.data.input.impl.TimestampSpec; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.guice.DruidInjectorBuilder; import org.apache.druid.guice.NestedDataModule; import org.apache.druid.java.util.common.HumanReadableBytes; @@ -4172,7 +4172,7 @@ public void testGroupByInvalidPath() + "SUM(cnt) " + "FROM druid.nested GROUP BY 1", (expected) -> { - expected.expect(DruidException.class); + expected.expect(SqlValidationError.class); expected.expectMessage( "Cannot use [JSON_VALUE_VARCHAR]: [Bad format, '.array.[1]' is not a valid JSONPath path: must start with '$']"); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java index 97ad77b8ea85..15f423cbaded 100644 --- 
a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java @@ -22,7 +22,8 @@ import com.google.common.collect.ImmutableList; import org.apache.calcite.avatica.SqlType; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; @@ -577,7 +578,7 @@ public void testLongs() @Test public void testMissingParameter() { - expectedException.expect(DruidException.class); + expectedException.expect(SqlValidationError.class); expectedException.expectMessage("Parameter at position [0] is not bound"); testQuery( "SELECT COUNT(*)\n" @@ -592,7 +593,7 @@ public void testMissingParameter() @Test public void testPartiallyMissingParameter() { - expectedException.expect(DruidException.class); + expectedException.expect(SqlValidationError.class); expectedException.expectMessage("Parameter at position [1] is not bound"); testQuery( "SELECT COUNT(*)\n" @@ -610,7 +611,7 @@ public void testPartiallyMissingParameterInTheMiddle() List params = new ArrayList<>(); params.add(null); params.add(new SqlParameter(SqlType.INTEGER, 1)); - expectedException.expect(DruidException.class); + expectedException.expect(SqlValidationError.class); expectedException.expectMessage("Parameter at position [0] is not bound"); testQuery( "SELECT 1 + ?, dim1 FROM foo LIMIT ?", diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 74d40ffda34e..aa8ccc8f8f04 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -25,7 +25,8 @@ import com.google.common.collect.ImmutableSet; import org.apache.calcite.runtime.CalciteContextException; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.Intervals; @@ -371,8 +372,8 @@ public void testInformationSchemaColumnsOnAnotherView() public void testCannotInsertWithNativeEngine() { notMsqCompatible(); - final DruidException e = Assert.assertThrows( - DruidException.class, + final SqlValidationError e = Assert.assertThrows( + SqlValidationError.class, () -> testQuery( "INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -392,8 +393,8 @@ public void testCannotInsertWithNativeEngine() public void testCannotReplaceWithNativeEngine() { notMsqCompatible(); - final DruidException e = Assert.assertThrows( - DruidException.class, + final SqlValidationError e = Assert.assertThrows( + SqlValidationError.class, () -> testQuery( "REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -813,7 +814,7 @@ public void testLatestAggregators() @Test public void testEarliestByInvalidTimestamp() { - expectedException.expect(DruidException.class); + expectedException.expect(SqlValidationError.class); expectedException.expectMessage("Cannot apply 'EARLIEST_BY' to arguments of type 'EARLIEST_BY(, )"); testQuery( @@ -826,7 +827,7 @@ public void testEarliestByInvalidTimestamp() @Test public void testLatestByInvalidTimestamp() { - expectedException.expect(DruidException.class); + expectedException.expect(SqlValidationError.class); expectedException.expectMessage("Cannot apply 'LATEST_BY' to arguments of type 'LATEST_BY(, )"); testQuery( 
@@ -2902,13 +2903,13 @@ public void testUnionAllTablesColumnCountMismatch() ); Assert.fail("query execution should fail"); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertTrue( e.getMessage().contains("Column count mismatch in UNION ALL") ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -3175,13 +3176,13 @@ public void testUnionAllThreeTablesColumnCountMismatch1() ); Assert.fail("query execution should fail"); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertTrue( e.getMessage().contains("Column count mismatch in UNION ALL") ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -3201,13 +3202,13 @@ public void testUnionAllThreeTablesColumnCountMismatch2() ); Assert.fail("query execution should fail"); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertTrue( e.getMessage().contains("Column count mismatch in UNION ALL") ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -3227,13 +3228,13 @@ public void testUnionAllThreeTablesColumnCountMismatch3() ); Assert.fail("query execution should fail"); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertTrue( e.getMessage().contains("Column count mismatch in UNION ALL") ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -5775,13 +5776,13 @@ public void testStringAggQueryOnComplexDatatypes() testQuery("SELECT STRING_AGG(unique_dim1, ',') FROM druid.foo", ImmutableList.of(), ImmutableList.of()); Assert.fail("query execution should fail"); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertTrue( e.getMessage().contains("Cannot use STRING_AGG on complex inputs COMPLEX") ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - 
e.code() + e.getErrorCode() ); } } @@ -5918,7 +5919,7 @@ public void testCountStarWithTimeInIntervalFilterNonLiteral() "SELECT COUNT(*) FROM druid.foo " + "WHERE TIME_IN_INTERVAL(__time, dim1)", expected -> { - expected.expect(CoreMatchers.instanceOf(DruidException.class)); + expected.expect(CoreMatchers.instanceOf(SqlValidationError.class)); expected.expect(ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "From line 1, column 38 to line 1, column 67: " + "Cannot apply 'TIME_IN_INTERVAL' to arguments of type 'TIME_IN_INTERVAL(, )'. " @@ -6064,17 +6065,12 @@ public void testCountStarWithTimeFilterUsingStringLiteralsInvalid_isUnplannable( try { testBuilder().sql(sql).run(); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertEquals( sql, - "Illegal TIMESTAMP constant", + "Illegal TIMESTAMP constant [CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL]", e.message() ); - Assert.assertEquals( - sql, - "CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL", - e.context("Value") - ); } catch (Exception e) { log.error(e, "Expected DruidException for query: %s", sql); @@ -11326,13 +11322,13 @@ public void testTimeExtractWithTooFewArguments() testQuery("SELECT TIME_EXTRACT(__time) FROM druid.foo", ImmutableList.of(), ImmutableList.of()); Assert.fail("query execution should fail"); } - catch (DruidException e) { + catch (SqlValidationError e) { Assert.assertTrue( e.getMessage().contains("Invalid number of arguments to function 'TIME_EXTRACT'. 
Was expecting 2 arguments") ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.code() + e.getErrorCode() ); } } @@ -13988,7 +13984,7 @@ public void testStringAggExpression() ); } - @Test(expected = DruidException.class) + @Test(expected = SqlValidationError.class) public void testStringAggExpressionNonConstantSeparator() { testQuery( @@ -14137,7 +14133,7 @@ public void testHumanReadableFormatFunction() @Test public void testHumanReadableFormatFunctionExceptionWithWrongNumberType() { - this.expectedException.expect(DruidException.class); + this.expectedException.expect(SqlValidationError.class); this.expectedException.expectMessage("Supported form(s): HUMAN_READABLE_BINARY_BYTE_FORMAT(Number, [Precision])"); testQuery( "SELECT HUMAN_READABLE_BINARY_BYTE_FORMAT('45678')", @@ -14149,7 +14145,7 @@ public void testHumanReadableFormatFunctionExceptionWithWrongNumberType() @Test public void testHumanReadableFormatFunctionWithWrongPrecisionType() { - this.expectedException.expect(DruidException.class); + this.expectedException.expect(SqlValidationError.class); this.expectedException.expectMessage("Supported form(s): HUMAN_READABLE_BINARY_BYTE_FORMAT(Number, [Precision])"); testQuery( "SELECT HUMAN_READABLE_BINARY_BYTE_FORMAT(45678, '2')", @@ -14161,7 +14157,7 @@ public void testHumanReadableFormatFunctionWithWrongPrecisionType() @Test public void testHumanReadableFormatFunctionWithInvalidNumberOfArguments() { - this.expectedException.expect(DruidException.class); + this.expectedException.expect(SqlValidationError.class); /* * frankly speaking, the exception message thrown here is a little bit confusing diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java index 3dc445b4f3c5..d8bd358e39a1 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java @@ -22,7 +22,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.jackson.JacksonUtils; @@ -219,7 +220,7 @@ public void testReplaceForUnsupportedDeleteWhereClause() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time LIKE '20__-02-01' SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - DruidException.class, + SqlValidationError.class, "Unsupported operation in OVERWRITE WHERE clause: LIKE" ) .verify(); @@ -231,7 +232,7 @@ public void testReplaceForInvalidDeleteWhereClause() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE TRUE SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - DruidException.class, + SqlValidationError.class, "Invalid OVERWRITE WHERE clause" ) .verify(); @@ -243,7 +244,7 @@ public void testReplaceForDeleteWhereClauseOnUnsupportedColumns() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE dim1 > TIMESTAMP '2000-01-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, + SqlValidationError.class, "Only __time column is supported in OVERWRITE WHERE clause" ) .verify(); @@ -255,7 +256,7 @@ public void testReplaceWithOrderBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo ORDER BY dim1 PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "Cannot have ORDER BY on a REPLACE statement, use CLUSTERED BY instead.") + .expectValidationError(SqlValidationError.class, "Cannot have ORDER BY on a REPLACE statement, use 
CLUSTERED BY instead.") .verify(); } @@ -265,7 +266,7 @@ public void testReplaceForMisalignedPartitionInterval() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-01-06 00:00:00' SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - DruidException.class, + SqlValidationError.class, "OVERWRITE WHERE clause contains an interval [2000-01-05T00:00:00.000Z/2000-01-06T00:00:00.001Z] which is not aligned with PARTITIONED BY granularity {type=period, period=P1M, timeZone=UTC, origin=null}" ) .verify(); @@ -277,7 +278,7 @@ public void testReplaceForInvalidPartition() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, + SqlValidationError.class, "OVERWRITE WHERE clause contains an interval [2000-01-05T00:00:00.000Z/2000-02-05T00:00:00.001Z] which is not aligned with PARTITIONED BY granularity AllGranularity" ) .verify(); @@ -291,7 +292,7 @@ public void testReplaceFromTableWithEmptyInterval() + "__time < TIMESTAMP '2000-01-01' AND __time > TIMESTAMP '2000-01-01' " + "SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - DruidException.class, + SqlValidationError.class, "Intervals for replace are empty" ) .verify(); @@ -302,7 +303,7 @@ public void testReplaceForWithInvalidInterval() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-INVALID0:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class) + .expectValidationError(SqlValidationError.class) .verify(); } @@ -311,7 +312,7 @@ public void testReplaceForWithoutPartitionSpec() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class) + 
.expectValidationError(SqlValidationError.class) .verify(); } @@ -381,7 +382,7 @@ public void testReplaceIntoInvalidDataSourceName() { testIngestionQuery() .sql("REPLACE INTO \"in/valid\" OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "REPLACE dataSource cannot contain the '/' character.") + .expectValidationError(SqlValidationError.class, "REPLACE dataSource cannot contain the '/' character.") .verify(); } @@ -390,7 +391,7 @@ public void testReplaceUsingColumnList() { testIngestionQuery() .sql("REPLACE INTO dst (foo, bar) OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "REPLACE with a target column list is not supported.") + .expectValidationError(SqlValidationError.class, "REPLACE with a target column list is not supported.") .verify(); } @@ -399,7 +400,7 @@ public void testReplaceWithoutPartitionedBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo") - .expectValidationError(DruidException.class, "REPLACE statements must specify PARTITIONED BY clause explicitly") + .expectValidationError(SqlValidationError.class, "REPLACE statements must specify PARTITIONED BY clause explicitly") .verify(); } @@ -408,7 +409,7 @@ public void testReplaceWithoutPartitionedByWithClusteredBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo CLUSTERED BY dim1") - .expectValidationError(DruidException.class, "CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause") + .expectValidationError(SqlValidationError.class, "CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause") .verify(); } @@ -417,7 +418,7 @@ public void testReplaceWithoutOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") + .expectValidationError(SqlValidationError.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") .verify(); } @@ -426,7 +427,7 @@ public void testReplaceWithoutCompleteOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") + .expectValidationError(SqlValidationError.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") .verify(); } @@ -436,7 +437,7 @@ public void testReplaceIntoSystemTable() testIngestionQuery() .sql("REPLACE INTO INFORMATION_SCHEMA.COLUMNS OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, + SqlValidationError.class, "Cannot REPLACE into INFORMATION_SCHEMA.COLUMNS because it is not a Druid datasource." ) .verify(); @@ -448,7 +449,7 @@ public void testReplaceIntoView() testIngestionQuery() .sql("REPLACE INTO view.aview OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, + SqlValidationError.class, "Cannot REPLACE into view.aview because it is not a Druid datasource." 
) .verify(); @@ -478,7 +479,7 @@ public void testReplaceIntoNonexistentSchema() testIngestionQuery() .sql("REPLACE INTO nonexistent.dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - DruidException.class, + SqlValidationError.class, "Cannot REPLACE into nonexistent.dst because it is not a Druid datasource." ) .verify(); @@ -585,7 +586,7 @@ public void testReplaceWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (DruidException e) { + catch (SqlValidationError e) { assertEquals( "Encountered 'invalid_granularity' after PARTITIONED BY. Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", e.getMessage() @@ -909,7 +910,7 @@ public void testReplaceWithSqlOuterLimit() testIngestionQuery() .context(context) .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(DruidException.class, "sqlOuterLimit cannot be provided with REPLACE.") + .expectValidationError(SqlValidationError.class, "sqlOuterLimit cannot be provided with REPLACE.") .verify(); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java index 9c7b59bd53d5..f694accde48c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java @@ -21,7 +21,8 @@ import com.google.common.collect.ImmutableList; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; @@ -964,7 +965,7 @@ public void 
testSelectCurrentTimePrecisionTooHigh() testQueryThrows( "SELECT CURRENT_TIMESTAMP(4)", expectedException -> { - expectedException.expect(DruidException.class); + expectedException.expect(SqlValidationError.class); expectedException.expectMessage( "Argument to function 'CURRENT_TIMESTAMP' must be a valid precision between '0' and '3'" ); diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index 2be34a5e1018..030049f370e3 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -118,6 +118,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.StreamingOutput; + import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -1344,7 +1345,7 @@ public void testCannotParse() throws Exception Assert.assertNotNull(exception); Assert.assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), exception.getErrorCode()); Assert.assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue(exception.getMessage().contains("Line 1, Column 1: unexpected token 'FROM'")); + Assert.assertTrue(exception.getMessage().contains("Line [1], Column [1]: unexpected token [FROM]")); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1400,7 +1401,7 @@ public void testCannotConvert_UnsupportedSQLQueryException() throws Exception Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorCode(), exception.getErrorCode()); Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorClass(), exception.getErrorClass()); Assert.assertTrue( - exception.getMessage().contains("MAX does not support type STRING") + exception.getMessage().contains("MAX does not support type [STRING]") ); checkSqlRequestLog(false); 
Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); @@ -1885,7 +1886,7 @@ public void testQueryContextKeyNotAllowed() throws Exception Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), exception.getErrorCode()); MatcherAssert.assertThat( exception.getMessage(), - CoreMatchers.containsString("Query context parameter 'sqlInsertSegmentGranularity' is not allowed") + CoreMatchers.containsString("Query context parameter [sqlInsertSegmentGranularity] is not allowed") ); checkSqlRequestLog(false); } @@ -1935,14 +1936,6 @@ private Pair doPostRaw(final SqlQuery query) throws Exce return doPostRaw(query, req); } - private Pair>> doPost(final SqlQuery query, MockHttpServletRequest req) - throws Exception - { - return doPost(query, req, new TypeReference>>() - { - }); - } - // Returns either an error or a result, assuming the result is a JSON object. @SuppressWarnings("unchecked") private Pair doPost( @@ -2041,7 +2034,6 @@ private byte[] responseToByteArray(Response resp) throws IOException ((StreamingOutput) resp.getEntity()).write(baos); return baos.toByteArray(); } else { - String foo = JSON_MAPPER.writeValueAsString(resp.getEntity()); return JSON_MAPPER.writeValueAsBytes(resp.getEntity()); } } From 571e3b53e6fd27c9154e44427035b4cc402d02fc Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Sat, 4 Mar 2023 18:08:21 -0800 Subject: [PATCH 09/17] Revised to add unique error code, error catalog --- .../apache/druid/msq/exec/MSQReplaceTest.java | 1 + .../apache/druid/msq/exec/MSQSelectTest.java | 19 +- .../druid/error/DruidAssertionError.java | 45 ++- .../apache/druid/error/DruidException.java | 268 ++++++++---------- .../org/apache/druid/error/ErrorCategory.java | 120 -------- .../org/apache/druid/error/ErrorCode.java | 37 +++ .../org/apache/druid/error/ErrorResponse.java | 14 +- .../apache/druid/error/MetricCategory.java | 19 ++ .../org/apache/druid/error/SqlParseError.java | 42 ++- .../druid/error/SqlUnsupportedError.java | 56 +++- 
.../druid/error/SqlValidationError.java | 45 ++- .../error/StandardRestExceptionEncoder.java | 67 +++-- .../apache/druid/server/QueryResource.java | 4 +- .../druid/server/QueryResultPusher.java | 19 +- .../org/apache/druid/sql/DirectStatement.java | 1 - .../EarliestLatestAnySqlAggregator.java | 31 +- .../aggregation/builtin/MaxSqlAggregator.java | 3 +- .../aggregation/builtin/MinSqlAggregator.java | 3 +- .../builtin/StringSqlAggregator.java | 3 +- .../aggregation/builtin/SumSqlAggregator.java | 3 +- .../NestedDataOperatorConversions.java | 13 +- .../calcite/parser/DruidSqlParserUtils.java | 72 +++-- .../sql/calcite/planner/DruidPlanner.java | 61 ++-- .../sql/calcite/planner/DruidRexExecutor.java | 42 ++- .../sql/calcite/planner/IngestHandler.java | 82 +++--- .../sql/calcite/planner/QueryHandler.java | 40 +-- .../planner/RelParameterizerShuttle.java | 21 +- .../planner/SqlParameterizerShuttle.java | 36 ++- .../sql/calcite/rel/DruidJoinQueryRel.java | 7 +- .../calcite/rule/DruidLogicalValuesRule.java | 17 +- .../druid/sql/calcite/run/SqlEngines.java | 8 +- .../org/apache/druid/sql/guice/SqlModule.java | 5 + .../apache/druid/sql/http/SqlResource.java | 17 +- .../apache/druid/sql/SqlStatementTest.java | 17 +- .../sql/calcite/BaseCalciteQueryTest.java | 7 +- .../sql/calcite/CalciteInsertDmlTest.java | 58 ++-- .../calcite/CalciteParameterQueryTest.java | 7 +- .../druid/sql/calcite/CalciteQueryTest.java | 59 ++-- .../sql/calcite/CalciteReplaceDmlTest.java | 43 +-- .../sql/calcite/CalciteSelectQueryTest.java | 6 +- .../druid/sql/http/SqlResourceTest.java | 10 +- 41 files changed, 804 insertions(+), 624 deletions(-) delete mode 100644 processing/src/main/java/org/apache/druid/error/ErrorCategory.java create mode 100644 processing/src/main/java/org/apache/druid/error/ErrorCode.java diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java 
b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java index f6854361f958..b777afacb8cb 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java @@ -24,6 +24,7 @@ import org.apache.druid.error.SqlValidationError; import org.apache.druid.error.DruidException; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.SqlValidationError; import org.apache.druid.indexing.common.actions.RetrieveUsedSegmentsAction; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.msq.test.CounterSnapshotMatcher; diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java index c27260247c45..c2f91ec1ce12 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java @@ -25,7 +25,8 @@ import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.JsonInputFormat; import org.apache.druid.data.input.impl.LocalInputSource; -import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlParseError; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.SqlValidationError; import org.apache.druid.frame.util.DurableStorageUtils; import org.apache.druid.java.util.common.DateTimes; @@ -97,7 +98,6 @@ @RunWith(Parameterized.class) public class MSQSelectTest extends MSQTestBase { - @Parameterized.Parameters(name = "{index}:with context {0}") public static Collection data() { @@ -1189,8 +1189,8 @@ public void testIncorrectSelectQuery() testSelectQuery() .setSql("select a from ") 
.setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Encountered \"from \"")) + CoreMatchers.instanceOf(SqlParseError.class), + ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("SQL-Parse-UnexpectedToken: line=[1], column=[10], token=[from ],")) )) .setQueryContext(context) .verifyPlanningErrors(); @@ -1206,7 +1206,7 @@ public void testSelectOnInformationSchemaSource() CoreMatchers.allOf( CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "Cannot query table INFORMATION_SCHEMA.SCHEMATA with SQL engine 'msq-task'.")) + "SQL-Validation-WrongEngineForTable: tables=[INFORMATION_SCHEMA.SCHEMATA], engine=[msq-task]")) ) ) .verifyPlanningErrors(); @@ -1222,7 +1222,7 @@ public void testSelectOnSysSource() CoreMatchers.allOf( CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "Cannot query table sys.segments with SQL engine 'msq-task'.")) + "SQL-Validation-WrongEngineForTable: tables=[sys.segments], engine=[msq-task]")) ) ) .verifyPlanningErrors(); @@ -1238,7 +1238,7 @@ public void testSelectOnSysSourceWithJoin() CoreMatchers.allOf( CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "Cannot query table sys.segments with SQL engine 'msq-task'.")) + "SQL-Validation-WrongEngineForTable: tables=[sys.segments], engine=[msq-task]")) ) ) .verifyPlanningErrors(); @@ -1255,13 +1255,12 @@ public void testSelectOnSysSourceContainingWith() CoreMatchers.allOf( CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "Cannot query table sys.segments with SQL engine 'msq-task'.")) + "SQL-Validation-WrongEngineForTable: tables=[sys.segments], engine=[msq-task]")) ) ) .verifyPlanningErrors(); } - @Test public void 
testSelectOnUserDefinedSourceContainingWith() { @@ -1678,7 +1677,7 @@ public void testGroupByWithComplexColumnThrowsUnsupportedException() .setSql("select unique_dim1 from foo2 group by unique_dim1") .setQueryContext(context) .setExpectedExecutionErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(UnsupportedSQLQueryException.class), + CoreMatchers.instanceOf(SqlUnsupportedError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "SQL requires a group-by on a column of type COMPLEX that is unsupported")) )) diff --git a/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java b/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java index 2e9de7fba9c2..d6edf2ecd8cd 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java +++ b/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java @@ -19,26 +19,55 @@ package org.apache.druid.error; +import org.apache.druid.java.util.common.UOE; +import org.apache.druid.query.QueryException; + +import java.net.HttpURLConnection; + public class DruidAssertionError extends DruidException { - public DruidAssertionError(String msg, Object...args) + public DruidAssertionError( + String message + ) + { + this(null, message); + } + + public DruidAssertionError( + Throwable cause, + String message + ) { - super(msg, args); + super( + cause, + ErrorCode.fullCode(ErrorCode.INTERNAL_GROUP, "AssertionFailed"), + message + ); + this.legacyCode = QueryException.UNSUPPORTED_OPERATION_ERROR_CODE; + this.legacyClass = UOE.class.getName(); } - public DruidAssertionError(Throwable cause, String msg, Object...args) + public static DruidException forMessage(String message) { - super(cause, msg, args); + return new DruidAssertionError(SIMPLE_MESSAGE) + .withValue(MESSAGE_KEY, message); } - public DruidAssertionError(Throwable cause) + public static DruidException forCause(Throwable cause, String message) + { + return new DruidAssertionError(cause, 
SIMPLE_MESSAGE) + .withValue(MESSAGE_KEY, message); + } + + @Override + public ErrorAudience audience() { - super(cause, cause.getMessage()); + return ErrorAudience.DRUID_DEVELOPER; } @Override - public ErrorCategory category() + public int httpStatus() { - return ErrorCategory.INTERNAL; + return HttpURLConnection.HTTP_INTERNAL_ERROR; } } diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index 0e514767cffe..08df486d0342 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -1,38 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.druid.error; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.base.Strings; -import org.apache.druid.java.util.common.StringUtils; +import org.apache.commons.text.StringSubstitutor; -import javax.annotation.Nullable; import javax.annotation.concurrent.NotThreadSafe; +import java.util.ArrayList; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Properties; /** * Represents an error condition exposed to the user and/or operator of Druid. - * Each error category is given by a subclass of this class. + * Every error consists of: + *
    + *
  • An error code.
  • + *
  • A set of zero or more parameters.
  • + *
  • A default error message "template".
  • + *
*

- * Not needed for purely internal exceptions thrown and caught within Druid itself. - * There are categories of error that determine the general form of corrective - * action, and also determine HTTP (or other API) status codes. + * The error code is a unique identifier for each and every distinct + * kind of error. Codes should follow the pattern of + * @{code --} such as + * @{code SQL-VALIDATION-UNKNOWN_COLUMN}. *

- * Druid exceptions can contain context. Use the context for details, such as - * file names, query context variables, symbols, etc. This allows the error - * message itself to be simple. Context allows consumers to filter out various - * bits of information that a site does not wish to expose to the user, while - * still logging the full details. Typical usage: - *


- * if (something_is_wrong) {
- *   throw new NotFoundException("File not found")
- *       .addContext("File name", theFile.getName())
- *       .addContext("Directory", theFile.getParent());
- * }
- * 
+ * The message template is a user-visible explanation for the error. + * The message is a template because it contains named placeholders + * to fill in with parameters:
+ * "Line ${line}, Column ${column}: Column [${name}] not found"
*

- * Exceptions are mutable and may not be modified by two thread concurrently. + * The parameters are the values to fill in the placeholders in the + * template. Each subclass defines the parameters for that error, along + * with the required mapping to placeholders. + *

+ * With this system, extensions can translate the messages to the needs + * of a specific system. For example, if system generates SQL, then telling + * the user the line number of the error is just confusing. In that system, + * the error could be translated to:
+ * "Field '${name}' is not defined. Check the field list."
+ *

+ * Exceptions are mutable and must not be modified by two threads concurrently. * However, it is highly unlikely that such concurrent access would occur: that's * not how exceptions work. Exceptions can be exchanged across threads, as long * as only one thread at a time mutates the exception. @@ -53,182 +80,135 @@ * try (Reader reader = open(theFile)) { * doTheRead(reader) * } catch (DruidException e) { - * throw e.addContext("File name", theFile.getName()); + * e.setFileName(theFile.getName()); + * throw e; * } * } * + *

+ * Exceptions are not serializable. Instead, exceptions are translated + * to some other form when sent over the wire. */ @NotThreadSafe public abstract class DruidException extends RuntimeException { - /** - * The context provides additional information about an exception which may - * be redacted on a managed system. Provide essential information in the - * message itself. - */ - // Linked hash map to preserve order - private Map context; + public static final String SIMPLE_MESSAGE = "${message}"; + public static final String MESSAGE_KEY = "message"; - /** - * Name of the host on which the error occurred, when the error occurred on - * a host other than the one to which the original request was sent. For example, - * in a query, if the error occurs on a historical, this field names that historical. - */ - private String host; - - /** - * Good errors provide a suggestion to resolve the issue. Such suggestions should - * focus on a simple Druid installation where the user is also the admin. More - * advanced deployments may find such helpful suggestions to be off the mark. - * To resolve this conflict, add suggestions separately from the message itself - * so each consumer can decide whether to include it or not. 
- */ - private String suggestion; + private final String code; + private final String message; + protected final Map values = new LinkedHashMap<>(); + protected String legacyCode; + protected String legacyClass; public DruidException( - final String msg, - @Nullable final Object...args) + final String code, + final String message + ) { - super(StringUtils.format(msg, args)); + this(null, code, message); } public DruidException( final Throwable cause, - final String msg, - @Nullable final Object...args) + final String code, + final String message + ) { - super(StringUtils.format(msg, args), cause); + super(code, cause); + this.code = code; + this.message = message; } - public DruidException setHost(String host) + public DruidException withValue(String key, Object value) { - this.host = host; + values.put(key, Objects.toString(value)); return this; } - @JsonProperty - public String getErrorClass() + public DruidException withValues(Map values) { - String errorClass = errorClass(); - return errorClass == null ? getClass().getName() : errorClass; + this.values.putAll(values); + return this; } - @JsonProperty - @JsonInclude(JsonInclude.Include.NON_NULL) - public String getHost() + /** + * The error code is a summary of the error returned to the user. Multiple errors + * map to the same code: the code is more like a category of errors. Error codes + * must be backward compatible, even if the prior "codes" are awkward. + */ + public String errorCode() { - return host; + return code; } - public DruidException suggestion(String suggestion) + public String message() { - this.suggestion = suggestion; - return this; + return message; } - public String getSuggestion() + public Map values() { - return suggestion; + return values; } - public DruidException addContext(String key, String value) + // Used primarily when logging an error. 
+ @Override + public String getMessage() { - if (context == null) { - context = new LinkedHashMap<>(); + if (values.isEmpty()) { + return code; } - context.put(key, value); - return this; + List entries = new ArrayList<>(); + for (Map.Entry entry : values.entrySet()) { + entries.add(entry.getKey() + "=[" + entry.getValue() + "]"); + } + return code + ": " + String.join(", ", entries); } - public abstract ErrorCategory category(); - - public String errorClass() + // For debugging. + @Override + public String toString() { - return getClass().getName(); + return format(message); } - /** - * The error code is a summary of the error returned to the user. Multiple errors - * map to the same code: the code is more like a category of errors. Error codes - * must be backward compatible, even if the prior "codes" are awkward. - */ - @Nullable - @JsonProperty("error") - public String getErrorCode() + public String format(String template) { - return category().userText(); + StringSubstitutor sub = new StringSubstitutor(values); + return sub.replace(template); } - public String message() + public String format(Properties catalog) { - return super.getMessage(); - } - - @JsonProperty("errorMessage") - @Override - public String getMessage() - { - StringBuilder buf = new StringBuilder(); - buf.append(super.getMessage()); - String sep = "; "; - if (context != null && context.size() > 0) { - for (Map.Entry entry : context.entrySet()) { - buf.append(sep); - sep = ", "; - buf.append("\n") - .append(entry.getKey()) - .append(": [") - .append(entry.getValue()) - .append("]"); - } + String template = catalog.getProperty(code); + if (template == null) { + return toString(); + } else { + return format(template); } - if (!Strings.isNullOrEmpty(host)) { - buf.append(sep).append("Host: ").append(host); - } - return buf.toString(); } - public String getDisplayMessage() + public ErrorResponse toErrorResponse(Properties catalog) { - StringBuilder buf = new StringBuilder(); - String prefix = 
category().prefix(); - if (!Strings.isNullOrEmpty(prefix)) { - buf.append(prefix).append(" - "); - } - buf.append(super.getMessage()); - buf.append("\nError Code: ").append(category().userText()); - if (context != null && context.size() > 0) { - for (Map.Entry entry : context.entrySet()) { - buf.append("\n") - .append(entry.getKey()) - .append(": ") - .append(entry.getValue()); - } - } - if (!Strings.isNullOrEmpty(host)) { - buf.append("\nHost: ").append(host); - } - if (!Strings.isNullOrEmpty(suggestion)) { - buf.append("\nSuggestion: ").append(suggestion); - } - return buf.toString(); + return new ErrorResponse( + code, + format(catalog), + legacyClass, + null + ); } - @JsonProperty - @JsonInclude(JsonInclude.Include.NON_EMPTY) - public Map getContext() + public abstract ErrorAudience audience(); + public abstract int httpStatus(); + + public MetricCategory metricCategory() { - return context; + return MetricCategory.FAILED; } - public ErrorResponse toErrorResponse() + public String getErrorCode() { - return new ErrorResponse( - category().userText(), - message(), - errorClass(), - host, - context - ); + return legacyCode; } } diff --git a/processing/src/main/java/org/apache/druid/error/ErrorCategory.java b/processing/src/main/java/org/apache/druid/error/ErrorCategory.java deleted file mode 100644 index 541c6c2fb57f..000000000000 --- a/processing/src/main/java/org/apache/druid/error/ErrorCategory.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.error; - -import org.apache.druid.query.QueryException; - -import java.net.HttpURLConnection; - -/** - * The error category is a combination of the "persona" for the error, and the - * kind of error. The "persona" is the audience for the error: who can fix the - * problem. Often the persona is clear: it is the end user if, say, the user - * asks us to do something that doesn't make sense. (Data source does not exist, - * SQL with a syntax error, etc.) Other times, the persona is ambiguous: who is - * responsible for a network error? - *

- * The persona is not fine enough grain for all needs. So, within a persona, there - * can be finer-grain functional areas, such as the various kinds of SQL errors. - *

- * Different kinds of errors require different HTTP status codes for the REST API. - *

- * To add structure to this confusion of factors, we define a set of error categories: - * one for each combination of functional area, user and HTTP status. The categories - * are more an art than a science: they are influenced by the need of the consumers - * of errors: especially managed Druid installations. - */ -public enum ErrorCategory -{ - SQL_PARSE( - QueryException.SQL_PARSE_FAILED_ERROR_CODE, - HttpURLConnection.HTTP_BAD_REQUEST, - ErrorAudience.USER, - "", - MetricCategory.FAILED - ), - SQL_VALIDATION( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - HttpURLConnection.HTTP_BAD_REQUEST, - ErrorAudience.USER, - "", - MetricCategory.FAILED - ), - SQL_UNSUPPORTED( - QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE, - HttpURLConnection.HTTP_BAD_REQUEST, - ErrorAudience.USER, - "", - MetricCategory.FAILED - ), - INTERNAL( - QueryException.UNSUPPORTED_OPERATION_ERROR_CODE, - HttpURLConnection.HTTP_BAD_REQUEST, - ErrorAudience.DRUID_DEVELOPER, - "Internal error", - MetricCategory.FAILED - ); - - private final String userText; - private final int httpStatus; - private final ErrorAudience audience; - private final String messagePrefix; - private final MetricCategory metricCategory; - - ErrorCategory( - String userText, - int httpStatus, - ErrorAudience audience, - String messagePrefix, - MetricCategory metricCategory - ) - { - this.userText = userText; - this.httpStatus = httpStatus; - this.audience = audience; - this.messagePrefix = messagePrefix; - this.metricCategory = metricCategory; - } - - public String userText() - { - return userText; - } - - public int httpStatus() - { - return httpStatus; - } - - public ErrorAudience audience() - { - return audience; - } - - public String prefix() - { - return messagePrefix; - } - - public MetricCategory metricCategory() - { - return metricCategory; - } -} diff --git a/processing/src/main/java/org/apache/druid/error/ErrorCode.java b/processing/src/main/java/org/apache/druid/error/ErrorCode.java new file mode 100644 
index 000000000000..174703d335f5 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/ErrorCode.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +public class ErrorCode +{ + public static final String SQL_GROUP = "SQL"; + public static final String INTERNAL_GROUP = "INTERNAL"; + + public static final String SQL_VALIDATION_GROUP = SQL_GROUP + "-Validation"; + public static final String SQL_PARSE_GROUP = SQL_GROUP + "-Parse"; + public static final String SQL_UNSUPPORTED_GROUP = SQL_GROUP + "-Unsupported"; + + public static final String GENERAL_TAIL = "General"; + + public static String fullCode(String base, String tail) + { + return base + "-" + tail; + } +} diff --git a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java index 7e80c2334c29..8fabb377f775 100644 --- a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java +++ b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java @@ -26,8 +26,6 @@ import javax.annotation.Nullable; -import java.util.Map; - /** * Union of the {@link org.apache.druid.query.QueryException} and * {@link 
DruidExceptionV1} fields. Used in tests to deserialize errors which may @@ -39,22 +37,19 @@ public class ErrorResponse private final String code; private final String errorClass; private final String host; - private Map context; @JsonCreator public ErrorResponse( @JsonProperty("error") @Nullable String errorCode, @JsonProperty("errorMessage") @Nullable String errorMessage, @JsonProperty("errorClass") @Nullable String errorClass, - @JsonProperty("host") @Nullable String host, - @JsonProperty("context") @Nullable Map context + @JsonProperty("host") @Nullable String host ) { this.msg = errorMessage; this.code = errorCode; this.errorClass = errorClass; this.host = host; - this.context = context; } @Nullable @@ -84,11 +79,4 @@ public String getHost() { return host; } - - @JsonProperty - @JsonInclude(Include.NON_EMPTY) - public Map getContext() - { - return context; - } } diff --git a/processing/src/main/java/org/apache/druid/error/MetricCategory.java b/processing/src/main/java/org/apache/druid/error/MetricCategory.java index e8978be5a8e2..91328a960c8a 100644 --- a/processing/src/main/java/org/apache/druid/error/MetricCategory.java +++ b/processing/src/main/java/org/apache/druid/error/MetricCategory.java @@ -1,3 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.apache.druid.error; public enum MetricCategory diff --git a/processing/src/main/java/org/apache/druid/error/SqlParseError.java b/processing/src/main/java/org/apache/druid/error/SqlParseError.java index dae37426f93f..c8b99a95217d 100644 --- a/processing/src/main/java/org/apache/druid/error/SqlParseError.java +++ b/processing/src/main/java/org/apache/druid/error/SqlParseError.java @@ -19,32 +19,54 @@ package org.apache.druid.error; +import org.apache.druid.query.QueryException; + +import java.net.HttpURLConnection; + /** * SQL query parse failed. */ public class SqlParseError extends DruidException { - public SqlParseError(String msg, Object...args) + public SqlParseError( + String code, + String message + ) { - super(msg, args); + this(null, code, message); } - public SqlParseError(Throwable cause, String msg, Object...args) + public SqlParseError( + Throwable cause, + String code, + String message + ) { - super(cause, msg, args); + super( + cause, + ErrorCode.fullCode(ErrorCode.SQL_PARSE_GROUP, code), + fullMessage(message) + ); + this.legacyCode = QueryException.SQL_PARSE_FAILED_ERROR_CODE; + // For backward compatibility. + // Calcite classes not visible here, so using a string + this.legacyClass = "org.apache.calcite.sql.parser.SqlParseException"; + } + + public static String fullMessage(String message) + { + return "Line ${line}, column ${column}: " + message; } @Override - public ErrorCategory category() + public ErrorAudience audience() { - return ErrorCategory.SQL_PARSE; + return ErrorAudience.USER; } @Override - public String errorClass() + public int httpStatus() { - // For backward compatibility. 
- // Calcite classes not visible here, so using a string - return "org.apache.calcite.sql.parser.SqlParseException"; + return HttpURLConnection.HTTP_BAD_REQUEST; } } diff --git a/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java b/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java index 49b6dde91f56..bd165325d197 100644 --- a/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java +++ b/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java @@ -19,6 +19,10 @@ package org.apache.druid.error; +import org.apache.druid.query.QueryException; + +import java.net.HttpURLConnection; + /** * SQL query validation failed, because a SQL statement asked Druid to do * something which it does not support. This message indicates that the @@ -31,26 +35,60 @@ */ public class SqlUnsupportedError extends DruidException { - public SqlUnsupportedError(String msg, Object...args) + public SqlUnsupportedError( + String code, + String message + ) { - super(msg, args); + this(null, code, message); } - public SqlUnsupportedError(Throwable cause, String msg, Object...args) + public SqlUnsupportedError( + Throwable cause, + String code, + String message + ) { - super(cause, msg, args); + super( + cause, + ErrorCode.fullCode(ErrorCode.SQL_UNSUPPORTED_GROUP, code), + message + ); + // For backward compatibility. 
+ // Calcite classes not visible here, so using a string + this.legacyClass = "org.apache.calcite.plan.RelOptPlanner$CannotPlanException"; + this.legacyCode = QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE; } @Override - public ErrorCategory category() + public ErrorAudience audience() { - return ErrorCategory.SQL_UNSUPPORTED; + return ErrorAudience.USER; } @Override - public String errorClass() + public int httpStatus() + { + return HttpURLConnection.HTTP_BAD_REQUEST; + } + + public static DruidException unsupportedAggType(String agg, Object type) + { + return new SqlUnsupportedError( + "InvalidAggArg", + "${fn} aggregation is not supported for type [${type}]" + ) + .withValue("fn", agg) + .withValue("type", type); + } + + public static DruidException cannotUseOperator(String op, Throwable cause) { - // For backward compatibility: using text since class is not visible here. - return "org.apache.calcite.plan.RelOptPlanner$CannotPlanException"; + throw new SqlUnsupportedError( + "Operator", + "Cannot use [${op}]: [${message}]" + ) + .withValue("op", op) + .withValue(DruidException.MESSAGE_KEY, cause.getMessage()); } } diff --git a/processing/src/main/java/org/apache/druid/error/SqlValidationError.java b/processing/src/main/java/org/apache/druid/error/SqlValidationError.java index ded7d547e443..16ccae3519b2 100644 --- a/processing/src/main/java/org/apache/druid/error/SqlValidationError.java +++ b/processing/src/main/java/org/apache/druid/error/SqlValidationError.java @@ -19,6 +19,10 @@ package org.apache.druid.error; +import org.apache.druid.query.QueryException; + +import java.net.HttpURLConnection; + /** * SQL query validation failed, most likely due to a problem in the SQL statement * which the user provided. 
@@ -28,32 +32,47 @@ */ public class SqlValidationError extends DruidException { - public SqlValidationError(String msg, Object...args) + public SqlValidationError( + String code, + String message + ) { - super(msg, args); + this(null, code, message); } - public SqlValidationError(Throwable cause, String msg, Object...args) + public SqlValidationError( + Throwable cause, + String code, + String message + ) { - super(cause, msg, args); + super( + cause, + ErrorCode.fullCode(ErrorCode.SQL_VALIDATION_GROUP, code), + message + ); + this.legacyClass = "org.apache.calcite.tools.ValidationException"; + this.legacyCode = QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE; } - public SqlValidationError(Throwable cause) + @Override + public ErrorAudience audience() { - super(cause, cause.getMessage()); + return ErrorAudience.USER; } @Override - public ErrorCategory category() + public int httpStatus() { - return ErrorCategory.SQL_VALIDATION; + return HttpURLConnection.HTTP_BAD_REQUEST; } - @Override - public String errorClass() + public static DruidException forCause(Throwable e) { - // For backward compatibility. - // Using string because the class is not visible here. 
- return "org.apache.calcite.tools.ValidationException"; + return new SqlValidationError( + ErrorCode.GENERAL_TAIL, + SIMPLE_MESSAGE + ) + .withValue(MESSAGE_KEY, e.getMessage()); } } diff --git a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java index 542131110bfa..b1517bf8dd71 100644 --- a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java +++ b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java @@ -19,26 +19,64 @@ package org.apache.druid.error; +import org.apache.druid.java.util.common.logger.Logger; + import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.ResponseBuilder; -import javax.ws.rs.core.Response.Status; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.nio.charset.StandardCharsets; +import java.util.Properties; public class StandardRestExceptionEncoder implements RestExceptionEncoder { private static final RestExceptionEncoder INSTANCE = new StandardRestExceptionEncoder(); + private static final Logger LOG = new Logger(StandardRestExceptionEncoder.class); + + private final Properties catalog; public static RestExceptionEncoder instance() { return INSTANCE; } + public StandardRestExceptionEncoder() + { + // Load the default error catalog, if it exists. 
+ this.catalog = new Properties(); + File catalogFile = new File("conf/druid/errors.properties"); + if (catalogFile.isFile()) { + try (Reader reader = new BufferedReader( + new InputStreamReader( + new FileInputStream(catalogFile), + StandardCharsets.UTF_8))) { + this.catalog.load(reader); + LOG.info( + "Loaded [%d] entries from error catalog file [%s]", + catalog.size(), + catalogFile.getAbsolutePath() + ); + } + catch (IOException e) { + // Warn about failures, but don't take the server down. We'll run + // with standard errors. + LOG.error(e, "Failed to load error catalog file [%s]", catalogFile.getAbsolutePath()); + } + } + } + @Override public ResponseBuilder builder(DruidException e) { return Response - .status(Response.Status.fromStatusCode(e.category().httpStatus())) - .entity(e.toErrorResponse()) + .status(Response.Status.fromStatusCode(e.httpStatus())) + .entity(e.toErrorResponse(catalog)) .type(MediaType.APPLICATION_JSON); } @@ -47,27 +85,4 @@ public Response encode(DruidException e) { return builder(e).build(); } - // - // // Temporary status mapping - // private Status status(DruidExceptionV1 e) - // { - // switch (e.type()) { - // case CONFIG: - // case INTERNAL: - // case NETWORK: - // return Response.Status.INTERNAL_SERVER_ERROR; - // case TIMEOUT: - // return Response.Status.fromStatusCode(504); // No predefined status name - // case NOT_FOUND: - // return Response.Status.NOT_FOUND; - // case RESOURCE: - // return Response.Status.fromStatusCode(429); // No predefined status name - // case USER: - // case UNSUPPORTED: - // return Response.Status.BAD_REQUEST; - // default: - // // Should never occur - // return Response.Status.INTERNAL_SERVER_ERROR; - // } - // } } diff --git a/server/src/main/java/org/apache/druid/server/QueryResource.java b/server/src/main/java/org/apache/druid/server/QueryResource.java index f4a7ab3edb75..96194eed5caf 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResource.java +++ 
b/server/src/main/java/org/apache/druid/server/QueryResource.java @@ -34,6 +34,7 @@ import com.google.common.collect.Iterables; import com.google.inject.Inject; import org.apache.druid.client.DirectDruidClient; +import org.apache.druid.error.StandardRestExceptionEncoder; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.annotations.Json; import org.apache.druid.guice.annotations.Self; @@ -537,7 +538,8 @@ public QueryResourceQueryResultPusher( QueryResource.this.counter, queryLifecycle.getQueryId(), MediaType.valueOf(io.getResponseWriter().getResponseType()), - ImmutableMap.of() + ImmutableMap.of(), + StandardRestExceptionEncoder.instance() ); this.req = req; this.queryLifecycle = queryLifecycle; diff --git a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java index 4ce171068085..09cb7b4657f8 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java +++ b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java @@ -24,8 +24,7 @@ import com.google.common.io.CountingOutputStream; import org.apache.druid.client.DirectDruidClient; import org.apache.druid.error.DruidException; -import org.apache.druid.error.DruidExceptionV1; -import org.apache.druid.error.StandardRestExceptionEncoder; +import org.apache.druid.error.RestExceptionEncoder; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.RE; import org.apache.druid.java.util.common.StringUtils; @@ -47,6 +46,7 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.StreamingOutput; + import java.io.Closeable; import java.io.IOException; import java.io.OutputStream; @@ -64,10 +64,11 @@ public abstract class QueryResultPusher private final QueryResource.QueryMetricCounter counter; private final MediaType contentType; private final Map extraHeaders; + private final RestExceptionEncoder exceptionEncoder; - 
private StreamingHttpResponseAccumulator accumulator = null; - private AsyncContext asyncContext = null; - private HttpServletResponse response = null; + private StreamingHttpResponseAccumulator accumulator; + private AsyncContext asyncContext; + private HttpServletResponse response; public QueryResultPusher( HttpServletRequest request, @@ -77,7 +78,8 @@ public QueryResultPusher( QueryResource.QueryMetricCounter counter, String queryId, MediaType contentType, - Map extraHeaders + Map extraHeaders, + RestExceptionEncoder exceptionEncoder ) { this.request = request; @@ -88,6 +90,7 @@ public QueryResultPusher( this.counter = counter; this.contentType = contentType; this.extraHeaders = extraHeaders; + this.exceptionEncoder = exceptionEncoder; } /** @@ -319,7 +322,7 @@ private Response handleDruidException(ResultsWriter resultsWriter, DruidExceptio return null; } - switch (e.category().metricCategory()) { + switch (e.metricCategory()) { case INTERRUPTED: counter.incrementInterrupted(); break; @@ -333,7 +336,7 @@ private Response handleDruidException(ResultsWriter resultsWriter, DruidExceptio resultsWriter.recordFailure(e); - final Response.ResponseBuilder bob = StandardRestExceptionEncoder.instance().builder(e); + final Response.ResponseBuilder bob = exceptionEncoder.builder(e); bob.header(QueryResource.QUERY_ID_RESPONSE_HEADER, queryId); for (Map.Entry entry : extraHeaders.entrySet()) { bob.header(entry.getKey(), entry.getValue()); diff --git a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java index 96b242ea4977..7499ecb2a68a 100644 --- a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java @@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.calcite.plan.RelOptPlanner; import org.apache.druid.error.DruidAssertionError; -import org.apache.druid.error.DruidExceptionV1; import 
org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java index 58d2ac1cc710..e184373cc776 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java @@ -36,7 +36,6 @@ import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.util.Optionality; import org.apache.druid.error.DruidAssertionError; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.SqlValidationError; import org.apache.druid.query.aggregation.AggregatorFactory; @@ -93,7 +92,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringFirstAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw new SqlUnsupportedError("EARLIEST aggregator is not supported for type [%s]", type); + throw SqlUnsupportedError.unsupportedAggType("EARLIEST", type); } } }, @@ -113,7 +112,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringLastAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw new SqlUnsupportedError("LATEST aggregator is not supported for type [%s]", type); + throw SqlUnsupportedError.unsupportedAggType("LATEST", type); } } }, @@ -132,7 +131,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case STRING: return new StringAnyAggregatorFactory(name, fieldName, maxStringBytes); default: - throw new SqlUnsupportedError("ANY aggregation is not 
supported for type [%s]", type); + throw SqlUnsupportedError.unsupportedAggType("ANY", type); } } }; @@ -191,10 +190,10 @@ public Aggregation toDruidAggregation( final ColumnType outputType = Calcites.getColumnTypeForRelDataType(aggregateCall.getType()); if (outputType == null) { throw new DruidAssertionError( - "[%s] cannot translate output SQL type [%s] to a Druid type", - aggregateCall.getName(), - aggregateCall.getType().getSqlTypeName() - ); + "[${fn}] cannot translate output SQL type [${type}] to a Druid type" + ) + .withValue("fn", aggregateCall.getName()) + .withValue("type", aggregateCall.getType().getSqlTypeName()); } final String fieldName = getColumnName(plannerContext, virtualColumnRegistry, args.get(0), rexNodes.get(0)); @@ -220,11 +219,8 @@ public Aggregation toDruidAggregation( maxStringBytes = RexLiteral.intValue(rexNodes.get(1)); } catch (AssertionError ae) { - throw new SqlValidationError( - "[%s], argument 2 must be a number but found [%s]", - aggregateCall.getName(), - rexNodes.get(1) - ); + plannerContext.setPlanningError("The second argument '%s' to function '%s' is not a number", rexNodes.get(1), aggregateCall.getName()); + return null; } theAggFactory = aggregatorType.createAggregatorFactory( aggregatorName, @@ -236,10 +232,11 @@ public Aggregation toDruidAggregation( break; default: throw new SqlValidationError( - "[%s] expects 1 or 2 arguments but found [%d]", - aggregateCall.getName(), - args.size() - ); + "WrongArgCount", + "[${fn}] expects 1 or 2 arguments but found [${count}]" + ) + .withValue("fn", aggregateCall.getName()) + .withValue("count", args.size()); } return Aggregation.create( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java index 01cfe3f463e9..71d4cf3e620b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java @@ -22,7 +22,6 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.aggregation.AggregatorFactory; @@ -74,7 +73,7 @@ private static AggregatorFactory createMaxAggregatorFactory( default: // This error refers to the Druid type. But, we're in SQL validation. // It should refer to the SQL type. - throw new SqlUnsupportedError("MAX does not support type [%s]", aggregationType); + throw SqlUnsupportedError.unsupportedAggType("MAX", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java index f48a0cd36422..a6e7bda3e7fe 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java @@ -22,7 +22,6 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.aggregation.AggregatorFactory; @@ -68,7 +67,7 @@ private static AggregatorFactory createMinAggregatorFactory( case DOUBLE: return new DoubleMinAggregatorFactory(name, fieldName, null, macroTable); default: - throw new SqlUnsupportedError("MIN does not support type [%s]", aggregationType); + throw SqlUnsupportedError.unsupportedAggType("MIN", aggregationType); } } } diff --git 
a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java index 408694ebd1f2..6f0e3daf6561 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java @@ -36,7 +36,6 @@ import org.apache.calcite.sql.type.SqlTypeFamily; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.StringUtils; @@ -199,7 +198,7 @@ public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); if (type instanceof RowSignatures.ComplexSqlType) { - throw new SqlUnsupportedError("Cannot use STRING_AGG on complex input of type [%s]", type); + throw SqlUnsupportedError.unsupportedAggType("STRING_AGG", type); } return Calcites.createSqlTypeWithNullability( sqlOperatorBinding.getTypeFactory(), diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java index 15f987458375..b9d2d236b3bf 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java @@ -31,7 +31,6 @@ import org.apache.calcite.sql.type.OperandTypes; import org.apache.calcite.sql.type.ReturnTypes; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.math.expr.ExprMacroTable; import 
org.apache.druid.query.aggregation.AggregatorFactory; @@ -89,7 +88,7 @@ static AggregatorFactory createSumAggregatorFactory( case DOUBLE: return new DoubleSumAggregatorFactory(name, fieldName, null, macroTable); default: - throw new SqlUnsupportedError("SUM is not supported for type [%s]", aggregationType); + throw SqlUnsupportedError.unsupportedAggType("SUM", aggregationType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java index c33489defcec..f76a092b765c 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java @@ -40,7 +40,6 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeTransforms; import org.apache.calcite.sql2rel.SqlRexConvertlet; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.IAE; @@ -203,10 +202,9 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw new SqlUnsupportedError( - "Cannot use [%s]: [%s]", - call.getOperator().getName(), - iae.getMessage() + throw SqlUnsupportedError.cannotUseOperator( + call.getOperator().getName(), + iae ); } final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); @@ -393,10 +391,9 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw new SqlUnsupportedError( - "Cannot use [%s]: [%s]", + throw SqlUnsupportedError.cannotUseOperator( call.getOperator().getName(), - iae.getMessage() + iae ); } final String jsonPath = 
NestedPathFinder.toNormalizedJsonPath(parts); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java index d0ec9739b19b..9425b3d96f81 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java @@ -78,12 +78,19 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql try { return convertSqlNodeToGranularity(sqlNode); } + catch (DruidException e) { + throw e; + } catch (Exception e) { log.debug(e, StringUtils.format("Unable to convert %s to a valid granularity.", sqlNode.toString())); throw new ParseException(e.getMessage()); } } + private static final String PARITION_BY_ERROR = "Encountered [${expr}] after PARTITIONED BY. " + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or " + + TimeFloorOperatorConversion.SQL_FUNCTION_NAME + " function"; + /** * This method is used to extract the granularity from a SqlNode representing following function calls: * 1. FLOOR(__time TO TimeUnit) @@ -105,16 +112,12 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql */ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws ParseException { - - final String genericParseFailedMessageFormatString = "Encountered [%s] after PARTITIONED BY. 
" - + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or %s function"; - if (!(sqlNode instanceof SqlCall)) { - throw new ParseException(StringUtils.format( - genericParseFailedMessageFormatString, - sqlNode.toString(), - TimeFloorOperatorConversion.SQL_FUNCTION_NAME - )); + throw new SqlValidationError( + "InvalidPartitionBy", + PARITION_BY_ERROR + ) + .withValue("expr", sqlNode.toString()); } SqlCall sqlCall = (SqlCall) sqlNode; @@ -188,11 +191,11 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa } // Shouldn't reach here - throw new ParseException(StringUtils.format( - genericParseFailedMessageFormatString, - sqlNode.toString(), - TimeFloorOperatorConversion.SQL_FUNCTION_NAME - )); + throw new SqlValidationError( + "InvalidPartitionBy", + PARITION_BY_ERROR + ) + .withValue("expr", sqlNode.toString()); } /** @@ -231,13 +234,16 @@ public static List validateQueryAndConvertToIntervals( if (filtration.getDimFilter() != null) { throw new SqlValidationError( - "Only %s column is supported in OVERWRITE WHERE clause", - ColumnHolder.TIME_COLUMN_NAME + "OverwriteWhereIsNotTime", + "Only " + ColumnHolder.TIME_COLUMN_NAME + " column is supported in OVERWRITE WHERE clause" ); } if (intervals.isEmpty()) { - throw new SqlValidationError("Intervals for REPLACE are empty"); + throw new SqlValidationError( + "OverwriteEmptyIntervals", + "Intervals for REPLACE are empty" + ); } for (Interval interval : intervals) { @@ -245,11 +251,12 @@ public static List validateQueryAndConvertToIntervals( DateTime intervalEnd = interval.getEnd(); if (!granularity.bucketStart(intervalStart).equals(intervalStart) || !granularity.bucketStart(intervalEnd).equals(intervalEnd)) { throw new SqlValidationError( - "OVERWRITE WHERE clause contains an interval [%s]" + - " which is not aligned with PARTITIONED BY granularity [%s]", - intervals, - granularity - ); + "OverwriteUnalignedInterval", + "OVERWRITE WHERE clause contains an interval [${interval}]" + + " 
which is not aligned with PARTITIONED BY granularity [${granularity}]" + ) + .withValue("interval", intervals) + .withValue("granularity", granularity); } } return intervals @@ -331,7 +338,10 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi { if (!(replaceTimeQuery instanceof SqlBasicCall)) { log.error("Expected SqlBasicCall during parsing, but found " + replaceTimeQuery.getClass().getName()); - throw new SqlValidationError("Invalid OVERWRITE WHERE clause"); + throw new SqlValidationError( + "InvalidOverwriteWhere", + "Invalid OVERWRITE WHERE clause" + ); } String columnName; SqlBasicCall sqlBasicCall = (SqlBasicCall) replaceTimeQuery; @@ -413,9 +423,10 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi ); default: throw new SqlValidationError( - "Unsupported operation in OVERWRITE WHERE clause: [%s]", - sqlBasicCall.getOperator().getName() - ); + "OverwriteWhereExpr", + "Unsupported operation in OVERWRITE WHERE clause: [${expr}]" + ) + .withValue("expr", sqlBasicCall.getOperator().getName()); } } @@ -430,6 +441,7 @@ public static String parseColumnName(SqlNode sqlNode) { if (!(sqlNode instanceof SqlIdentifier)) { throw new SqlValidationError( + "OverwriteWhereInvalidForm", "OVERWRITE WHERE expressions must be of the form __time TIMESTAMP" ); } @@ -448,6 +460,7 @@ public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone ti { if (!(sqlNode instanceof SqlTimestampLiteral)) { throw new SqlValidationError( + "OverwriteWhereInvalidForm", "OVERWRITE WHERE expressions must be of the form __time TIMESTAMP" ); } @@ -466,10 +479,11 @@ public static void throwIfUnsupportedGranularityInPartitionedBy(Granularity gran { if (!GranularityType.isStandard(granularity)) { throw new SqlValidationError( - "The granularity specified in PARTITIONED BY is not supported." 
+ "PartitionedByGrain", + "The granularity specified in PARTITIONED BY is not supported.\nValid granularities: ${supported}" ) - .suggestion( - "Valid granularities: " + + .withValue( + "supported", Arrays.stream(GranularityType.values()) .filter(granularityType -> !granularityType.equals(GranularityType.NONE)) .map(Enum::name) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index e112f8a8cd94..f03bf0bde3cc 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -31,13 +31,11 @@ import org.apache.calcite.tools.ValidationException; import org.apache.druid.error.DruidAssertionError; import org.apache.druid.error.DruidException; -import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.ErrorCode; import org.apache.druid.error.SqlParseError; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.SqlValidationError; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.query.QueryContext; -import org.apache.druid.query.QueryException; import org.apache.druid.server.security.Access; import org.apache.druid.server.security.Resource; import org.apache.druid.server.security.ResourceAction; @@ -48,6 +46,7 @@ import java.io.Closeable; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; import java.util.function.Function; @@ -175,9 +174,10 @@ private SqlStatementHandler createHandler(final SqlNode node) return new QueryHandler.SelectHandler(handlerContext, query, explain); } throw new SqlUnsupportedError( - "Unsupported SQL statement [%s]", - node.getKind() - ); + "Statement", + "Unsupported SQL statement [${statement}]" + ) + .withValue("statement", node.getKind()); } /** @@ -340,34 +340,55 @@ public static DruidException 
translateException(Exception e) catch (Exception inner) { // Anything else. Should not get here. Anything else should already have // been translated to a DruidException unless it is an unexpected exception. - return new DruidAssertionError(inner, e.getMessage()); + return DruidAssertionError.forCause(inner, e.getMessage()); } } private static DruidException parseValidationMessage(Exception e, boolean unsupported) { + if (e.getCause() instanceof DruidException) { + return (DruidException) e.getCause(); + } // Calcite exception that probably includes a position. String msg = e.getMessage(); Pattern p = Pattern.compile("(?:org\\..*: )From line (\\d+), column (\\d+) to line \\d+, column \\d+: (.*)$"); Matcher m = p.matcher(msg); Exception cause; String errorMsg; + Map values = new LinkedHashMap<>(); if (m.matches()) { cause = null; - errorMsg = StringUtils.format("Line [%s], Column [%s]: %s", m.group(1), m.group(2), m.group(3)); + values.put("line", m.group(1)); + values.put("column", m.group(2)); + values.put(DruidException.MESSAGE_KEY, m.group(3)); + errorMsg = SqlParseError.fullMessage(DruidException.SIMPLE_MESSAGE); } else { cause = e; - errorMsg = msg; + values.put(DruidException.MESSAGE_KEY, msg); + errorMsg = DruidException.SIMPLE_MESSAGE; } if (unsupported) { - return new SqlUnsupportedError(cause, errorMsg); + return new SqlUnsupportedError( + cause, + ErrorCode.GENERAL_TAIL, + DruidException.SIMPLE_MESSAGE + ) + .withValues(values); } else { - return new SqlValidationError(cause, errorMsg); + return new SqlValidationError( + e, + ErrorCode.GENERAL_TAIL, + errorMsg + ) + .withValues(values); } } private static DruidException parseParserMessage(Exception e) { + if (e.getCause() instanceof DruidException) { + return (DruidException) e.getCause(); + } // Calcite exception that probably includes a position. The normal parse // exception is rather cumbersome. Clean it up a bit. 
final String msg = e.getMessage(); @@ -377,17 +398,23 @@ private static DruidException parseParserMessage(Exception e) ); Matcher m = p.matcher(msg); if (!m.matches()) { - return new SqlParseError(e, msg); + return new SqlParseError( + e, + ErrorCode.GENERAL_TAIL, + DruidException.SIMPLE_MESSAGE + ) + .withValue(DruidException.MESSAGE_KEY, e.getMessage()); } Pattern p2 = Pattern.compile("[ .]*\n\\ s+"); Matcher m2 = p2.matcher(m.group(4).trim()); String choices = m2.replaceAll(", "); return new SqlParseError( - "Line [%s], Column [%s]: unexpected token [%s]", - m.group(2), - m.group(3), - m.group(1) + "UnexpectedToken", + SqlParseError.fullMessage("unexpected token [${token}]\nExpected ${expected}") ) - .suggestion("Expected one of " + choices); + .withValue("line", m.group(2)) + .withValue("column", m.group(3)) + .withValue("token", m.group(1)) + .withValue("expected", choices); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java index 7b0f5233e6d7..ae2364626a3f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java @@ -23,11 +23,9 @@ import org.apache.calcite.rex.RexExecutor; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.DateTimes; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.Expr; import org.apache.druid.math.expr.ExprEval; import org.apache.druid.math.expr.ExprType; @@ -91,7 +89,12 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. 
if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw new SqlValidationError("Illegal DATE constant [%s]", constExp); + throw new SqlValidationError( + "InvalidConstant", + "Illegal ${type} constant [${expr}]" + ) + .withValue("type", "DATE") + .withValue("expr", constExp); } literal = rexBuilder.makeDateLiteral( @@ -105,7 +108,12 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw new SqlValidationError("Illegal TIMESTAMP constant [%s]", constExp); + throw new SqlValidationError( + "InvalidConstant", + "Illegal ${type} constant [${expr}]" + ) + .withValue("type", "TIMESTAMP") + .withValue("expr", constExp); } literal = Calcites.jodaToCalciteTimestampLiteral( @@ -130,13 +138,14 @@ public void reduce( double exprResultDouble = exprResult.asDouble(); if (Double.isNaN(exprResultDouble) || Double.isInfinite(exprResultDouble)) { String expression = druidExpression.getExpression(); - throw new SqlUnsupportedError("Expression not supported in SQL : [%s]", expression) - .addContext("Evaluates to", Double.toString(exprResultDouble)) - .addContext("Suggestion", StringUtils.format( - "You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') or VARCHAR ('CAST(%s as VARCHAR)') or change the expression itself", - expression, - expression - )); + throw new SqlUnsupportedError( + "UnsupportedExpr", + "[${expr}] evaluates to [${eval}] that is not supported in SQL. 
" + + "You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') " + + "or VARCHAR ('CAST(%s as VARCHAR)') or change the expression itself" + ) + .withValue("expr", expression) + .withValue("eval", exprResultDouble); } bigDecimal = BigDecimal.valueOf(exprResult.asDouble()); } @@ -167,10 +176,13 @@ public void reduce( resultAsBigDecimalList.add(null); } else if (Double.isNaN(doubleVal.doubleValue()) || Double.isInfinite(doubleVal.doubleValue())) { String expression = druidExpression.getExpression(); - throw new SqlValidationError("Array element not supported in SQL: [%s]", expression) - .addContext("Evaluates to", Double.toString(doubleVal.doubleValue())) - .suggestion( - "You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself"); + throw new SqlUnsupportedError( + "ArrayElement", + "[${expr}] contains an element that evaluates to [${eval}] which is not supported in SQL. " + + "You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself" + ) + .withValue("expr", expression) + .withValue("eval", doubleVal.doubleValue()); } else { resultAsBigDecimalList.add(BigDecimal.valueOf(doubleVal.doubleValue())); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index e3f8bc287384..3b1be7ecb52e 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -89,10 +89,10 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) if (!(orderByList == null || orderByList.equals(SqlNodeList.EMPTY))) { String opName = sqlNode.getOperator().getName(); throw new SqlValidationError( - "Cannot use ORDER BY on %s [%s] statement, use CLUSTERED BY instead.", - "INSERT".equals(opName) ? 
"an" : "a", - opName - ); + "InsertOrderBy", + "Cannot use ORDER BY with ${op}, use CLUSTERED BY instead" + ) + .withValue("op", opName); } } if (sqlNode.getClusteredBy() != null) { @@ -100,7 +100,11 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) } if (!query.isA(SqlKind.QUERY)) { - throw new SqlValidationError("Cannot execute SQL statement [%s]", query.getKind()); + throw new SqlValidationError( + "Unsupported", + "Cannot execute SQL statement [%{op}]" + ) + .withValue("op", query.getKind()); } return query; } @@ -117,9 +121,10 @@ public void validate() { if (ingestNode().getPartitionedBy() == null) { throw new SqlValidationError( - "[%s] statements must specify PARTITIONED BY clause explicitly", - operationName() - ); + "InsertWithoutPartitionBy", + "${op} statements must specify the PARTITIONED BY clause explicitly" + ) + .withValue("op", operationName()); } try { PlannerContext plannerContext = handlerContext.plannerContext(); @@ -132,19 +137,21 @@ public void validate() } catch (JsonProcessingException e) { throw new SqlValidationError( - "Invalid partition granularity [%s]", - ingestionGranularity - ); + "PartitionGrain", + "Invalid partition granularity [${grain}]" + ) + .withValue("grain", ingestionGranularity); } super.validate(); // Check if CTX_SQL_OUTER_LIMIT is specified and fail the query if it is. CTX_SQL_OUTER_LIMIT being provided causes // the number of rows inserted to be limited which is likely to be confusing and unintended. 
if (handlerContext.queryContextMap().get(PlannerContext.CTX_SQL_OUTER_LIMIT) != null) { throw new SqlValidationError( - "Context parameter [%s] cannot be provided with [%s]", - PlannerContext.CTX_SQL_OUTER_LIMIT, - operationName() - ); + "InsertContext", + "Context parameter [${param}] cannot be provided with [${op}]" + ) + .withValue("param", PlannerContext.CTX_SQL_OUTER_LIMIT) + .withValue("op", operationName()); } targetDatasource = validateAndGetDataSourceForIngest(); resourceActions.add(new ResourceAction(new Resource(targetDatasource, ResourceType.DATASOURCE), Action.WRITE)); @@ -167,14 +174,15 @@ private String validateAndGetDataSourceForIngest() { final SqlInsert insert = ingestNode(); if (insert.isUpsert()) { - throw new SqlUnsupportedError("UPSERT is not supported."); + throw new SqlUnsupportedError("UPSERT", "UPSERT is not supported."); } if (insert.getTargetColumnList() != null) { throw new SqlUnsupportedError( - "[%s] with a target column list is not supported", - operationName() - ); + "InsertList", + "[${op}] with a target column list is not supported" + ) + .withValue("op", operationName()); } final SqlIdentifier tableIdentifier = (SqlIdentifier) insert.getTargetTable(); @@ -182,7 +190,11 @@ private String validateAndGetDataSourceForIngest() if (tableIdentifier.names.isEmpty()) { // I don't think this can happen, but include a branch for it just in case. - throw new SqlValidationError("[%s] requires a target table", operationName()); + throw new SqlValidationError( + "NoInsertTarget", + "[${op}] requires a target table" + ) + .withValue("op", operationName()); } else if (tableIdentifier.names.size() == 1) { // Unqualified name.
dataSource = Iterables.getOnlyElement(tableIdentifier.names); @@ -195,10 +207,11 @@ private String validateAndGetDataSourceForIngest() dataSource = tableIdentifier.names.get(1); } else { throw new SqlValidationError( - "Cannot [%s] into [%s] because it is not a Druid datasource.", - operationName(), - tableIdentifier - ); + "InsertNotDatasource", + "Cannot [${op}] into [${table}] because it is not a Druid datasource" + ) + .withValue("op", operationName()) + .withValue("table", tableIdentifier); } } @@ -206,7 +219,7 @@ private String validateAndGetDataSourceForIngest() IdUtils.validateId(operationName() + " dataSource", dataSource); } catch (IllegalArgumentException e) { - throw new SqlValidationError(e); + throw SqlValidationError.forCause(e); } return dataSource; @@ -270,9 +283,11 @@ public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_INSERT)) { throw new SqlUnsupportedError( - "Cannot execute INSERT with SQL engine '%s'.", - handlerContext.engine().name() - ); + "UnsupportedEngineOp", + "Cannot execute ${op} with SQL engine [${engine}]" + ) + .withValue("op", "INSERT") + .withValue("engine", handlerContext.engine().name()); } super.validate(); } @@ -323,14 +338,17 @@ protected DruidSqlIngest ingestNode() public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_REPLACE)) { - throw new SqlValidationError( - "Cannot execute REPLACE with SQL engine [%s]", - handlerContext.engine().name() - ); + throw new SqlUnsupportedError( + "UnsupportedEngineOp", + "Cannot execute ${op} with SQL engine [${engine}]" + ) + .withValue("op", "REPLACE") + .withValue("engine", handlerContext.engine().name()); } SqlNode replaceTimeQuery = sqlNode.getReplaceTimeQuery(); if (replaceTimeQuery == null) { throw new SqlValidationError( + "OverwriteTimeRange", "Missing time chunk information in OVERWRITE clause for REPLACE. 
Use " + "OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." ); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index 7f9edb469f31..f9130dd51572 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -55,14 +55,12 @@ import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; import org.apache.druid.error.DruidException; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequences; import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.query.Query; -import org.apache.druid.query.QueryException; import org.apache.druid.server.QueryResponse; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; @@ -205,13 +203,13 @@ public PlannerResult plan() if (!handlerContext.plannerContext().featureAvailable(EngineFeature.ALLOW_BINDABLE_PLAN)) { throw new SqlValidationError( - "Cannot query table%s [%s] with SQL engine [%s]", - bindableTables.size() != 1 ? 
"s" : "", - bindableTables.stream() + "WrongEngineForTable", + "Cannot query table(s) [%{tables}] with SQL engine [${engine}]" + ) + .withValue("tables", bindableTables.stream() .map(table -> Joiner.on(".").join(table.getQualifiedName())) - .collect(Collectors.joining(", ")), - handlerContext.engine().name() - ); + .collect(Collectors.joining(", "))) + .withValue("engine", handlerContext.engine().name()); } return planWithBindableConvention(); @@ -628,15 +626,22 @@ private DruidException buildSQLPlanningError(RelOptPlanner.CannotPlanException e String errorMessage = handlerContext.plannerContext().getPlanningError(); if (null == errorMessage && exception instanceof UnsupportedSQLQueryException) { errorMessage = exception.getMessage(); - } else if (null == errorMessage) { - errorMessage = "Please check Broker logs for additional details."; + } + if (errorMessage == null) { + return new SqlUnsupportedError( + exception, + "Query", + "Query not supported. Please check Broker logs for additional details." + ); } else { // Planning errors are more like hints: it isn't guaranteed that the planning error is actually what went wrong. - errorMessage = "Possible error: " + errorMessage; + return new SqlUnsupportedError( + exception, + "QueryWithReason", + "Query not supported. Possible error: ${message}" + ) + .withValue("message", errorMessage); } - // Finally, add the query itself to error message that user will get. - return new SqlUnsupportedError(exception, "Query not supported. 
%s", errorMessage) - .addContext("SQL", handlerContext.plannerContext().getSql()); } public static class SelectHandler extends QueryHandler @@ -654,9 +659,10 @@ public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_SELECT)) { throw new SqlValidationError( - "Cannot execute SELECT with SQL engine [%s]", - handlerContext.engine().name() - ); + "WrongEngineForSelect", + "Cannot execute SELECT with SQL engine [${engine}]" + ) + .withValue("engine", handlerContext.engine().name()); } super.validate(); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java index 3a07c1787438..ca47ffacfd61 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java @@ -43,7 +43,7 @@ import org.apache.calcite.rex.RexNode; import org.apache.calcite.rex.RexShuttle; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.DruidException; import org.apache.druid.error.SqlValidationError; /** @@ -200,10 +200,7 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa if (plannerContext.getParameters().size() > dynamicParam.getIndex()) { TypedValue param = plannerContext.getParameters().get(dynamicParam.getIndex()); if (param == null) { - throw new SqlValidationError( - "Parameter at position [%d] is not bound", - dynamicParam.getIndex() - ); + throw unbound(dynamicParam); } if (param.value == null) { return builder.makeNullLiteral(typeFactory.createSqlType(SqlTypeName.NULL)); @@ -215,12 +212,18 @@ private RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa true ); } else { - throw new SqlValidationError( - "Parameter at position [%d] is not bound", - dynamicParam.getIndex() - ); + 
throw unbound(dynamicParam); } } return node; } + + private static DruidException unbound(RexDynamicParam dynamicParam) + { + return new SqlValidationError( + "UnboundParameter", + "Parameter at position [${index}] is not bound" + ) + .withValue("index", dynamicParam.getIndex() + 1); + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java index a0c9fcd72854..bdaa019c0582 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java @@ -28,7 +28,8 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.util.SqlShuttle; import org.apache.calcite.util.TimestampString; -import org.apache.druid.java.util.common.IAE; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.SqlValidationError; import java.util.ArrayList; import java.util.Arrays; @@ -69,11 +70,11 @@ public SqlParameterizerShuttle(PlannerContext plannerContext) public SqlNode visit(SqlDynamicParam param) { if (plannerContext.getParameters().size() <= param.getIndex()) { - throw new IAE("Parameter at position [%s] is not bound", param.getIndex()); + throw unbound(param); } TypedValue paramBinding = plannerContext.getParameters().get(param.getIndex()); if (paramBinding == null) { - throw new IAE("Parameter at position [%s] is not bound", param.getIndex()); + throw unbound(param); } if (paramBinding.value == null) { return SqlLiteral.createNull(param.getParserPosition()); @@ -91,7 +92,7 @@ public SqlNode visit(SqlDynamicParam param) } if (typeName == SqlTypeName.ARRAY) { - return createArrayLiteral(paramBinding.value); + return createArrayLiteral(paramBinding.value, param.getIndex()); } try { // This throws ClassCastException for a DATE parameter given as @@ -105,6 +106,15 @@ public SqlNode visit(SqlDynamicParam 
param) } } + private static DruidException unbound(SqlDynamicParam param) + { + return new SqlValidationError( + "UnboundParameter", + "Parameter at position [${index}] is not bound" + ) + .withValue("index", param.getIndex() + 1); + } + /** * Convert an ARRAY parameter to the equivalent of the ARRAY[a, b, ...] * syntax. This is not well-supported in the present version of Calcite, @@ -112,7 +122,7 @@ public SqlNode visit(SqlDynamicParam param) * structure. Supports a limited set of member types. Does not attempt * to enforce that all elements have the same type. */ - private SqlNode createArrayLiteral(Object value) + private SqlNode createArrayLiteral(Object value, int posn) { List list; if (value instanceof List) { @@ -123,7 +133,11 @@ private SqlNode createArrayLiteral(Object value) List args = new ArrayList<>(list.size()); for (Object element : list) { if (element == null) { - throw new IAE("An array parameter cannot contain null values"); + throw new SqlValidationError( + "NullParameter", + "Parameter [${posn}]: An array parameter cannot contain null values" + ) + .withValue("posn", posn + 1); } SqlNode node; if (element instanceof String) { @@ -135,10 +149,12 @@ private SqlNode createArrayLiteral(Object value) } else if (element instanceof Boolean) { node = SqlLiteral.createBoolean((Boolean) value, SqlParserPos.ZERO); } else { - throw new IAE( - "An array parameter does not allow values of type %s", - value.getClass().getSimpleName() - ); + throw new SqlValidationError( + "InvalidParameter", + "Parameter [${posn}]: An array parameter does not allow values of type [${type}]" + ) + .withValue("posn", posn + 1) + .withValue("type", element.getClass().getSimpleName()); } args.add(node); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java index 173cd126c7cb..0cc17dcb2679 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java
+++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java @@ -38,7 +38,6 @@ import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlKind; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Pair; @@ -361,7 +360,11 @@ public static JoinType toDruidJoinType(JoinRelType calciteJoinType) case INNER: return JoinType.INNER; default: - throw new SqlUnsupportedError("Cannot handle joinType [%s]", calciteJoinType); + throw new SqlUnsupportedError( + "JoinType", + "Cannot handle joinType [${type}]" + ) + .withValue("type", calciteJoinType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java index 3a6587db6903..945f05e5522f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java @@ -25,7 +25,6 @@ import org.apache.calcite.plan.RelOptRuleCall; import org.apache.calcite.rel.logical.LogicalValues; import org.apache.calcite.rex.RexLiteral; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.query.InlineDataSource; import org.apache.druid.segment.column.RowSignature; @@ -128,9 +127,10 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont case NULL: if (!literal.isNull()) { throw new SqlUnsupportedError( - "Non-null constant [%s] for a NULL literal", - literal - ); + "NonNullConst", + "Non-null constant [${expr}] for a NULL literal" + ) + .withValue("expr", literal); } return null; case TIMESTAMP_WITH_LOCAL_TIME_ZONE: @@ -138,10 +138,11 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont 
case TIME_WITH_LOCAL_TIME_ZONE: default: throw new SqlUnsupportedError( - "Literal [%s] type [%s] is not supported", - literal, - literal.getType().getSqlTypeName() - ); + "Literal", + "Literal [${expr}] type [${type}] is not supported" + ) + .withValue("expr", literal) + .withValue("type", literal.getType().getSqlTypeName()); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java index a40c23d4e64d..5d6d8f3b995b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java @@ -20,7 +20,6 @@ package org.apache.druid.sql.calcite.run; import org.apache.calcite.tools.ValidationException; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlValidationError; import java.util.Map; @@ -44,9 +43,10 @@ public static void validateNoSpecialContextKeys( for (String contextParameterName : queryContext.keySet()) { if (specialContextKeys.contains(contextParameterName)) { throw new SqlValidationError( - "Query context parameter [%s] is not allowed", - contextParameterName - ); + "IllegalContext", + "Query context parameter [${param}] is not allowed" + ) + .withValue("param", contextParameterName); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/guice/SqlModule.java b/sql/src/main/java/org/apache/druid/sql/guice/SqlModule.java index 56d0d2d5d41f..d8ac8178a89d 100644 --- a/sql/src/main/java/org/apache/druid/sql/guice/SqlModule.java +++ b/sql/src/main/java/org/apache/druid/sql/guice/SqlModule.java @@ -27,6 +27,8 @@ import com.google.inject.Module; import com.google.inject.Provides; import org.apache.druid.catalog.model.TableDefnRegistry; +import org.apache.druid.error.RestExceptionEncoder; +import org.apache.druid.error.StandardRestExceptionEncoder; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.PolyBind; import 
org.apache.druid.guice.annotations.NativeQuery; @@ -124,6 +126,9 @@ public void configure(Binder binder) // Default do-nothing catalog resolver binder.bind(CatalogResolver.class).toInstance(CatalogResolver.NULL_RESOLVER); + + // Default exception encoder + binder.bind(RestExceptionEncoder.class).toInstance(StandardRestExceptionEncoder.instance()); } private boolean isEnabled() diff --git a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java index cbb18cf36240..a664966e3de4 100644 --- a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java +++ b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java @@ -23,6 +23,7 @@ import com.google.common.base.Preconditions; import com.google.inject.Inject; import org.apache.druid.common.exception.SanitizableException; +import org.apache.druid.error.RestExceptionEncoder; import org.apache.druid.guice.annotations.NativeQuery; import org.apache.druid.guice.annotations.Self; import org.apache.druid.java.util.common.StringUtils; @@ -81,6 +82,7 @@ public class SqlResource private final ServerConfig serverConfig; private final ResponseContextConfig responseContextConfig; private final DruidNode selfNode; + private final RestExceptionEncoder exceptionEncoder; @Inject SqlResource( @@ -90,7 +92,8 @@ public class SqlResource final SqlLifecycleManager sqlLifecycleManager, final ServerConfig serverConfig, ResponseContextConfig responseContextConfig, - @Self DruidNode selfNode + @Self DruidNode selfNode, + RestExceptionEncoder exceptionEncoder ) { this.jsonMapper = Preconditions.checkNotNull(jsonMapper, "jsonMapper"); @@ -100,7 +103,7 @@ public class SqlResource this.serverConfig = Preconditions.checkNotNull(serverConfig, "serverConfig"); this.responseContextConfig = responseContextConfig; this.selfNode = selfNode; - + this.exceptionEncoder = exceptionEncoder; } @POST @@ -208,7 +211,6 @@ private SqlResourceQueryResultPusher makePusher(HttpServletRequest req, 
HttpStat private class SqlResourceQueryResultPusher extends QueryResultPusher { - private final String sqlQueryId; private final HttpStatement stmt; private final SqlQuery sqlQuery; @@ -223,13 +225,14 @@ public SqlResourceQueryResultPusher( { super( req, - SqlResource.this.jsonMapper, - SqlResource.this.responseContextConfig, - SqlResource.this.selfNode, + jsonMapper, + responseContextConfig, + selfNode, SqlResource.QUERY_METRIC_COUNTER, sqlQueryId, MediaType.APPLICATION_JSON_TYPE, - headers + headers, + exceptionEncoder ); this.sqlQueryId = sqlQueryId; this.stmt = stmt; diff --git a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java index 024467e48662..45af1d6d7044 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java @@ -25,7 +25,6 @@ import com.google.common.util.concurrent.MoreExecutors; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlParseError; import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.ISE; @@ -290,6 +289,10 @@ public void testDirectSyntaxError() } catch (SqlParseError e) { // Expected + Assert.assertEquals( + "SQL-Parse-UnexpectedToken", + e.errorCode() + ); Assert.assertEquals( QueryException.SQL_PARSE_FAILED_ERROR_CODE, e.getErrorCode() @@ -310,6 +313,10 @@ public void testDirectValidationError() } catch (SqlValidationError e) { // Expected + Assert.assertEquals( + "SQL-Validation-General", + e.errorCode() + ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, e.getErrorCode() @@ -395,6 +402,10 @@ public void testHttpValidationError() } catch (SqlValidationError e) { // Expected + Assert.assertEquals( + "SQL-Validation-General", + e.errorCode() + ); Assert.assertEquals( 
QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, e.getErrorCode() @@ -484,6 +495,10 @@ public void testPrepareValidationError() } catch (SqlValidationError e) { // Expected + Assert.assertEquals( + "SQL-Validation-General", + e.errorCode() + ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, e.getErrorCode() diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java index 1987aef421d0..1d9e76df15cc 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java @@ -642,12 +642,7 @@ public void assertQueryIsUnplannable(final PlannerConfig plannerConfig, final St Assert.assertEquals( sql, StringUtils.format("Query not supported. Possible error: %s", expectedError), - e.message() - ); - Assert.assertEquals( - sql, - sql, - e.getContext().get("SQL") + e.toString() ); } catch (Exception e) { diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index 850da29c15ae..e9bf9cbca907 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -201,7 +201,7 @@ public void testInsertIntoInvalidDataSourceName() { testIngestionQuery() .sql("INSERT INTO \"in/valid\" SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "INSERT dataSource cannot contain the '/' character.") + .expectValidationError(SqlValidationError.class, "SQL-Validation-General: message=[INSERT dataSource cannot contain the '/' character.]") .verify(); } @@ -210,7 +210,7 @@ public void testInsertUsingColumnList() { testIngestionQuery() .sql("INSERT INTO dst (foo, bar) SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - 
.expectValidationError(SqlUnsupportedError.class, "[INSERT] with a target column list is not supported") + .expectValidationError(SqlUnsupportedError.class, "SQL-Unsupported-InsertList: op=[INSERT]") .verify(); } @@ -219,7 +219,7 @@ public void testUpsert() { testIngestionQuery() .sql("UPSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlUnsupportedError.class, "UPSERT is not supported.") + .expectValidationError(SqlUnsupportedError.class, "SQL-Unsupported-UPSERT") .verify(); } @@ -232,7 +232,7 @@ public void testSelectFromSystemTable() .sql("INSERT INTO dst SELECT * FROM INFORMATION_SCHEMA.COLUMNS PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Cannot query table [INFORMATION_SCHEMA.COLUMNS] with SQL engine [ingestion-test]" + "SQL-Validation-WrongEngineForTable: tables=[INFORMATION_SCHEMA.COLUMNS], engine=[ingestion-test]" ) .verify(); } @@ -244,7 +244,7 @@ public void testInsertIntoSystemTable() .sql("INSERT INTO INFORMATION_SCHEMA.COLUMNS SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Cannot [INSERT] into [INFORMATION_SCHEMA.COLUMNS] because it is not a Druid datasource." + "SQL-Validation-InsertNotDatasource: op=[INSERT], table=[INFORMATION_SCHEMA.COLUMNS]" ) .verify(); } @@ -256,7 +256,7 @@ public void testInsertIntoView() .sql("INSERT INTO view.aview SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Cannot [INSERT] into [view.aview] because it is not a Druid datasource." + "SQL-Validation-InsertNotDatasource: op=[INSERT], table=[view.aview]" ) .verify(); } @@ -286,7 +286,7 @@ public void testInsertIntoNonexistentSchema() .sql("INSERT INTO nonexistent.dst SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Cannot [INSERT] into [nonexistent.dst] because it is not a Druid datasource." 
+ "SQL-Validation-InsertNotDatasource: op=[INSERT], table=[nonexistent.dst]" ) .verify(); } @@ -823,7 +823,7 @@ public void testInsertWithoutPartitionedByWithClusteredBy() ) .expectValidationError( SqlParseError.class, - "CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause" + "SQL-Parse-General: message=[CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause]" ) .verify(); } @@ -905,9 +905,13 @@ public void testInsertWithClusteredByAndOrderBy() } catch (SqlValidationError e) { Assert.assertEquals( - "Cannot use ORDER BY on an [INSERT] statement, use CLUSTERED BY instead.", + "SQL-Validation-InsertOrderBy: op=[INSERT]", e.getMessage() ); + Assert.assertEquals( + "Cannot use ORDER BY with INSERT, use CLUSTERED BY instead", + e.toString() + ); } didTest = true; } @@ -924,11 +928,15 @@ public void testInsertWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (SqlParseError e) { + catch (SqlValidationError e) { Assert.assertEquals( - "Encountered ['invalid_granularity'] after PARTITIONED BY. Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", + "SQL-Validation-InvalidPartitionBy: expr=['invalid_granularity']", e.getMessage() ); + Assert.assertEquals( + "Encountered ['invalid_granularity'] after PARTITIONED BY. 
Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", + e.toString() + ); } didTest = true; } @@ -949,9 +957,13 @@ public void testInsertWithOrderBy() } catch (SqlValidationError e) { Assert.assertEquals( - "Cannot use ORDER BY on an [INSERT] statement, use CLUSTERED BY instead.", + "SQL-Validation-InsertOrderBy: op=[INSERT]", e.getMessage() ); + Assert.assertEquals( + "Cannot use ORDER BY with INSERT, use CLUSTERED BY instead", + e.toString() + ); } finally { didTest = true; @@ -970,7 +982,11 @@ public void testInsertWithoutPartitionedBy() ImmutableList.of() ) ); - Assert.assertEquals("[INSERT] statements must specify PARTITIONED BY clause explicitly", e.getMessage()); + Assert.assertEquals("SQL-Validation-InsertWithoutPartitionBy: op=[INSERT]", e.getMessage()); + Assert.assertEquals( + "INSERT statements must specify the PARTITIONED BY clause explicitly", + e.toString() + ); didTest = true; } @@ -1315,7 +1331,7 @@ public void testInsertWithInvalidSelectStatement() .expectValidationError( CoreMatchers.allOf( CoreMatchers.instanceOf(SqlParseError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("Line [1], Column [37]: unexpected token [as count]")) + ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("SQL-Parse-UnexpectedToken: line=[1], column=[37], token=[as count]")) ) ) .verify(); @@ -1328,7 +1344,7 @@ public void testInsertWithUnnamedColumnInSelectStatement() .sql("INSERT INTO t SELECT dim1, dim2 || '-lol' FROM foo PARTITIONED BY ALL") .expectValidationError( SqlValidationError.class, - IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + "SQL-Validation-General: message=[" + IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + "]" ) .verify(); } @@ -1340,7 +1356,7 @@ public void testInsertWithInvalidColumnNameInIngest() .sql("INSERT INTO t SELECT __time, dim1 AS EXPR$0 FROM foo PARTITIONED BY ALL") .expectValidationError( SqlValidationError.class, - IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + 
"SQL-Validation-General: message=[" + IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + "]" ) .verify(); } @@ -1354,7 +1370,7 @@ public void testInsertWithUnnamedColumnInNestedSelectStatement() + "(SELECT __time, LOWER(dim1) FROM foo) PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + "SQL-Validation-General: message=[" + IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + "]" ) .verify(); } @@ -1366,11 +1382,11 @@ public void testInsertQueryWithInvalidGranularity() .sql("insert into foo1 select __time, dim1 FROM foo partitioned by time_floor(__time, 'PT2H')") .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlParseError.class), + CoreMatchers.instanceOf(SqlValidationError.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "The granularity specified in PARTITIONED BY is not supported.")) + "SQL-Validation-PartitionedByGrain")) ) - ) + ) .verify(); } @@ -1410,7 +1426,7 @@ public void testInsertWithSqlOuterLimit() .sql("INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Context parameter [sqlOuterLimit] cannot be provided with [INSERT]" + "SQL-Validation-InsertContext: param=[sqlOuterLimit], op=[INSERT]" ) .verify(); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java index 15f423cbaded..ac3b60bf0c3b 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java @@ -22,7 +22,6 @@ import com.google.common.collect.ImmutableList; import org.apache.calcite.avatica.SqlType; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.DateTimes; 
import org.apache.druid.java.util.common.Intervals; @@ -579,7 +578,7 @@ public void testLongs() public void testMissingParameter() { expectedException.expect(SqlValidationError.class); - expectedException.expectMessage("Parameter at position [0] is not bound"); + expectedException.expectMessage("SQL-Validation-UnboundParameter: index=[1]"); testQuery( "SELECT COUNT(*)\n" + "FROM druid.numfoo\n" @@ -594,7 +593,7 @@ public void testMissingParameter() public void testPartiallyMissingParameter() { expectedException.expect(SqlValidationError.class); - expectedException.expectMessage("Parameter at position [1] is not bound"); + expectedException.expectMessage("SQL-Validation-UnboundParameter: index=[2]"); testQuery( "SELECT COUNT(*)\n" + "FROM druid.numfoo\n" @@ -612,7 +611,7 @@ public void testPartiallyMissingParameterInTheMiddle() params.add(null); params.add(new SqlParameter(SqlType.INTEGER, 1)); expectedException.expect(SqlValidationError.class); - expectedException.expectMessage("Parameter at position [0] is not bound"); + expectedException.expectMessage("SQL-Validation-UnboundParameter: index=[1]"); testQuery( "SELECT 1 + ?, dim1 FROM foo LIMIT ?", ImmutableList.of(), diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index aa8ccc8f8f04..374235386bf7 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -25,7 +25,7 @@ import com.google.common.collect.ImmutableSet; import org.apache.calcite.runtime.CalciteContextException; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.HumanReadableBytes; @@ -372,8 +372,8 @@ public 
void testInformationSchemaColumnsOnAnotherView() public void testCannotInsertWithNativeEngine() { notMsqCompatible(); - final SqlValidationError e = Assert.assertThrows( - SqlValidationError.class, + final SqlUnsupportedError e = Assert.assertThrows( + SqlUnsupportedError.class, () -> testQuery( "INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -384,7 +384,7 @@ public void testCannotInsertWithNativeEngine() MatcherAssert.assertThat( e, ThrowableMessageMatcher.hasMessage( - CoreMatchers.equalTo("Cannot execute INSERT with SQL engine 'native'.") + CoreMatchers.equalTo("SQL-Unsupported-UnsupportedEngineOp: op=[INSERT], engine=[native]") ) ); } @@ -393,8 +393,8 @@ public void testCannotInsertWithNativeEngine() public void testCannotReplaceWithNativeEngine() { notMsqCompatible(); - final SqlValidationError e = Assert.assertThrows( - SqlValidationError.class, + final SqlUnsupportedError e = Assert.assertThrows( + SqlUnsupportedError.class, () -> testQuery( "REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -405,7 +405,7 @@ public void testCannotReplaceWithNativeEngine() MatcherAssert.assertThat( e, ThrowableMessageMatcher.hasMessage( - CoreMatchers.equalTo("Cannot execute REPLACE with SQL engine 'native'.") + CoreMatchers.equalTo("SQL-Unsupported-UnsupportedEngineOp: op=[REPLACE], engine=[native]") ) ); } @@ -1067,7 +1067,7 @@ public void testStringLatestGroupBy() public void testStringLatestGroupByWithAlwaysFalseCondition() { testQuery( - "SELECT LATEST(dim4, 10),dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN( 'something else') ) GROUP BY dim2", + "SELECT LATEST(dim4, 10), dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN('something else')) GROUP BY dim2", ImmutableList.of( Druids.newScanQueryBuilder() .dataSource(InlineDataSource.fromIterable( @@ -1092,7 +1092,7 @@ public void testStringLatestGroupByWithAlwaysFalseCondition() public void 
testStringLatestByGroupByWithAlwaysFalseCondition() { testQuery( - "SELECT LATEST_BY(dim4, __time, 10),dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN( 'something else') ) GROUP BY dim2", + "SELECT LATEST_BY(dim4, __time, 10), dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN('something else')) GROUP BY dim2", ImmutableList.of( Druids.newScanQueryBuilder() .dataSource(InlineDataSource.fromIterable( @@ -2905,7 +2905,11 @@ public void testUnionAllTablesColumnCountMismatch() } catch (SqlValidationError e) { Assert.assertTrue( - e.getMessage().contains("Column count mismatch in UNION ALL") + e.toString().contains("Column count mismatch in UNION ALL") + ); + Assert.assertEquals( + "SQL-Validation-General", + e.errorCode() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, @@ -2981,6 +2985,7 @@ public void testUnionAllTablesColumnTypeMismatchStringLong() public void testUnionAllTablesWhenMappingIsRequired() { // Cannot plan this UNION ALL operation, because the column swap would require generating a subquery. 
+ assertQueryIsUnplannable( "SELECT\n" + "c, COUNT(*)\n" @@ -3204,7 +3209,11 @@ public void testUnionAllThreeTablesColumnCountMismatch2() } catch (SqlValidationError e) { Assert.assertTrue( - e.getMessage().contains("Column count mismatch in UNION ALL") + e.toString().contains("Column count mismatch in UNION ALL") + ); + Assert.assertEquals( + "SQL-Validation-General", + e.errorCode() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, @@ -3230,7 +3239,11 @@ public void testUnionAllThreeTablesColumnCountMismatch3() } catch (SqlValidationError e) { Assert.assertTrue( - e.getMessage().contains("Column count mismatch in UNION ALL") + e.toString().contains("Column count mismatch in UNION ALL") + ); + Assert.assertEquals( + "SQL-Validation-General", + e.errorCode() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, @@ -5778,7 +5791,11 @@ public void testStringAggQueryOnComplexDatatypes() } catch (SqlValidationError e) { Assert.assertTrue( - e.getMessage().contains("Cannot use STRING_AGG on complex inputs COMPLEX") + e.toString().contains("STRING_AGG aggregation is not supported for type [COMPLEX]") + ); + Assert.assertEquals( + "SQL-Validation-General", + e.errorCode() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, @@ -5921,9 +5938,9 @@ public void testCountStarWithTimeInIntervalFilterNonLiteral() expected -> { expected.expect(CoreMatchers.instanceOf(SqlValidationError.class)); expected.expect(ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "From line 1, column 38 to line 1, column 67: " - + "Cannot apply 'TIME_IN_INTERVAL' to arguments of type 'TIME_IN_INTERVAL(, )'. " - + "Supported form(s): 'TIME_IN_INTERVAL(, )'"))); + "SQL-Validation-General: line=[1], column=[38], " + + "message=[Cannot apply 'TIME_IN_INTERVAL' to arguments of type 'TIME_IN_INTERVAL(, )'. 
Supported form(s): TIME_IN_INTERVAL(, )]")) + ); } ); } @@ -6069,7 +6086,7 @@ public void testCountStarWithTimeFilterUsingStringLiteralsInvalid_isUnplannable( Assert.assertEquals( sql, "Illegal TIMESTAMP constant [CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL]", - e.message() + e.toString() ); } catch (Exception e) { @@ -11324,7 +11341,11 @@ public void testTimeExtractWithTooFewArguments() } catch (SqlValidationError e) { Assert.assertTrue( - e.getMessage().contains("Invalid number of arguments to function 'TIME_EXTRACT'. Was expecting 2 arguments") + e.toString().contains("Invalid number of arguments to function 'TIME_EXTRACT'. Was expecting 2 arguments") + ); + Assert.assertEquals( + "SQL-Validation-General", + e.errorCode() ); Assert.assertEquals( QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, @@ -13984,7 +14005,7 @@ public void testStringAggExpression() ); } - @Test(expected = SqlValidationError.class) + @Test(expected = SqlUnsupportedError.class) public void testStringAggExpressionNonConstantSeparator() { testQuery( diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java index d8bd358e39a1..15c48254f2b2 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java @@ -22,7 +22,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.apache.druid.error.DruidExceptionV1; +import org.apache.druid.error.SqlParseError; +import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; @@ -221,7 +222,7 @@ public void testReplaceForUnsupportedDeleteWhereClause() .sql("REPLACE INTO dst 
OVERWRITE WHERE __time LIKE '20__-02-01' SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( SqlValidationError.class, - "Unsupported operation in OVERWRITE WHERE clause: LIKE" + "SQL-Validation-OverwriteWhereExpr: expr=[LIKE]" ) .verify(); } @@ -233,7 +234,7 @@ public void testReplaceForInvalidDeleteWhereClause() .sql("REPLACE INTO dst OVERWRITE WHERE TRUE SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( SqlValidationError.class, - "Invalid OVERWRITE WHERE clause" + "SQL-Validation-InvalidOverwriteWhere" ) .verify(); } @@ -245,7 +246,7 @@ public void testReplaceForDeleteWhereClauseOnUnsupportedColumns() .sql("REPLACE INTO dst OVERWRITE WHERE dim1 > TIMESTAMP '2000-01-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Only __time column is supported in OVERWRITE WHERE clause" + "SQL-Validation-OverwriteWhereIsNotTime" ) .verify(); } @@ -256,7 +257,7 @@ public void testReplaceWithOrderBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo ORDER BY dim1 PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "Cannot have ORDER BY on a REPLACE statement, use CLUSTERED BY instead.") + .expectValidationError(SqlValidationError.class, "SQL-Validation-InsertOrderBy: op=[REPLACE]") .verify(); } @@ -267,7 +268,7 @@ public void testReplaceForMisalignedPartitionInterval() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-01-06 00:00:00' SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( SqlValidationError.class, - "OVERWRITE WHERE clause contains an interval [2000-01-05T00:00:00.000Z/2000-01-06T00:00:00.001Z] which is not aligned with PARTITIONED BY granularity {type=period, period=P1M, timeZone=UTC, origin=null}" + "SQL-Validation-OverwriteUnalignedInterval: interval=[[2000-01-05T00:00:00.000Z/2000-01-06T00:00:00.001Z]], granularity=[{type=period, period=P1M, 
timeZone=UTC, origin=null}]" ) .verify(); } @@ -279,7 +280,7 @@ public void testReplaceForInvalidPartition() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "OVERWRITE WHERE clause contains an interval [2000-01-05T00:00:00.000Z/2000-02-05T00:00:00.001Z] which is not aligned with PARTITIONED BY granularity AllGranularity" + "SQL-Validation-OverwriteUnalignedInterval: interval=[[2000-01-05T00:00:00.000Z/2000-02-05T00:00:00.001Z]], granularity=[AllGranularity]" ) .verify(); } @@ -293,7 +294,7 @@ public void testReplaceFromTableWithEmptyInterval() + "SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( SqlValidationError.class, - "Intervals for replace are empty" + "SQL-Validation-OverwriteEmptyIntervals" ) .verify(); } @@ -303,7 +304,7 @@ public void testReplaceForWithInvalidInterval() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-INVALID0:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class) + .expectValidationError(SqlParseError.class) .verify(); } @@ -382,7 +383,7 @@ public void testReplaceIntoInvalidDataSourceName() { testIngestionQuery() .sql("REPLACE INTO \"in/valid\" OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "REPLACE dataSource cannot contain the '/' character.") + .expectValidationError(SqlValidationError.class, "SQL-Validation-General: message=[REPLACE dataSource cannot contain the '/' character.]") .verify(); } @@ -391,7 +392,7 @@ public void testReplaceUsingColumnList() { testIngestionQuery() .sql("REPLACE INTO dst (foo, bar) OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "REPLACE with a target 
column list is not supported.") + .expectValidationError(SqlUnsupportedError.class, "SQL-Unsupported-InsertList: op=[REPLACE]") .verify(); } @@ -400,7 +401,7 @@ public void testReplaceWithoutPartitionedBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo") - .expectValidationError(SqlValidationError.class, "REPLACE statements must specify PARTITIONED BY clause explicitly") + .expectValidationError(SqlValidationError.class, "SQL-Validation-InsertWithoutPartitionBy: op=[REPLACE]") .verify(); } @@ -409,7 +410,7 @@ public void testReplaceWithoutPartitionedByWithClusteredBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo CLUSTERED BY dim1") - .expectValidationError(SqlValidationError.class, "CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause") + .expectValidationError(SqlParseError.class, "SQL-Parse-General: message=[CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause]") .verify(); } @@ -418,7 +419,7 @@ public void testReplaceWithoutOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") + .expectValidationError(SqlValidationError.class, "SQL-Validation-OverwriteTimeRange") .verify(); } @@ -427,7 +428,7 @@ public void testReplaceWithoutCompleteOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "Missing time chunk information in OVERWRITE clause for REPLACE. 
Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.") + .expectValidationError(SqlValidationError.class, "SQL-Validation-OverwriteTimeRange") .verify(); } @@ -438,7 +439,7 @@ public void testReplaceIntoSystemTable() .sql("REPLACE INTO INFORMATION_SCHEMA.COLUMNS OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Cannot REPLACE into INFORMATION_SCHEMA.COLUMNS because it is not a Druid datasource." + "SQL-Validation-InsertNotDatasource: op=[REPLACE], table=[INFORMATION_SCHEMA.COLUMNS]" ) .verify(); } @@ -450,7 +451,7 @@ public void testReplaceIntoView() .sql("REPLACE INTO view.aview OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Cannot REPLACE into view.aview because it is not a Druid datasource." + "SQL-Validation-InsertNotDatasource: op=[REPLACE], table=[view.aview]" ) .verify(); } @@ -480,7 +481,7 @@ public void testReplaceIntoNonexistentSchema() .sql("REPLACE INTO nonexistent.dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( SqlValidationError.class, - "Cannot REPLACE into nonexistent.dst because it is not a Druid datasource." + "SQL-Validation-InsertNotDatasource: op=[REPLACE], table=[nonexistent.dst]" ) .verify(); } @@ -586,9 +587,9 @@ public void testReplaceWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (SqlValidationError e) { + catch (SqlParseError e) { assertEquals( - "Encountered 'invalid_granularity' after PARTITIONED BY. 
Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", + "SQL-Parse-General: message=[SQL-Parse-InvalidPartitionBy: expr=['invalid_granularity']]", e.getMessage() ); } @@ -910,7 +911,7 @@ public void testReplaceWithSqlOuterLimit() testIngestionQuery() .context(context) .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "sqlOuterLimit cannot be provided with REPLACE.") + .expectValidationError(SqlValidationError.class, "SQL-Validation-InsertContext: param=[sqlOuterLimit], op=[REPLACE]") .verify(); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java index f694accde48c..7cb287bd3c2c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java @@ -21,7 +21,6 @@ import com.google.common.collect.ImmutableList; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.DruidExceptionV1; import org.apache.druid.error.SqlValidationError; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; @@ -313,7 +312,8 @@ public void testSelectConstantExpressionFromTable() public void testSelectConstantExpressionEquivalentToNaN() { expectedException.expectMessage( - "'(log10(0) - log10(0))' evaluates to 'NaN' that is not supported in SQL. 
You can either cast the expression as BIGINT ('CAST((log10(0) - log10(0)) as BIGINT)') or VARCHAR ('CAST((log10(0) - log10(0)) as VARCHAR)') or change the expression itself"); + "SQL-Unsupported-UnsupportedExpr: expr=[(log10(0) - log10(0))], eval=[NaN]" + ); testQuery( "SELECT log10(0) - log10(0), dim1 FROM foo LIMIT 1", ImmutableList.of(), @@ -325,7 +325,7 @@ public void testSelectConstantExpressionEquivalentToNaN() public void testSelectConstantExpressionEquivalentToInfinity() { expectedException.expectMessage( - "'log10(0)' evaluates to '-Infinity' that is not supported in SQL. You can either cast the expression as BIGINT ('CAST(log10(0) as BIGINT)') or VARCHAR ('CAST(log10(0) as VARCHAR)') or change the expression itself"); + "SQL-Unsupported-UnsupportedExpr: expr=[log10(0)], eval=[-Infinity]"); testQuery( "SELECT log10(0), dim1 FROM foo LIMIT 1", ImmutableList.of(), diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index 030049f370e3..586f9eeb916e 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -35,6 +35,7 @@ import org.apache.druid.common.exception.ErrorResponseTransformStrategy; import org.apache.druid.common.guava.SettableSupplier; import org.apache.druid.error.ErrorResponse; +import org.apache.druid.error.StandardRestExceptionEncoder; import org.apache.druid.jackson.DefaultObjectMapper; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.NonnullPair; @@ -310,7 +311,8 @@ public PreparedStatement preparedStatement(SqlQueryPlus sqlRequest) lifecycleManager, new ServerConfig(), TEST_RESPONSE_CONTEXT_CONFIG, - DUMMY_DRUID_NODE + DUMMY_DRUID_NODE, + StandardRestExceptionEncoder.instance() ); } @@ -1532,7 +1534,8 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() } }, TEST_RESPONSE_CONTEXT_CONFIG, - 
DUMMY_DRUID_NODE + DUMMY_DRUID_NODE, + StandardRestExceptionEncoder.instance() ); String errorMessage = "This will be supported in Druid 9999"; @@ -1581,7 +1584,8 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() } }, TEST_RESPONSE_CONTEXT_CONFIG, - DUMMY_DRUID_NODE + DUMMY_DRUID_NODE, + StandardRestExceptionEncoder.instance() ); String errorMessage = "could not assert"; From 7b62abc2cb5c763450204914d65181bb51d2ce68 Mon Sep 17 00:00:00 2001 From: imply-cheddar Date: Thu, 30 Mar 2023 15:03:35 +0900 Subject: [PATCH 10/17] Add DruidException This adds a singular DruidException that is intended to be used to represent all terminal states and error messages that should result in responses delivered to the user. Javadocs exist on DruidException itself. --- .../druid/msq/sql/MSQTaskSqlEngine.java | 7 +- .../apache/druid/msq/sql/SqlTaskResource.java | 16 +- .../apache/druid/msq/exec/MSQInsertTest.java | 34 +- .../apache/druid/msq/exec/MSQReplaceTest.java | 39 +- .../apache/druid/msq/exec/MSQSelectTest.java | 49 +- ...erifyMSQSupportedNativeQueriesFactory.java | 0 .../apache/druid/common/utils/IdUtils.java | 59 ++- .../druid/error/DruidAssertionError.java | 73 --- .../apache/druid/error/DruidException.java | 421 +++++++++++++----- .../org/apache/druid/error/ErrorAudience.java | 40 -- .../org/apache/druid/error/ErrorResponse.java | 208 +++++++-- .../org/apache/druid/error/InvalidInput.java | 63 +++ .../{ErrorCode.java => InvalidSqlInput.java} | 31 +- .../apache/druid/error/MetricCategory.java | 27 -- .../druid/error/QueryExceptionCompat.java | 72 +++ .../java/org/apache/druid/error/README.md | 25 +- .../org/apache/druid/error/SqlParseError.java | 72 --- .../druid/error/SqlUnsupportedError.java | 94 ---- .../druid/error/SqlValidationError.java | 78 ---- .../druid/java/util/common/logger/Logger.java | 31 ++ .../segment/nested/NestedPathFinder.java | 35 +- .../druid/error/DruidExceptionMatcher.java | 103 +++++ 
.../org/apache/druid/matchers/DMatchers.java | 14 +- .../apache/druid/matchers/LambdaMatcher.java | 62 +++ .../error/StandardRestExceptionEncoder.java | 88 ---- .../apache/druid/server/QueryResource.java | 4 +- .../druid/server/QueryResultPusher.java | 117 ++--- sql/pom.xml | 5 + sql/src/main/codegen/includes/common.ftl | 1 - sql/src/main/codegen/includes/insert.ftl | 4 +- sql/src/main/codegen/includes/replace.ftl | 4 +- .../org/apache/druid/sql/DirectStatement.java | 12 +- .../builtin/ArraySqlAggregator.java | 4 - .../aggregation/builtin/AvgSqlAggregator.java | 8 +- .../EarliestLatestAnySqlAggregator.java | 82 ++-- .../aggregation/builtin/MaxSqlAggregator.java | 10 +- .../aggregation/builtin/MinSqlAggregator.java | 3 +- .../builtin/SimpleSqlAggregator.java | 8 + .../builtin/StringSqlAggregator.java | 14 +- .../aggregation/builtin/SumSqlAggregator.java | 10 +- .../NestedDataOperatorConversions.java | 57 +-- .../calcite/parser/DruidSqlParserUtils.java | 305 +++++++------ .../sql/calcite/planner/DruidPlanner.java | 248 +++++++---- .../sql/calcite/planner/DruidRexExecutor.java | 48 +- .../sql/calcite/planner/IngestHandler.java | 134 +++--- .../sql/calcite/planner/QueryHandler.java | 116 ++--- .../planner/RelParameterizerShuttle.java | 8 +- .../planner/SqlParameterizerShuttle.java | 29 +- .../calcite/planner/SqlStatementHandler.java | 2 - .../sql/calcite/rel/DruidJoinQueryRel.java | 11 +- .../druid/sql/calcite/rel/DruidQuery.java | 12 +- .../calcite/rule/DruidLogicalValuesRule.java | 21 +- .../sql/calcite/run/NativeSqlEngine.java | 12 +- .../druid/sql/calcite/run/SqlEngines.java | 8 +- .../sql/calcite/table/RowSignatures.java | 5 + .../org/apache/druid/sql/guice/SqlModule.java | 5 - .../apache/druid/sql/http/SqlResource.java | 11 +- .../apache/druid/sql/SqlStatementTest.java | 168 ++++--- .../sql/avatica/DruidAvaticaHandlerTest.java | 242 ++++++---- .../druid/sql/avatica/DruidStatementTest.java | 15 +- .../sql/calcite/BaseCalciteQueryTest.java | 35 +- 
.../sql/calcite/CalciteInsertDmlTest.java | 151 +++---- .../sql/calcite/CalciteJoinQueryTest.java | 52 ++- .../CalciteMultiValueStringQueryTest.java | 15 +- .../calcite/CalciteNestedDataQueryTest.java | 10 +- .../calcite/CalciteParameterQueryTest.java | 17 +- .../druid/sql/calcite/CalciteQueryTest.java | 259 +++++------ .../sql/calcite/CalciteReplaceDmlTest.java | 124 +++--- .../sql/calcite/CalciteSelectQueryTest.java | 17 +- .../druid/sql/calcite/QueryTestRunner.java | 4 +- .../parser/DruidSqlParserUtilsTest.java | 19 +- .../rule/DruidLogicalValuesRuleTest.java | 29 +- .../druid/sql/calcite/util/QueryLogHook.java | 17 + .../druid/sql/http/SqlResourceTest.java | 395 ++++++++++------ 74 files changed, 2522 insertions(+), 2106 deletions(-) create mode 100644 extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/VerifyMSQSupportedNativeQueriesFactory.java delete mode 100644 processing/src/main/java/org/apache/druid/error/DruidAssertionError.java delete mode 100644 processing/src/main/java/org/apache/druid/error/ErrorAudience.java create mode 100644 processing/src/main/java/org/apache/druid/error/InvalidInput.java rename processing/src/main/java/org/apache/druid/error/{ErrorCode.java => InvalidSqlInput.java} (56%) delete mode 100644 processing/src/main/java/org/apache/druid/error/MetricCategory.java create mode 100644 processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java delete mode 100644 processing/src/main/java/org/apache/druid/error/SqlParseError.java delete mode 100644 processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java delete mode 100644 processing/src/main/java/org/apache/druid/error/SqlValidationError.java create mode 100644 processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java rename server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java => processing/src/test/java/org/apache/druid/matchers/DMatchers.java (73%) create mode 100644 
processing/src/test/java/org/apache/druid/matchers/LambdaMatcher.java delete mode 100644 server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java index e021cef98b58..f76c2c392f3e 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java +++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java @@ -32,6 +32,7 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; @@ -261,7 +262,7 @@ private static void validateNoDuplicateAliases(final List> } } - private static void validateLimitAndOffset(final RelNode topRel, final boolean limitOk) throws ValidationException + private static void validateLimitAndOffset(final RelNode topRel, final boolean limitOk) { Sort sort = null; @@ -283,13 +284,13 @@ private static void validateLimitAndOffset(final RelNode topRel, final boolean l // The segment generator relies on shuffle statistics to determine segment intervals when PARTITIONED BY is not ALL, // and LIMIT/OFFSET prevent shuffle statistics from being generated. This is because they always send everything // to a single partition, so there are no shuffle statistics. - throw new ValidationException( + throw InvalidSqlInput.exception( "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"." ); } if (sort != null && sort.offset != null) { // Found an outer OFFSET that is not allowed. 
- throw new ValidationException("INSERT and REPLACE queries cannot have an OFFSET."); + throw InvalidSqlInput.exception("INSERT and REPLACE queries cannot have an OFFSET."); } } diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java index 4684bac70a45..d270963db462 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java +++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/SqlTaskResource.java @@ -25,7 +25,7 @@ import com.google.inject.Inject; import org.apache.druid.common.exception.SanitizableException; import org.apache.druid.error.DruidException; -import org.apache.druid.error.StandardRestExceptionEncoder; +import org.apache.druid.error.ErrorResponse; import org.apache.druid.guice.annotations.MSQ; import org.apache.druid.indexer.TaskState; import org.apache.druid.java.util.common.guava.Sequence; @@ -65,21 +65,20 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.StreamingOutput; - import java.io.IOException; import java.util.Collections; /** * Endpoint for SQL execution using MSQ tasks. - * + *

* Unlike the SQL endpoint in {@link SqlResource}, this endpoint returns task IDs instead of inline results. Queries * are executed asynchronously using MSQ tasks via the indexing service (Overlord + MM or Indexer). This endpoint * does not provide a way for users to get the status or results of a query. That must be done using Overlord APIs * for status and reports. - * + *

* One exception: EXPLAIN query results are returned inline by this endpoint, in the same way as {@link SqlResource} * would return them. - * + *

* This endpoint does not support system tables or INFORMATION_SCHEMA. Queries on those tables result in errors. */ @Path("/druid/v2/sql/task/") @@ -131,7 +130,7 @@ public Response doGetEnabled(@Context final HttpServletRequest request) /** * Post a query task. - * + *

* Execution uses {@link MSQTaskSqlEngine} to ship the query off to the Overlord as an indexing task using * {@link org.apache.druid.msq.indexing.MSQControllerTask}. The task ID is returned immediately to the caller, * and execution proceeds asynchronously. @@ -163,7 +162,10 @@ public Response doPost( } catch (DruidException e) { stmt.reporter().failed(e); - return StandardRestExceptionEncoder.instance().encode(e); + return Response.status(e.getStatusCode()) + .type(MediaType.APPLICATION_JSON_TYPE) + .entity(new ErrorResponse(e)) + .build(); } // Kitchen-sinking the errors since they are all unchecked. // Just copied from SqlResource. diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java index 31c19a02e11e..794c944adf26 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java @@ -24,8 +24,7 @@ import com.google.common.collect.ImmutableSet; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; -import org.apache.druid.error.SqlParseError; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; import org.apache.druid.hll.HyperLogLogCollector; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Intervals; @@ -54,7 +53,6 @@ import org.mockito.Mockito; import javax.annotation.Nonnull; - import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -541,11 +539,9 @@ public void testInsertOnFoo1WithMultiValueMeasureGroupBy() "INSERT INTO foo1 SELECT count(dim3) FROM foo WHERE dim3 IS NOT NULL GROUP BY 1 PARTITIONED BY ALL TIME") .setExpectedDataSource("foo1") .setQueryContext(context) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - 
CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "Aggregate expression is illegal in GROUP BY clause")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlContains("Aggregate expression is illegal in GROUP BY clause") + ) .verifyPlanningErrors(); } @@ -967,7 +963,7 @@ public void testInsertWrongTypeTimestamp() .setExpectedRowSignature(rowSignature) .setQueryContext(context) .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "Field \"__time\" must be of type TIMESTAMP")) )) @@ -977,14 +973,14 @@ public void testInsertWrongTypeTimestamp() @Test public void testIncorrectInsertQuery() { - testIngestQuery().setSql( - "insert into foo1 select __time, dim1 , count(*) as cnt from foo where dim1 is not null group by 1, 2 clustered by dim1") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlParseError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause")) - )) - .verifyPlanningErrors(); + testIngestQuery() + .setSql( + "insert into foo1 select __time, dim1 , count(*) as cnt from foo where dim1 is not null group by 1, 2 clustered by dim1" + ) + .setExpectedValidationErrorMatcher(invalidSqlContains( + "LUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + )) + .verifyPlanningErrors(); } @@ -1098,7 +1094,7 @@ public void testInsertLimitWithPeriodGranularityThrowsException() + "LIMIT 50 " + "PARTITIONED BY MONTH") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) )) @@ -1116,7 +1112,7 @@ public void testInsertOffsetThrowsException() + "OFFSET 10" + "PARTITIONED BY ALL TIME") .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "INSERT and REPLACE queries cannot have an OFFSET")) )) diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java index b777afacb8cb..42716297ab09 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java @@ -21,10 +21,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; -import org.apache.druid.error.SqlValidationError; -import org.apache.druid.error.DruidException; import org.apache.druid.common.config.NullHandling; -import 
org.apache.druid.error.SqlValidationError; import org.apache.druid.indexing.common.actions.RetrieveUsedSegmentsAction; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.msq.test.CounterSnapshotMatcher; @@ -33,20 +30,16 @@ import org.apache.druid.msq.test.MSQTestTaskActionClient; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; -import org.apache.druid.sql.SqlPlanningException; import org.apache.druid.timeline.DataSegment; import org.apache.druid.timeline.SegmentId; import org.apache.druid.timeline.partition.DimensionRangeShardSpec; -import org.hamcrest.CoreMatchers; import org.junit.Test; -import org.junit.internal.matchers.ThrowableMessageMatcher; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.mockito.ArgumentMatchers; import org.mockito.Mockito; import javax.annotation.Nonnull; - import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -331,17 +324,15 @@ public void testReplaceOnFoo1WithWhereExtern() throws IOException @Test public void testReplaceIncorrectSyntax() { - testIngestQuery().setSql("REPLACE INTO foo1 OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") - .setExpectedDataSource("foo1") - .setQueryContext(context) - .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "Missing time chunk information in OVERWRITE clause for REPLACE. Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table.")) - ) - ) - .verifyPlanningErrors(); + testIngestQuery() + .setSql("REPLACE INTO foo1 OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") + .setExpectedDataSource("foo1") + .setQueryContext(context) + .setExpectedValidationErrorMatcher(invalidSqlContains( + "Missing time chunk information in OVERWRITE clause for REPLACE. 
" + + "Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." + )) + .verifyPlanningErrors(); } @Test @@ -585,10 +576,8 @@ public void testReplaceLimitWithPeriodGranularityThrowsException() + "LIMIT 50" + "PARTITIONED BY MONTH") .setQueryContext(context) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) + .setExpectedValidationErrorMatcher(invalidSqlContains( + "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"" )) .verifyPlanningErrors(); } @@ -603,10 +592,8 @@ public void testReplaceOffsetThrowsException() + "LIMIT 50 " + "OFFSET 10" + "PARTITIONED BY ALL TIME") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "INSERT and REPLACE queries cannot have an OFFSET")) + .setExpectedValidationErrorMatcher(invalidSqlContains( + "INSERT and REPLACE queries cannot have an OFFSET" )) .setQueryContext(context) .verifyPlanningErrors(); diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java index c2f91ec1ce12..ff5480611944 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java @@ -25,9 +25,8 @@ import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.JsonInputFormat; import org.apache.druid.data.input.impl.LocalInputSource; -import org.apache.druid.error.SqlParseError; -import org.apache.druid.error.SqlUnsupportedError; -import 
org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.frame.util.DurableStorageUtils; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.ISE; @@ -74,7 +73,6 @@ import org.apache.druid.sql.calcite.planner.ColumnMappings; import org.apache.druid.sql.calcite.planner.JoinAlgorithm; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.util.CalciteTests; import org.hamcrest.CoreMatchers; import org.junit.Test; @@ -85,7 +83,6 @@ import org.mockito.Mockito; import javax.annotation.Nonnull; - import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -1188,10 +1185,9 @@ public void testIncorrectSelectQuery() { testSelectQuery() .setSql("select a from ") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlParseError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("SQL-Parse-UnexpectedToken: line=[1], column=[10], token=[from ],")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlContains("Received an unexpected token [from ] (line [1], column [10]), acceptable options") + ) .setQueryContext(context) .verifyPlanningErrors(); } @@ -1203,11 +1199,7 @@ public void testSelectOnInformationSchemaSource() .setSql("SELECT * FROM INFORMATION_SCHEMA.SCHEMATA") .setQueryContext(context) .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "SQL-Validation-WrongEngineForTable: tables=[INFORMATION_SCHEMA.SCHEMATA], engine=[msq-task]")) - ) + invalidSqlIs("Cannot query table(s) [INFORMATION_SCHEMA.SCHEMATA] with SQL engine [msq-task]") ) .verifyPlanningErrors(); } @@ -1219,11 +1211,7 @@ public void testSelectOnSysSource() 
.setSql("SELECT * FROM sys.segments") .setQueryContext(context) .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "SQL-Validation-WrongEngineForTable: tables=[sys.segments], engine=[msq-task]")) - ) + invalidSqlIs("Cannot query table(s) [sys.segments] with SQL engine [msq-task]") ) .verifyPlanningErrors(); } @@ -1235,11 +1223,7 @@ public void testSelectOnSysSourceWithJoin() .setSql("select s.segment_id, s.num_rows, f.dim1 from sys.segments as s, foo as f") .setQueryContext(context) .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "SQL-Validation-WrongEngineForTable: tables=[sys.segments], engine=[msq-task]")) - ) + invalidSqlIs("Cannot query table(s) [sys.segments] with SQL engine [msq-task]") ) .verifyPlanningErrors(); } @@ -1252,11 +1236,7 @@ public void testSelectOnSysSourceContainingWith() + "select segment_source.segment_id, segment_source.num_rows from segment_source") .setQueryContext(context) .setExpectedValidationErrorMatcher( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "SQL-Validation-WrongEngineForTable: tables=[sys.segments], engine=[msq-task]")) - ) + invalidSqlIs("Cannot query table(s) [sys.segments] with SQL engine [msq-task]") ) .verifyPlanningErrors(); } @@ -1645,8 +1625,13 @@ public void testTimeColumnAggregationFromExtern() throws IOException + "FROM kttm_data " + "GROUP BY 1") .setExpectedValidationErrorMatcher( - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "LATEST() aggregator depends on __time column")) + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "adhoc") + .expectMessageIs( + "Query planning failed for unknown reason, our best 
guess is this " + + "[LATEST and EARLIEST aggregators implicitly depend on the __time column, " + + "but the table queried doesn't contain a __time column. " + + "Please use LATEST_BY or EARLIEST_BY and specify the column explicitly.]" + ) ) .setExpectedRowSignature(rowSignature) .verifyPlanningErrors(); @@ -1677,7 +1662,7 @@ public void testGroupByWithComplexColumnThrowsUnsupportedException() .setSql("select unique_dim1 from foo2 group by unique_dim1") .setQueryContext(context) .setExpectedExecutionErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlUnsupportedError.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "SQL requires a group-by on a column of type COMPLEX that is unsupported")) )) diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/VerifyMSQSupportedNativeQueriesFactory.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/VerifyMSQSupportedNativeQueriesFactory.java new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/processing/src/main/java/org/apache/druid/common/utils/IdUtils.java b/processing/src/main/java/org/apache/druid/common/utils/IdUtils.java index 2d3f33010162..88d4d0d413ba 100644 --- a/processing/src/main/java/org/apache/druid/common/utils/IdUtils.java +++ b/processing/src/main/java/org/apache/druid/common/utils/IdUtils.java @@ -21,10 +21,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import org.apache.druid.error.InvalidInput; import org.apache.druid.java.util.common.DateTimes; -import org.apache.druid.java.util.common.IAE; import org.joda.time.DateTime; import org.joda.time.Interval; @@ -43,23 +42,32 @@ public class IdUtils public static String validateId(String thingToValidate, String stringToValidate) { - Preconditions.checkArgument( - 
!Strings.isNullOrEmpty(stringToValidate), - "%s cannot be null or empty. Please provide a %s.", thingToValidate, thingToValidate - ); - Preconditions.checkArgument( - !stringToValidate.startsWith("."), - "%s cannot start with the '.' character.", thingToValidate - ); - Preconditions.checkArgument( - !stringToValidate.contains("/"), - "%s cannot contain the '/' character.", thingToValidate - ); + if (Strings.isNullOrEmpty(stringToValidate)) { + throw InvalidInput.exception("Invalid value for field [%s]: must not be null", thingToValidate); + } + if (stringToValidate.startsWith(".")) { + throw InvalidInput.exception( + "Invalid value for field [%s]: Value [%s] cannot start with '.'.", + thingToValidate, + stringToValidate + ); + } + if (stringToValidate.contains("/")) { + throw InvalidInput.exception( + "Invalid value for field [%s]: Value [%s] cannot contain '/'.", + thingToValidate, + stringToValidate + ); + } + Matcher m = INVALIDCHARS.matcher(stringToValidate); - Preconditions.checkArgument( - !m.matches(), - "%s cannot contain whitespace character except space.", thingToValidate - ); + if (m.matches()) { + throw InvalidInput.exception( + "Invalid value for field [%s]: Value [%s] contains illegal whitespace characters. Only space is allowed.", + thingToValidate, + stringToValidate + ); + } for (int i = 0; i < stringToValidate.length(); i++) { final char c = stringToValidate.charAt(i); @@ -68,7 +76,13 @@ public static String validateId(String thingToValidate, String stringToValidate) // znode paths. The first two ranges are control characters, the second two ranges correspond to surrogate // pairs. This means that characters outside the basic multilingual plane, such as emojis, are not allowed. 
😢 if (c > 0 && c < 31 || c > 127 && c < 159 || c > '\ud800' && c < '\uf8ff' || c > '\ufff0' && c < '\uffff') { - throw new IAE("%s cannot contain character #%d (at position %d).", thingToValidate, (int) c, i); + throw InvalidInput.exception( + "Invalid value for field [%s]: Value [%s] contains illegal UTF8 character [#%d] at position [%d]", + thingToValidate, + stringToValidate, + (int) c, + i + ); } } @@ -94,7 +108,12 @@ public static String newTaskId(String typeName, String dataSource, @Nullable Int return newTaskId(null, typeName, dataSource, interval); } - public static String newTaskId(@Nullable String idPrefix, String typeName, String dataSource, @Nullable Interval interval) + public static String newTaskId( + @Nullable String idPrefix, + String typeName, + String dataSource, + @Nullable Interval interval + ) { return newTaskId(idPrefix, getRandomId(), DateTimes.nowUtc(), typeName, dataSource, interval); } diff --git a/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java b/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java deleted file mode 100644 index d6edf2ecd8cd..000000000000 --- a/processing/src/main/java/org/apache/druid/error/DruidAssertionError.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.error; - -import org.apache.druid.java.util.common.UOE; -import org.apache.druid.query.QueryException; - -import java.net.HttpURLConnection; - -public class DruidAssertionError extends DruidException -{ - public DruidAssertionError( - String message - ) - { - this(null, message); - } - - public DruidAssertionError( - Throwable cause, - String message - ) - { - super( - cause, - ErrorCode.fullCode(ErrorCode.INTERNAL_GROUP, "AssertionFailed"), - message - ); - this.legacyCode = QueryException.UNSUPPORTED_OPERATION_ERROR_CODE; - this.legacyClass = UOE.class.getName(); - } - - public static DruidException forMessage(String message) - { - return new DruidAssertionError(SIMPLE_MESSAGE) - .withValue(MESSAGE_KEY, message); - } - - public static DruidException forCause(Throwable cause, String message) - { - return new DruidAssertionError(cause, SIMPLE_MESSAGE) - .withValue(MESSAGE_KEY, message); - } - - @Override - public ErrorAudience audience() - { - return ErrorAudience.DRUID_DEVELOPER; - } - - @Override - public int httpStatus() - { - return HttpURLConnection.HTTP_INTERNAL_ERROR; - } -} diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index 08df486d0342..2237402f54a6 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -19,196 +19,371 @@ package org.apache.druid.error; -import org.apache.commons.text.StringSubstitutor; +import com.fasterxml.jackson.annotation.JsonValue; +import com.google.common.base.Preconditions; +import org.apache.druid.java.util.common.StringUtils; import javax.annotation.concurrent.NotThreadSafe; - -import java.util.ArrayList; import java.util.LinkedHashMap; -import java.util.List; import java.util.Map; 
-import java.util.Objects; -import java.util.Properties; /** - * Represents an error condition exposed to the user and/or operator of Druid. + * Represents an error condition exposed to the user and/or operator of Druid. Given that a DruidException is intended + * to be delivered to the end user, it should generally never be caught. DruidExceptions are generated at terminal + * points where the operation that was happening cannot make forward progress. As such, the only reason to catch a + * DruidException is if the code has some extra context that it wants to add to the message of the DruidException using + * {@link #prependAndBuild(String, Object...)}. If code wants to catch and handle an exception instead, it should not + * be using the DruidException. + *

+ * Said another way, when a developer builds a DruidException in the code, they should be confident that the exception + * will make its way back to the user. DruidException is always the answer to "how do I generate an error message and + * deliver it to the user"? + *

* Every error consists of: *

    - *
  • An error code.
  • - *
  • A set of zero or more parameters.
  • - *
  • A default error message "template".
  • + *
  • A target persona
  • + *
  • A categorization of the error
  • + *
  • An error code
  • + *
  • An error message
  • + *
  • A context (possibly empty)
  • *
*

- * The error code is a unique identifier for each and every distinct - * kind of error. Codes should follow the pattern of - * @{code --} such as - * @{code SQL-VALIDATION-UNKNOWN_COLUMN}. *

- * The message template is a user-visible explanation for the error. - * The message is a template because it contains named placeholders - * to fill in with parameters:
- * "Line ${line}, Column ${column}: Column [${name}] not found"
+ * The target persona indicates who the message is written for. This is important for 2 reasons + *

    + *
  1. It identifies why the developer is creating the exception and who they believe can take action on it. + * This context allows for code reviewers and other developers to evaluate the message with this context in mind
  2. + *
  3. It can be used as a way to control which error messages should be routed where. For example, a user-targetted + * error message should be able to be exposed directly to the user, while an operator-targetted error message should + * perhaps be routed to the operators of the system instead of the end user firing a query.
  4. + *
*

- * The parameters are the values to fill in the placeholders in the - * template. Each subclass defines the parameters for that error, along - * with the required mapping to placeholders. + * The category indicates what kind of failure occurred. This is leveraged to align response codes (e.g. HTTP response + * codes) for similar exception messages. *

- * With this system, extensions can translate the messages to the needs - * of a specific system. For example, if system generates SQL, then telling - * the user the line number of the error is just confusing. In that system, - * the error could be translated to:
- * "Field '${name}' is not defined. Check the field list."
+ * The error code is a code that indicates a grouping of error messages. There is no forced structure around whether + * a specific error code can be reused for different problems or not. That is, an error code like "adhoc" will get + * reused in many different places as it's the basic error code used whenever a DruidException is created in-line. But, + * we might decide that a specific type of error should be identified explicitly by its error code and should only mean + * one thing, in which case that error code might only exist on a single error. *

- * Exceptions are mutable and must not be modified by two threads concurrently. - * However, it is highly unlikely that such concurrent access would occur: that's - * not how exceptions work. Exceptions can be exchanged across threads, as long - * as only one thread at a time mutates the exception. + * The error message is a message written targetting the target persona. It should have values interpolated into it + * in order to be as meaningful as possible for the target persona without leaking potentially sensitive information. *

- * Druid exceptions allow the calling method (or thread) to add context and set - * the host name. It is often easier for a higher-level method to fill in this - * Information than to pass the information into every method. For example: - *


- * void doTheRead(Reader reader) {
- *   try {
- *      // read some stuff
- *   } catch (IOException e) {
- *     throw new DruidIOException(e);
- *   }
- * }
+ * The context is a place to add extra information about the error that is not necessarily interpolated into the
+ * error message.  It's a way to carry extra information that might be useful to a developer, but not necessarily to
+ * the target persona.
  *
- * void outer(File theFile) {
- *   try (Reader reader = open(theFile)) {
- *     doTheRead(reader)
- *   } catch (DruidException e) {
- *      e.setFileName(theFile.getName());
- *      throw e;
- *   }
- * }
- * 
+ * Notes for developers working with DruidException + *

+ * A DruidException can be built from one of 2 static methods: {@link #forPersona} or {@link #fromFailure(Failure)}. + * The only way to set a specific error code is to build a DruidException from a Failure, when built in-line using + * forPersona, it will always be an "adhoc" error. *

- * Exceptions are not serializable. Instead, exceptions are translated - * to some other form when sent over the wire. + * Additionally, DruidException is not intended to be directly serialized. The intention is that something converts + * it into an {@link ErrorResponse} first using {@link ErrorResponse#ErrorResponse(DruidException)} and then that + * ErrorResponse is used for serialization. DruidException carries a {@link #toErrorResponse()} method because there + * are some code paths that directly serialize Exceptions and adjusting them was deemed out-of-scope for the PR that + * introduced DruidException. */ @NotThreadSafe -public abstract class DruidException extends RuntimeException +public class DruidException extends RuntimeException { - public static final String SIMPLE_MESSAGE = "${message}"; - public static final String MESSAGE_KEY = "message"; + /** + * Starts building an "adhoc" DruidException targetting the specific persona. + * + * @param persona the target persona of the exception message + * @return a builder that can be used to complete the creation of the DruidException + */ + public static DruidExceptionBuilder forPersona(Persona persona) + { + return new DruidExceptionBuilder("adhoc").forPersona(persona); + } - private final String code; - private final String message; - protected final Map values = new LinkedHashMap<>(); - protected String legacyCode; - protected String legacyClass; + /** + * Builds a DruidException using the provided Failure class. 
The errorCode is determined by the + * specific Failure class being used and the Failure class is responsible for setting all other + * required fields of the DruidException + * + * @param failure failure implementation to use to build the DruidException + * @return DruidException instance built from the Failure instance provided + */ + public static DruidException fromFailure(Failure failure) + { + return failure.makeException(new DruidExceptionBuilder(failure.getErrorCode())); + } - public DruidException( - final String code, + private final Persona targetPersona; + private final Category category; + private final String errorCode; + protected final Map context = new LinkedHashMap<>(); + + private DruidException( + Throwable cause, + final String errorCode, + Persona targetPersona, + Category category, final String message ) { - this(null, code, message); + this(cause, errorCode, targetPersona, category, message, false); } - public DruidException( - final Throwable cause, - final String code, - final String message + private DruidException( + Throwable throwable, + final String errorCode, + Persona targetPersona, + Category category, + String message, + boolean deserialized ) { - super(code, cause); - this.code = code; - this.message = message; + super(message, throwable, true, !deserialized); + this.errorCode = Preconditions.checkNotNull(errorCode, "errorCode"); + this.targetPersona = Preconditions.checkNotNull(targetPersona, "targetPersona"); + this.category = Preconditions.checkNotNull(category, "category"); } - public DruidException withValue(String key, Object value) + public DruidException withContext(String key, Object value) { - values.put(key, Objects.toString(value)); + context.put(key, value == null ? 
null : value.toString()); return this; } - public DruidException withValues(Map values) + public DruidException withContext(Map values) { - this.values.putAll(values); + this.context.putAll(values); return this; } - /** - * The error code is a summary of the error returned to the user. Multiple errors - * map to the same code: the code is more like a category of errors. Error codes - * must be backward compatible, even if the prior "codes" are awkward. - */ - public String errorCode() + public Persona getTargetPersona() { - return code; + return targetPersona; } - public String message() + public Category getCategory() { - return message; + return category; } - public Map values() + public String getErrorCode() { - return values; + return errorCode; } - // Used primarily when logging an error. - @Override - public String getMessage() + public String getContextValue(String key) { - if (values.isEmpty()) { - return code; - } - List entries = new ArrayList<>(); - for (Map.Entry entry : values.entrySet()) { - entries.add(entry.getKey() + "=[" + entry.getValue() + "]"); - } - return code + ": " + String.join(", ", entries); + return context.get(key); } - // For debugging. - @Override - public String toString() + public Map getContext() { - return format(message); + return context; } - public String format(String template) + public int getStatusCode() { - StringSubstitutor sub = new StringSubstitutor(values); - return sub.replace(template); + return category.getExpectedStatus(); } - public String format(Properties catalog) + /** + * Returns this DruidException as an ErrorResponse. This method exists for compatibility with some older code + * paths that serialize out Exceptions directly using Jackson. Instead of serializing a DruidException + * directly, code should be structured to take the DruidException and build an ErrorResponse from it to be + * used to push across the wire. + *

+ * As such, this method should be deleted in some future world. Anyone wondering how to serialize and deserialize + * a DruidException should look at {@link ErrorResponse} and leverage that instead of this. + * + * @return an ErrorResponse + */ + @JsonValue + public ErrorResponse toErrorResponse() { - String template = catalog.getProperty(code); - if (template == null) { - return toString(); - } else { - return format(template); - } + return new ErrorResponse(this); } - public ErrorResponse toErrorResponse(Properties catalog) + /** + * Builds a new DruidException with a message that is the result of prepending the message passed as a parameter + * with the message already on the DruidException. + * + * @param msg Message to be prepended, can be a Java format string + * @param args Arguments to be passed to the message if it is a Java format string + * @return a new DruidException with prepended-message + */ + public DruidException prependAndBuild(String msg, Object... args) + { + return new DruidException( + this, + errorCode, + targetPersona, + category, + StringUtils.format("%s: %s", StringUtils.nonStrictFormat(msg, args), getMessage()) + ).withContext(context); + } + + /** + * The persona that the message on a DruidException is targetting + */ + public enum Persona { - return new ErrorResponse( - code, - format(catalog), - legacyClass, - null - ); + /** + * Represents the end-user, a persona who is issuing queries to the Druid Query APIs + */ + USER, + /** + * Represents an administrative user, a persona who is interacting with admin APIs and understands Druid query + * concepts without necessarily owning the infrastructure and operations of the cluster + */ + ADMIN, + /** + * Represents a persona who actively owns and operates the cluster. This persona is not assumed to understand + * Druid query concepts, but instead understand cluster operational concepts. 
+ */ + OPERATOR, + /** + * Represents someone who has all of the context and knowledge to be actively diving into the Druid codebase. + * This persona exists as a catch-all for anything that is so deep and technically in the weeds that it is not + * possible to make a message that will make sense to a different persona. Generally speaking, there is a hope + * that only DEFENSIVE error messages will target this persona. + */ + DEVELOPER } - public abstract ErrorAudience audience(); - public abstract int httpStatus(); + /** + * Category of error. The simplest way to describe this is that it exists as a classification of errors that + * enables us to identify the expected response code (e.g. HTTP status code) of a specific DruidException + */ + public enum Category + { + /** + * Means that the exception is being created defensively, because we want to validate something but expect that + * it should never actually be hit. Using this category is good to provide an indication to future reviewers and + * developers that the case being checked is not intended to actually be able to occur in the wild. + */ + DEFENSIVE(500), + /** + * Means that the input provided was malformed in some way. Generally speaking, it is hoped that errors of this + * category have messages written either targetting the USER or ADMIN personas as those are the general users + * of the APIs who could generate invalid inputs. + */ + INVALID_INPUT(400), + /** + * Means that the error is a problem with authorization. + */ + UNAUTHORIZED(401), + /** + * Means that some capacity limit was exceeded, this could be due to throttling or due to some system limit + */ + CAPACITY_EXCEEDED(429), + /** + * Means that the query was canceled for some reason + */ + CANCELED(500), + /** + * Indicates a server-side failure of some sort at runtime + */ + RUNTIME_FAILURE(500), + /** + * A timeout happened + */ + TIMEOUT(504), + /** + * Indicates some unsupported behavior was requested. 
TODO + */ + UNSUPPORTED(501), + /** + * A catch-all for any time when we cannot come up with a meaningful categorization. This is hopefully only + * used when converting generic exceptions from frameworks and libraries that we do not control into DruidExcpetions + */ + UNCATEGORIZED(500); + + private final int expectedStatus; - public MetricCategory metricCategory() + Category(int expectedStatus) + { + this.expectedStatus = expectedStatus; + } + + public int getExpectedStatus() + { + return expectedStatus; + } + } + + public static class DruidExceptionBuilder { - return MetricCategory.FAILED; + private String errorCode; + private Persona targetPersona; + private Category category; + + private boolean deserialized = false; + + private DruidExceptionBuilder(String errorCode) + { + this.errorCode = errorCode; + } + + public DruidExceptionBuilder forPersona(Persona targetPersona) + { + this.targetPersona = targetPersona; + return this; + } + + public DruidExceptionBuilder ofCategory(Category category) + { + this.category = category; + return this; + } + + /** + * Exists for ErrorMessage to be able to indicate that the exception was deserialized and (therefore) + * should not carry any stack-trace as the stack-trace generated would be to the deserialization code rather than + * the actual error. + * + * @return the builder + */ + DruidExceptionBuilder wasDeserialized() + { + this.deserialized = true; + return this; + } + + public DruidException build(String formatMe, Object... vals) + { + return build(null, formatMe, vals); + } + + public DruidException build(Throwable cause, String formatMe, Object... 
vals) + { + return new DruidException( + cause, + errorCode, + targetPersona, + category, + StringUtils.nonStrictFormat(formatMe, vals), + deserialized + ); + } } - public String getErrorCode() + public abstract static class Failure { - return legacyCode; + private final String errorCode; + + public Failure( + String errorCode + ) + { + this.errorCode = errorCode; + } + + public String getErrorCode() + { + return errorCode; + } + + protected abstract DruidException makeException(DruidExceptionBuilder bob); } + } diff --git a/processing/src/main/java/org/apache/druid/error/ErrorAudience.java b/processing/src/main/java/org/apache/druid/error/ErrorAudience.java deleted file mode 100644 index 698d1256f754..000000000000 --- a/processing/src/main/java/org/apache/druid/error/ErrorAudience.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.error; - -/** - * The set of persona (audiences) for Druid exceptions. The audience is - * not a technical factor: it is merely a way to encourage developers to - * think about who can act on an error message. All errors go to the user - * who submitted the request, but perhaps in a simplified, redacted form. 
- * Such messages also target the actual audience: the Druid admin, the - * cluster admin, a Druid developer, etc. - *

- * Sometimes the target audience is not known, or is ambiguous. In that - * case, just use {@link ErrorAudience#VARIOUS}. - */ -public enum ErrorAudience -{ - USER, - DRUID_ADMIN, - CLUSTER_ADMIN, - DRUID_DEVELOPER, - VARIOUS -} diff --git a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java index 8fabb377f775..f65615a94eb3 100644 --- a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java +++ b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java @@ -20,63 +20,197 @@ package org.apache.druid.error; import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import org.apache.druid.query.QueryException; import javax.annotation.Nullable; +import java.util.LinkedHashMap; +import java.util.Map; /** - * Union of the {@link org.apache.druid.query.QueryException} and - * {@link DruidExceptionV1} fields. Used in tests to deserialize errors which may - * be in either format. + * A Response Object that represents an error to be returned over the wire. This object carries legacy bits to + * deal with compatibility issues of converging the error responses from {@link QueryException} + * with the intended going-forward error responses from {@link DruidException} + *

+ * The intent is that eventually {@link QueryException} is completely subsumed by + * {@link DruidException} in which case the legacy bits of this class can hopefully also be removed. + *

+ * The intended long-term schema of output is an object that looks like + *

+ * { + * "errorCode": `a code string`, + * "persona": USER | ADMIN | OPERATOR | DEVELOPER + * "category": DEFENSIVE | INVALID_INPUT | UNAUTHORIZED | CAPACITY_EXCEEDED | CANCELED | RUNTIME_FAILURE | TIMEOUT | UNSUPPORTED | UNCATEGORIZED + * "errorMessage": `a message for the intended audience` + * "context": `a map of extra context values that might be helpful` + * } + *

+ * In the interim, there are extra fields that also end up included so that the wire-schema can also be interpreted + * and handled by clients that are built assuming they are looking at QueryExceptions. These extra fields are + *

+ * { + * "error": `an error code from QueryException` | "druidException" + * "errorClass": `the error class, as used by QueryException` + * "host": `the host that the exception occurred on, as used by QueryException` + * } + *

+ * These 3 top-level fields are deprecated and will eventually disappear from API responses. The values can, instead, + * be pulled from the context object of an "legacyQueryException" errorCode object. The field names in the context + * object map as follows + * * "error" -> "legacyErrorCode" + * * "errorClass" -> "errorClass" + * * "host" -> "host" */ public class ErrorResponse { - private final String msg; - private final String code; - private final String errorClass; - private final String host; - @JsonCreator - public ErrorResponse( - @JsonProperty("error") @Nullable String errorCode, - @JsonProperty("errorMessage") @Nullable String errorMessage, - @JsonProperty("errorClass") @Nullable String errorClass, - @JsonProperty("host") @Nullable String host - ) + public static ErrorResponse fromMap(Map map) { - this.msg = errorMessage; - this.code = errorCode; - this.errorClass = errorClass; - this.host = host; + // TODO: perhaps need to have normal DruidExceptions set error too just so that they can masquerade as + // QueryExceptions on initial release. + final DruidException.Failure failure; + + final Object legacyErrorType = map.get("error"); + if (!"druidException".equals(legacyErrorType)) { + // The non "druidException" errorCode field means that we are deserializing a legacy QueryException object rather + // than deserializing a DruidException. So, we make a QueryException, map it to a DruidException and build + // our response from that DruidException. This allows all code after us to only consider DruidException + // and helps aid the removal of QueryException. 
+ failure = new QueryExceptionCompat( + new QueryException( + nullOrString(map.get("error")), + nullOrString(map.get("errorMessage")), + nullOrString(map.get("errorClass")), + nullOrString(map.get("host")) + ) + ); + } else { + failure = new DruidException.Failure(stringOrFailure(map, "errorCode")) + { + @Override + protected DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + final DruidException retVal = bob.forPersona(DruidException.Persona.valueOf(stringOrFailure(map, "persona"))) + .ofCategory(DruidException.Category.valueOf(stringOrFailure( + map, + "category" + ))) + .build(stringOrFailure(map, "errorMessage")); + + final Object context = map.get("context"); + if (context instanceof Map) { + //noinspection unchecked + retVal.withContext((Map) context); + } + + return retVal; + } + }; + } + return new ErrorResponse(DruidException.fromFailure(new DeserializedFailure(failure))); } - @Nullable - @JsonProperty("error") - @JsonInclude(Include.NON_NULL) - public String getErrorCode() + private final DruidException underlyingException; + + public ErrorResponse(DruidException underlyingException) + { + this.underlyingException = underlyingException; + } + + @JsonValue + public Map getAsMap() + { + final LinkedHashMap retVal = new LinkedHashMap<>(); + + // This if statement is a compatibility layer to help bridge the time while we are introducing the DruidException. + // In a future release, QueryException should be completely eliminated, at which point we should also be + // able to eliminate this compatibility layer. 
+ if (QueryExceptionCompat.ERROR_CODE.equals(underlyingException.getErrorCode())) { + retVal.put("error", underlyingException.getContextValue("legacyErrorCode")); + retVal.put("errorClass", underlyingException.getContextValue("errorClass")); + retVal.put("host", underlyingException.getContextValue("host")); + } else { + retVal.put("error", "druidException"); + } + + retVal.put("errorCode", underlyingException.getErrorCode()); + retVal.put("persona", underlyingException.getTargetPersona()); + retVal.put("category", underlyingException.getCategory()); + retVal.put("errorMessage", underlyingException.getMessage()); + retVal.put("context", underlyingException.getContext()); + + return retVal; + } + + public DruidException getUnderlyingException() { - return code; + return underlyingException; } - @JsonProperty("errorMessage") - public String getMessage() + @Nullable + private static String nullOrString(Object o) { - return msg; + return o == null ? null : o.toString(); } - @JsonProperty - @JsonInclude(Include.NON_NULL) - public String getErrorClass() + private static String stringOrFailure(Map map, String key) { - return errorClass; + final Object o = map.get(key); + if (o instanceof String) { + return (String) o; + } + + final DruidException problem = DruidException + .forPersona(DruidException.Persona.DEVELOPER) + .ofCategory(DruidException.Category.DEFENSIVE) + .build("Got an error response that had a non-String value [%s] for key [%s]", o, key); + + for (Map.Entry entry : map.entrySet()) { + final Object value = entry.getValue(); + if (value != null) { + problem.withContext(entry.getKey(), value.toString()); + } + } + + throw problem; } - @JsonProperty - @JsonInclude(Include.NON_NULL) - public String getHost() + private static class DeserializedFailure extends DruidException.Failure { - return host; + private final DruidException.Failure delegate; + + public DeserializedFailure( + DruidException.Failure delegate + ) + { + super(delegate.getErrorCode()); + 
this.delegate = delegate; + } + + @Override + protected DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + // By setting wasDeserialized, we get the initial exception built with no stack-trace, we then create a new + // exception with the exact same values that will contain our current stack-trace and to be relevant inside + // of the current process. It's a little bit of a weird dance to create a new exception with the same stuff, + // it might be nice to have a DelegatingDruidException or something like that which looks like a DruidException + // but just delegates everything. That's something that can be explored another day though. + bob.wasDeserialized(); + final DruidException cause = delegate.makeException(bob); + + return DruidException.fromFailure( + new DruidException.Failure(cause.getErrorCode()) + { + @Override + protected DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + return bob.forPersona(cause.getTargetPersona()) + .ofCategory(cause.getCategory()) + .build(cause, cause.getMessage()) + .withContext(cause.getContext()); + } + } + ); + } } } diff --git a/processing/src/main/java/org/apache/druid/error/InvalidInput.java b/processing/src/main/java/org/apache/druid/error/InvalidInput.java new file mode 100644 index 000000000000..ce50d4db3763 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/InvalidInput.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.error; + +public class InvalidInput extends DruidException.Failure +{ + public static DruidException exception(String msg, Object... args) + { + return exception(null, msg, args); + } + + public static DruidException exception(Throwable t, String msg, Object... args) + { + return DruidException.fromFailure(new InvalidInput(t, msg, args)); + } + + private final Throwable t; + private final String msg; + private final Object[] args; + + public InvalidInput( + Throwable t, + String msg, + Object... args + ) + { + super("invalidInput"); + this.t = t; + this.msg = msg; + this.args = args; + } + + + @Override + public DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + bob = bob.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.INVALID_INPUT); + + if (t == null) { + return bob.build(msg, args); + } else { + return bob.build(t, msg, args); + } + } +} diff --git a/processing/src/main/java/org/apache/druid/error/ErrorCode.java b/processing/src/main/java/org/apache/druid/error/InvalidSqlInput.java similarity index 56% rename from processing/src/main/java/org/apache/druid/error/ErrorCode.java rename to processing/src/main/java/org/apache/druid/error/InvalidSqlInput.java index 174703d335f5..17a392962f9b 100644 --- a/processing/src/main/java/org/apache/druid/error/ErrorCode.java +++ b/processing/src/main/java/org/apache/druid/error/InvalidSqlInput.java @@ -19,19 +19,32 @@ package org.apache.druid.error; -public class ErrorCode +public class InvalidSqlInput extends InvalidInput 
{ - public static final String SQL_GROUP = "SQL"; - public static final String INTERNAL_GROUP = "INTERNAL"; + public static DruidException exception(String msg, Object... args) + { + return exception(null, msg, args); + } - public static final String SQL_VALIDATION_GROUP = SQL_GROUP + "-Validation"; - public static final String SQL_PARSE_GROUP = SQL_GROUP + "-Parse"; - public static final String SQL_UNSUPPORTED_GROUP = SQL_GROUP + "-Unsupported"; + public static DruidException exception(Throwable t, String msg, Object... args) + { + return DruidException.fromFailure(new InvalidSqlInput(t, msg, args)); + } - public static final String GENERAL_TAIL = "General"; + public InvalidSqlInput( + Throwable t, + String msg, + Object... args + ) + { + super(t, msg, args); + } - public static String fullCode(String base, String tail) + @Override + public DruidException makeException(DruidException.DruidExceptionBuilder bob) { - return base + "-" + tail; + final DruidException retVal = super.makeException(bob); + retVal.withContext("sourceType", "sql"); + return retVal; } } diff --git a/processing/src/main/java/org/apache/druid/error/MetricCategory.java b/processing/src/main/java/org/apache/druid/error/MetricCategory.java deleted file mode 100644 index 91328a960c8a..000000000000 --- a/processing/src/main/java/org/apache/druid/error/MetricCategory.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.error; - -public enum MetricCategory -{ - INTERRUPTED, - TIME_OUT, - FAILED -} diff --git a/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java b/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java new file mode 100644 index 000000000000..1829a41046e9 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.error; + +import org.apache.druid.query.QueryException; + +public class QueryExceptionCompat extends DruidException.Failure +{ + public static final String ERROR_CODE = "legacyQueryException"; + + private final QueryException exception; + + public QueryExceptionCompat( + QueryException exception + ) + { + super(ERROR_CODE); + this.exception = exception; + } + + @Override + protected DruidException makeException(DruidException.DruidExceptionBuilder bob) + { + return bob.forPersona(DruidException.Persona.OPERATOR) + .ofCategory(convertFailType(exception.getFailType())) + .build(exception.getMessage()) + .withContext("host", exception.getHost()) + .withContext("errorClass", exception.getErrorClass()) + .withContext("legacyErrorCode", exception.getErrorCode()); + } + + private DruidException.Category convertFailType(QueryException.FailType failType) + { + switch (failType) { + case USER_ERROR: + return DruidException.Category.INVALID_INPUT; + case UNAUTHORIZED: + return DruidException.Category.UNAUTHORIZED; + case CAPACITY_EXCEEDED: + return DruidException.Category.CAPACITY_EXCEEDED; + case QUERY_RUNTIME_FAILURE: + return DruidException.Category.RUNTIME_FAILURE; + case CANCELED: + return DruidException.Category.CANCELED; + case UNKNOWN: + return DruidException.Category.UNCATEGORIZED; + case UNSUPPORTED: + return DruidException.Category.UNSUPPORTED; + case TIMEOUT: + return DruidException.Category.TIMEOUT; + default: + return DruidException.Category.UNCATEGORIZED; + } + } +} diff --git a/processing/src/main/java/org/apache/druid/error/README.md b/processing/src/main/java/org/apache/druid/error/README.md index 6dc0d96d6722..f9b4e0e14dfc 100644 --- a/processing/src/main/java/org/apache/druid/error/README.md +++ b/processing/src/main/java/org/apache/druid/error/README.md @@ -1,3 +1,25 @@ + + +WARNING WARNING +TODO: this README has not been adjusted to align with the current code + # Guide to Druid Error Messages Errors in Druid are complex. 
Errors come from both Druid code and from libraries. @@ -243,8 +265,7 @@ error. Example: ```json { "errorCode": "", - "errorMessage": "", - "": ", ... + "errorMessage": "" } ``` diff --git a/processing/src/main/java/org/apache/druid/error/SqlParseError.java b/processing/src/main/java/org/apache/druid/error/SqlParseError.java deleted file mode 100644 index c8b99a95217d..000000000000 --- a/processing/src/main/java/org/apache/druid/error/SqlParseError.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.error; - -import org.apache.druid.query.QueryException; - -import java.net.HttpURLConnection; - -/** - * SQL query parse failed. - */ -public class SqlParseError extends DruidException -{ - public SqlParseError( - String code, - String message - ) - { - this(null, code, message); - } - - public SqlParseError( - Throwable cause, - String code, - String message - ) - { - super( - cause, - ErrorCode.fullCode(ErrorCode.SQL_PARSE_GROUP, code), - fullMessage(message) - ); - this.legacyCode = QueryException.SQL_PARSE_FAILED_ERROR_CODE; - // For backward compatibility. 
- // Calcite classes not visible here, so using a string - this.legacyClass = "org.apache.calcite.sql.parser.SqlParseException"; - } - - public static String fullMessage(String message) - { - return "Line ${line}, column ${column}: " + message; - } - - @Override - public ErrorAudience audience() - { - return ErrorAudience.USER; - } - - @Override - public int httpStatus() - { - return HttpURLConnection.HTTP_BAD_REQUEST; - } -} diff --git a/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java b/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java deleted file mode 100644 index bd165325d197..000000000000 --- a/processing/src/main/java/org/apache/druid/error/SqlUnsupportedError.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.error; - -import org.apache.druid.query.QueryException; - -import java.net.HttpURLConnection; - -/** - * SQL query validation failed, because a SQL statement asked Druid to do - * something which it does not support. This message indicates that the - * unsupported thing is by design, not because we've not gotten to it yet. 
- * For example, asking for `MAX(VARCHAR)` is not supported because it does - * not make sense. Use a different exception if the error is due to something - * that Druid should support, but doesn't yet. - * - * @see {@link SqlValidationError} for the general validation error case. - */ -public class SqlUnsupportedError extends DruidException -{ - public SqlUnsupportedError( - String code, - String message - ) - { - this(null, code, message); - } - - public SqlUnsupportedError( - Throwable cause, - String code, - String message - ) - { - super( - cause, - ErrorCode.fullCode(ErrorCode.SQL_UNSUPPORTED_GROUP, code), - message - ); - // For backward compatibility. - // Calcite classes not visible here, so using a string - this.legacyClass = "org.apache.calcite.plan.RelOptPlanner$CannotPlanException"; - this.legacyCode = QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE; - } - - @Override - public ErrorAudience audience() - { - return ErrorAudience.USER; - } - - @Override - public int httpStatus() - { - return HttpURLConnection.HTTP_BAD_REQUEST; - } - - public static DruidException unsupportedAggType(String agg, Object type) - { - return new SqlUnsupportedError( - "InvalidAggArg", - "${fn} aggregation is not supported for type [${type}]" - ) - .withValue("fn", agg) - .withValue("type", type); - } - - public static DruidException cannotUseOperator(String op, Throwable cause) - { - throw new SqlUnsupportedError( - "Operator", - "Cannot use [${op}]: [${message}]" - ) - .withValue("op", op) - .withValue(DruidException.MESSAGE_KEY, cause.getMessage()); - } -} diff --git a/processing/src/main/java/org/apache/druid/error/SqlValidationError.java b/processing/src/main/java/org/apache/druid/error/SqlValidationError.java deleted file mode 100644 index 16ccae3519b2..000000000000 --- a/processing/src/main/java/org/apache/druid/error/SqlValidationError.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.error; - -import org.apache.druid.query.QueryException; - -import java.net.HttpURLConnection; - -/** - * SQL query validation failed, most likely due to a problem in the SQL statement - * which the user provided. - * - * @see {@link SqlUnsupportedError} for the special case - * in which the SQL asked to do something Druid does not support. 
- */ -public class SqlValidationError extends DruidException -{ - public SqlValidationError( - String code, - String message - ) - { - this(null, code, message); - } - - public SqlValidationError( - Throwable cause, - String code, - String message - ) - { - super( - cause, - ErrorCode.fullCode(ErrorCode.SQL_VALIDATION_GROUP, code), - message - ); - this.legacyClass = "org.apache.calcite.tools.ValidationException"; - this.legacyCode = QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE; - } - - @Override - public ErrorAudience audience() - { - return ErrorAudience.USER; - } - - @Override - public int httpStatus() - { - return HttpURLConnection.HTTP_BAD_REQUEST; - } - - public static DruidException forCause(Throwable e) - { - return new SqlValidationError( - ErrorCode.GENERAL_TAIL, - SIMPLE_MESSAGE - ) - .withValue(MESSAGE_KEY, e.getMessage()); - } -} diff --git a/processing/src/main/java/org/apache/druid/java/util/common/logger/Logger.java b/processing/src/main/java/org/apache/druid/java/util/common/logger/Logger.java index 3872c2cf7686..1639bf5378f8 100644 --- a/processing/src/main/java/org/apache/druid/java/util/common/logger/Logger.java +++ b/processing/src/main/java/org/apache/druid/java/util/common/logger/Logger.java @@ -33,6 +33,37 @@ import java.util.function.BiConsumer; import java.util.stream.Stream; +/** + * A Logger for usage inside of Druid. Provides a layer that allows for simple changes to the logging framework + * with minimal changes to the Druid code. + * + * Log levels are used as an indication of urgency around the behavior that is being logged. The intended generic + * rubric for when to use the different logging levels is as follows. + * + * DEBUG: something that a developer wants to look at while actively debugging, but should not be included by default. + * + * INFO: a message that is useful to have when trying to retro-actively understand what happened in a running system. + * There is often a fine line between INFO and DEBUG. 
We want information from INFO logs but do not want to spam log + * files either. One rubric to use to help determine if something should be INFO or DEBUG is how often we expect the + * line to be logged. If there is clarity that it will happen in a controlled manner such that it does not spam the + * logs, then INFO is fine. Additionally, it can be okay to log at INFO level even if there is a risk of spamming the + * log file in the case that the log line only happens in specific "error-oriented" situations, this is because such + * error-oriented situations are more likely to necessitate reading and understanding the logs to eliminate the error. + * Additionally, it is perfectly acceptable and reasonable to log an exception at INFO level. + * + * WARN: a message that indicates something bad has happened in the system that a human should potentially investigate. + * While it is bad and deserves investigation, it is of a nature that it should be able to wait until the next + * "business day" for investigation instead of needing immediate attention. + * + * ERROR: a message that indicates that something bad has happened such that a human operator should take immediate + * intervention to triage and resolve the issue as it runs a risk to the smooth operations of the system. Logs at + * the ERROR level should generally be severe enough to warrant paging someone in the middle of the night. + * + * Even though this is the intended rubric, it is very difficult to ensure that, e.g. all ERROR log lines are pageable + * offenses. As such, it is questionable whether an operator should actually ALWAYS page on every ERROR log line, + * but as a directional target of when and how to log things, the above rubric should be used to evaluate if a log + * line is at the correct level. 
+ */ public class Logger { @VisibleForTesting diff --git a/processing/src/main/java/org/apache/druid/segment/nested/NestedPathFinder.java b/processing/src/main/java/org/apache/druid/segment/nested/NestedPathFinder.java index b275199bb0e7..5d0596a6cc82 100644 --- a/processing/src/main/java/org/apache/druid/segment/nested/NestedPathFinder.java +++ b/processing/src/main/java/org/apache/druid/segment/nested/NestedPathFinder.java @@ -19,7 +19,8 @@ package org.apache.druid.segment.nested; -import org.apache.druid.java.util.common.IAE; +import org.apache.druid.error.InvalidInput; +import org.apache.druid.java.util.common.StringUtils; import javax.annotation.Nullable; import java.util.ArrayList; @@ -74,7 +75,7 @@ public static List parseJsonPath(@Nullable String path) List parts = new ArrayList<>(); if (!path.startsWith(JSON_PATH_ROOT)) { - badFormatJsonPath(path, "must start with '$'"); + badFormatJsonPath(path, "it must start with '$'"); } if (path.length() == 1) { @@ -97,7 +98,7 @@ public static List parseJsonPath(@Nullable String path) partMark = i + 1; } else if (current == '[' && arrayMark < 0 && quoteMark < 0) { if (dotMark == (i - 1) && dotMark != 0) { - badFormatJsonPath(path, "invalid position " + i + " for '[', must not follow '.' or must be contained with '"); + badFormatJsonPath(path, "found '[' at invalid position [%s], must not follow '.' or must be contained with '", i); } if (dotMark >= 0 && i > 1) { parts.add(new NestedPathField(getPathSubstring(path, partMark, i))); @@ -115,13 +116,13 @@ public static List parseJsonPath(@Nullable String path) partMark = i + 1; } catch (NumberFormatException ignored) { - badFormatJsonPath(path, "expected number for array specifier got " + maybeNumber + " instead. Use ' if this value was meant to be a field name"); + badFormatJsonPath(path, "array specifier [%s] should be a number, it was not. 
Use ' if this value was meant to be a field name", maybeNumber); } } else if (dotMark == -1 && arrayMark == -1) { badFormatJsonPath(path, "path parts must be separated with '.'"); } else if (current == '\'' && quoteMark < 0) { if (arrayMark != i - 1) { - badFormatJsonPath(path, "' must be immediately after '['"); + badFormatJsonPath(path, "single-quote (') must be immediately after '['"); } quoteMark = i; partMark = i + 1; @@ -130,7 +131,7 @@ public static List parseJsonPath(@Nullable String path) if (arrayMark >= 0) { continue; } - badFormatJsonPath(path, "closing ' must immediately precede ']'"); + badFormatJsonPath(path, "closing single-quote (') must immediately precede ']'"); } parts.add(new NestedPathField(getPathSubstring(path, partMark, i))); @@ -147,7 +148,7 @@ public static List parseJsonPath(@Nullable String path) // add the last element, this should never be an array because they close themselves if (partMark < path.length()) { if (quoteMark != -1) { - badFormatJsonPath(path, "unterminated '"); + badFormatJsonPath(path, "unterminated single-quote (')"); } if (arrayMark != -1) { badFormatJsonPath(path, "unterminated '['"); @@ -195,7 +196,7 @@ public static List parseJqPath(@Nullable String path) List parts = new ArrayList<>(); if (path.charAt(0) != '.') { - badFormat(path, "must start with '.'"); + badFormat(path, "it must start with '.'"); } int partMark = -1; // position to start the next substring to build the path part @@ -217,13 +218,13 @@ public static List parseJqPath(@Nullable String path) parts.add(new NestedPathField(getPathSubstring(path, partMark, i))); dotMark = -1; } else { - badFormat(path, "invalid position " + i + " for '?'"); + badFormat(path, "found '?' at invalid position [%s]", i); } } partMark = i + 1; } else if (current == '[' && arrayMark < 0 && quoteMark < 0) { if (dotMark == (i - 1) && dotMark != 0) { - badFormat(path, "invalid position " + i + " for '[', must not follow '.' 
or must be contained with '\"'"); + badFormat(path, "found '[' at invalid position [%s], must not follow '.' or must be contained with '\"'", i); } if (dotMark >= 0 && i > 1) { parts.add(new NestedPathField(getPathSubstring(path, partMark, i))); @@ -241,16 +242,16 @@ public static List parseJqPath(@Nullable String path) partMark = i + 1; } catch (NumberFormatException ignored) { - badFormat(path, "expected number for array specifier got " + maybeNumber + " instead. Use \"\" if this value was meant to be a field name"); + badFormat(path, "array specifier [%s] should be a number, it was not. Use \"\" if this value was meant to be a field name", maybeNumber); } } else if (dotMark == -1 && arrayMark == -1) { badFormat(path, "path parts must be separated with '.'"); } else if (current == '"' && quoteMark < 0) { if (partMark != i) { - badFormat(path, "invalid position " + i + " for '\"', must immediately follow '.' or '['"); + badFormat(path, "found '\"' at invalid position [%s], it must immediately follow '.' or '['", i); } if (arrayMark > 0 && arrayMark != i - 1) { - badFormat(path, "'\"' within '[' must be immediately after"); + badFormat(path, "'\"' within '[', must be immediately after"); } quoteMark = i; partMark = i + 1; @@ -295,14 +296,14 @@ private static String getPathSubstring(String path, int start, int end) return path.substring(start, end); } - private static void badFormat(String path, String message) + private static void badFormat(String path, String message, Object... args) { - throw new IAE("Bad format, '%s' is not a valid 'jq' path: %s", path, message); + throw InvalidInput.exception("jq path [%s] is invalid, %s", path, StringUtils.format(message, args)); } - private static void badFormatJsonPath(String path, String message) + private static void badFormatJsonPath(String path, String message, Object... 
args) { - throw new IAE("Bad format, '%s' is not a valid JSONPath path: %s", path, message); + throw InvalidInput.exception("JSONPath [%s] is invalid, %s", path, StringUtils.format(message, args)); } /** diff --git a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java new file mode 100644 index 000000000000..d25f6e7f068f --- /dev/null +++ b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.error; + +import org.apache.druid.matchers.DMatchers; +import org.hamcrest.Description; +import org.hamcrest.DiagnosingMatcher; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import org.hamcrest.core.AllOf; + +import java.util.ArrayList; + +public class DruidExceptionMatcher extends DiagnosingMatcher +{ + public static DruidExceptionMatcher invalidInput() + { + return new DruidExceptionMatcher( + DruidException.Persona.USER, + DruidException.Category.INVALID_INPUT, + "invalidInput" + ); + } + + public static DruidExceptionMatcher invalidSqlInput() + { + return invalidInput().expectContext("sourceType", "sql"); + } + + private final AllOf delegate; + private final ArrayList> matcherList; + + public DruidExceptionMatcher( + DruidException.Persona targetPersona, + DruidException.Category category, + String errorCode + ) + { + matcherList = new ArrayList<>(); + matcherList.add(DMatchers.fn("targetPersona", DruidException::getTargetPersona, Matchers.is(targetPersona))); + matcherList.add(DMatchers.fn("category", DruidException::getCategory, Matchers.is(category))); + matcherList.add(DMatchers.fn("errorCode", DruidException::getErrorCode, Matchers.is(errorCode))); + + delegate = new AllOf<>(matcherList); + } + + public DruidExceptionMatcher expectContext(String key, String value) + { + matcherList.add(DMatchers.fn("context", DruidException::getContext, Matchers.hasEntry(key, value))); + return this; + } + + public DruidExceptionMatcher expectMessageIs(String s) + { + return expectMessage(Matchers.equalTo(s)); + } + + public DruidExceptionMatcher expectMessageContains(String contains) + { + return expectMessage(Matchers.containsString(contains)); + } + + public DruidExceptionMatcher expectMessage(Matcher messageMatcher) + { + matcherList.add(DMatchers.fn("message", DruidException::getMessage, messageMatcher)); + return this; + } + + public DruidExceptionMatcher expectException(Matcher causeMatcher) + { + 
matcherList.add(DMatchers.fn("cause", DruidException::getCause, causeMatcher)); + return this; + } + + @Override + protected boolean matches(Object item, Description mismatchDescription) + { + return delegate.matches(item, mismatchDescription); + } + + @Override + public void describeTo(Description description) + { + delegate.describeTo(description); + } +} diff --git a/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java b/processing/src/test/java/org/apache/druid/matchers/DMatchers.java similarity index 73% rename from server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java rename to processing/src/test/java/org/apache/druid/matchers/DMatchers.java index 29050f012b1c..a6bc598bbafd 100644 --- a/server/src/main/java/org/apache/druid/error/RestExceptionEncoder.java +++ b/processing/src/test/java/org/apache/druid/matchers/DMatchers.java @@ -17,12 +17,16 @@ * under the License. */ -package org.apache.druid.error; +package org.apache.druid.matchers; -import javax.ws.rs.core.Response; +import org.hamcrest.Matcher; -public interface RestExceptionEncoder +import java.util.function.Function; + +public class DMatchers { - Response encode(DruidException e); - Response.ResponseBuilder builder(DruidException e); + public static LambdaMatcher fn(String name, Function fn, Matcher matcher) + { + return new LambdaMatcher<>(name + ": ", fn, matcher); + } } diff --git a/processing/src/test/java/org/apache/druid/matchers/LambdaMatcher.java b/processing/src/test/java/org/apache/druid/matchers/LambdaMatcher.java new file mode 100644 index 000000000000..3eb50466ee1a --- /dev/null +++ b/processing/src/test/java/org/apache/druid/matchers/LambdaMatcher.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.matchers; + +import org.hamcrest.Description; +import org.hamcrest.DiagnosingMatcher; +import org.hamcrest.Matcher; + +import java.util.function.Function; + +public class LambdaMatcher extends DiagnosingMatcher +{ + private final String name; + private final Function fn; + private final Matcher matcher; + + public LambdaMatcher( + String name, + Function fn, + Matcher matcher + ) + { + this.name = name; + this.fn = fn; + this.matcher = matcher; + } + + @Override + protected boolean matches(Object item, Description mismatchDescription) + { + final S result = fn.apply((T) item); + if (!matcher.matches(result)) { + matcher.describeMismatch(result, mismatchDescription); + return false; + } + return true; + } + + @Override + public void describeTo(Description description) + { + description.appendText(name); + matcher.describeTo(description); + } +} diff --git a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java b/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java deleted file mode 100644 index b1517bf8dd71..000000000000 --- a/server/src/main/java/org/apache/druid/error/StandardRestExceptionEncoder.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.error; - -import org.apache.druid.java.util.common.logger.Logger; - -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.Response.ResponseBuilder; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.Reader; -import java.nio.charset.StandardCharsets; -import java.util.Properties; - -public class StandardRestExceptionEncoder implements RestExceptionEncoder -{ - private static final RestExceptionEncoder INSTANCE = new StandardRestExceptionEncoder(); - private static final Logger LOG = new Logger(StandardRestExceptionEncoder.class); - - private final Properties catalog; - - public static RestExceptionEncoder instance() - { - return INSTANCE; - } - - public StandardRestExceptionEncoder() - { - // Load the default error catalog, if it exists. 
- this.catalog = new Properties(); - File catalogFile = new File("conf/druid/errors.properties"); - if (catalogFile.isFile()) { - try (Reader reader = new BufferedReader( - new InputStreamReader( - new FileInputStream(catalogFile), - StandardCharsets.UTF_8))) { - this.catalog.load(reader); - LOG.info( - "Loaded [%d] entries from error catalog file [%s]", - catalog.size(), - catalogFile.getAbsolutePath() - ); - } - catch (IOException e) { - // Warn about failures, but don't take the server down. We'll run - // with standard errors. - LOG.error(e, "Failed to load error catalog file [%s]", catalogFile.getAbsolutePath()); - } - } - } - - @Override - public ResponseBuilder builder(DruidException e) - { - return Response - .status(Response.Status.fromStatusCode(e.httpStatus())) - .entity(e.toErrorResponse(catalog)) - .type(MediaType.APPLICATION_JSON); - } - - @Override - public Response encode(DruidException e) - { - return builder(e).build(); - } -} diff --git a/server/src/main/java/org/apache/druid/server/QueryResource.java b/server/src/main/java/org/apache/druid/server/QueryResource.java index 96194eed5caf..f4a7ab3edb75 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResource.java +++ b/server/src/main/java/org/apache/druid/server/QueryResource.java @@ -34,7 +34,6 @@ import com.google.common.collect.Iterables; import com.google.inject.Inject; import org.apache.druid.client.DirectDruidClient; -import org.apache.druid.error.StandardRestExceptionEncoder; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.annotations.Json; import org.apache.druid.guice.annotations.Self; @@ -538,8 +537,7 @@ public QueryResourceQueryResultPusher( QueryResource.this.counter, queryLifecycle.getQueryId(), MediaType.valueOf(io.getResponseWriter().getResponseType()), - ImmutableMap.of(), - StandardRestExceptionEncoder.instance() + ImmutableMap.of() ); this.req = req; this.queryLifecycle = queryLifecycle; diff --git 
a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java index 09cb7b4657f8..a7c8705c1180 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java +++ b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java @@ -24,7 +24,8 @@ import com.google.common.io.CountingOutputStream; import org.apache.druid.client.DirectDruidClient; import org.apache.druid.error.DruidException; -import org.apache.druid.error.RestExceptionEncoder; +import org.apache.druid.error.ErrorResponse; +import org.apache.druid.error.QueryExceptionCompat; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.RE; import org.apache.druid.java.util.common.StringUtils; @@ -45,8 +46,6 @@ import javax.servlet.http.HttpServletResponse; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import javax.ws.rs.core.StreamingOutput; - import java.io.Closeable; import java.io.IOException; import java.io.OutputStream; @@ -64,7 +63,6 @@ public abstract class QueryResultPusher private final QueryResource.QueryMetricCounter counter; private final MediaType contentType; private final Map extraHeaders; - private final RestExceptionEncoder exceptionEncoder; private StreamingHttpResponseAccumulator accumulator; private AsyncContext asyncContext; @@ -78,8 +76,7 @@ public QueryResultPusher( QueryResource.QueryMetricCounter counter, String queryId, MediaType contentType, - Map extraHeaders, - RestExceptionEncoder exceptionEncoder + Map extraHeaders ) { this.request = request; @@ -90,7 +87,6 @@ public QueryResultPusher( this.counter = counter; this.contentType = contentType; this.extraHeaders = extraHeaders; - this.exceptionEncoder = exceptionEncoder; } /** @@ -118,19 +114,10 @@ public QueryResultPusher( @Nullable public Response push() { - // Create the results writer outside the try/catch block. The block uses - // the results writer on failure. 
But, if start() fails, we have a null - // resultsWriter and we'll get an NPE. Instead, if start() fails, just - // let any exception bubble up. ResultsWriter resultsWriter = null; try { resultsWriter = start(); - } - catch (RuntimeException e) { - log.warn(e, "Failed to obtain the results writer for query [%s]", queryId); - throw e; - } - try { + final Response.ResponseBuilder startResponse = resultsWriter.start(); if (startResponse != null) { startResponse.header(QueryResource.QUERY_ID_RESPONSE_HEADER, queryId); @@ -192,10 +179,6 @@ public Response push() catch (IOException ioEx) { return handleQueryException(resultsWriter, new QueryInterruptedException(ioEx)); } - catch (Throwable t) { - // May only occur in tests. - return handleQueryException(resultsWriter, new QueryInterruptedException(t)); - } finally { if (accumulator != null) { try { @@ -225,58 +208,47 @@ public Response push() @Nullable private Response handleQueryException(ResultsWriter resultsWriter, QueryException e) { - if (accumulator != null && accumulator.isInitialized()) { - // We already started sending a response when we got the error message. In this case we just give up - // and hope that the partial stream generates a meaningful failure message for our client. We could consider - // also throwing the exception body into the response to make it easier for the client to choke if it manages - // to parse a meaningful object out, but that's potentially an API change so we leave that as an exercise for - // the future. + return handleDruidException(resultsWriter, DruidException.fromFailure(new QueryExceptionCompat(e))); + } + private Response handleDruidException(ResultsWriter resultsWriter, DruidException e) + { + if (resultsWriter != null) { resultsWriter.recordFailure(e); - - // This case is always a failure because the error happened mid-stream of sending results back. 
Therefore, - // we do not believe that the response stream was actually usable counter.incrementFailed(); - return null; + + if (accumulator != null && accumulator.isInitialized()) { + // We already started sending a response when we got the error message. In this case we just give up + // and hope that the partial stream generates a meaningful failure message for our client. We could consider + // also throwing the exception body into the response to make it easier for the client to choke if it manages + // to parse a meaningful object out, but that's potentially an API change so we leave that as an exercise for + // the future. + return null; + } } - final QueryException.FailType failType = e.getFailType(); - switch (failType) { - case USER_ERROR: + switch (e.getCategory()) { + case INVALID_INPUT: case UNAUTHORIZED: - case QUERY_RUNTIME_FAILURE: + case RUNTIME_FAILURE: case CANCELED: counter.incrementInterrupted(); break; case CAPACITY_EXCEEDED: case UNSUPPORTED: + case UNCATEGORIZED: counter.incrementFailed(); break; case TIMEOUT: counter.incrementTimedOut(); break; - case UNKNOWN: - log.warn( - e, - "Unknown errorCode[%s], support needs to be added for error handling.", - e.getErrorCode() - ); - counter.incrementFailed(); } - resultsWriter.recordFailure(e); - - final int responseStatus = failType.getExpectedStatus(); - if (response == null) { - // No response object yet, so assume we haven't started the async context and is safe to return Response final Response.ResponseBuilder bob = Response - .status(responseStatus) + .status(e.getStatusCode()) .type(contentType) - .entity((StreamingOutput) output -> { - writeException(e, output); - output.close(); - }); + .entity(new ErrorResponse(e)); bob.header(QueryResource.QUERY_ID_RESPONSE_HEADER, queryId); for (Map.Entry entry : extraHeaders.entrySet()) { @@ -289,7 +261,7 @@ private Response handleQueryException(ResultsWriter resultsWriter, QueryExceptio QueryResource.NO_STACK_LOGGER.warn(e, "Response was committed 
without the accumulator writing anything!?"); } - response.setStatus(responseStatus); + response.setStatus(e.getStatusCode()); response.setHeader("Content-Type", contentType.toString()); try (ServletOutputStream out = response.getOutputStream()) { writeException(e, out); @@ -305,45 +277,6 @@ private Response handleQueryException(ResultsWriter resultsWriter, QueryExceptio } } - private Response handleDruidException(ResultsWriter resultsWriter, DruidException e) - { - if (accumulator != null && accumulator.isInitialized()) { - // We already started sending a response when we got the error message. In this case we just give up - // and hope that the partial stream generates a meaningful failure message for our client. We could consider - // also throwing the exception body into the response to make it easier for the client to choke if it manages - // to parse a meaningful object out, but that's potentially an API change so we leave that as an exercise for - // the future. - - resultsWriter.recordFailure(e); - - // This case is always a failure because the error happened mid-stream of sending results back. 
Therefore, - // we do not believe that the response stream was actually usable - counter.incrementFailed(); - return null; - } - - switch (e.metricCategory()) { - case INTERRUPTED: - counter.incrementInterrupted(); - break; - case TIME_OUT: - counter.incrementTimedOut(); - break; - default: - counter.incrementFailed(); - break; - } - - resultsWriter.recordFailure(e); - - final Response.ResponseBuilder bob = exceptionEncoder.builder(e); - bob.header(QueryResource.QUERY_ID_RESPONSE_HEADER, queryId); - for (Map.Entry entry : extraHeaders.entrySet()) { - bob.header(entry.getKey(), entry.getValue()); - } - return bob.build(); - } - public interface ResultsWriter extends Closeable { /** diff --git a/sql/pom.xml b/sql/pom.xml index eb2c1bf7a39c..d032a64f5e2f 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -229,6 +229,11 @@ test-jar test + + org.hamcrest + hamcrest-all + test + org.hamcrest hamcrest-core diff --git a/sql/src/main/codegen/includes/common.ftl b/sql/src/main/codegen/includes/common.ftl index 2eccdbc2a5a5..8de677647bef 100644 --- a/sql/src/main/codegen/includes/common.ftl +++ b/sql/src/main/codegen/includes/common.ftl @@ -65,7 +65,6 @@ org.apache.druid.java.util.common.Pair PartitionGranularity e = Expression(ExprContext.ACCEPT_SUB_QUERY) { granularity = DruidSqlParserUtils.convertSqlNodeToGranularityThrowingParseExceptions(e); - DruidSqlParserUtils.throwIfUnsupportedGranularityInPartitionedBy(granularity); unparseString = e.toString(); } ) diff --git a/sql/src/main/codegen/includes/insert.ftl b/sql/src/main/codegen/includes/insert.ftl index c0e04bc77245..a0482dbf8a6d 100644 --- a/sql/src/main/codegen/includes/insert.ftl +++ b/sql/src/main/codegen/includes/insert.ftl @@ -38,7 +38,9 @@ SqlNode DruidSqlInsertEof() : ] { if (clusteredBy != null && partitionedBy.lhs == null) { - throw new ParseException("CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause"); + throw org.apache.druid.sql.calcite.parser.DruidSqlParserUtils.problemParsing( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + ); } } // EOF is also present in SqlStmtEof but EOF is a special case and a single EOF can be consumed multiple times. diff --git a/sql/src/main/codegen/includes/replace.ftl b/sql/src/main/codegen/includes/replace.ftl index ed8dbb10eed2..5d47c9195816 100644 --- a/sql/src/main/codegen/includes/replace.ftl +++ b/sql/src/main/codegen/includes/replace.ftl @@ -58,7 +58,9 @@ SqlNode DruidSqlReplaceEof() : ] { if (clusteredBy != null && partitionedBy.lhs == null) { - throw new ParseException("CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause"); + throw org.apache.druid.sql.calcite.parser.DruidSqlParserUtils.problemParsing( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + ); } } // EOF is also present in SqlStmtEof but EOF is a special case and a single EOF can be consumed multiple times. 
diff --git a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java index 7499ecb2a68a..0bcf0f684caf 100644 --- a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java @@ -21,7 +21,8 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.calcite.plan.RelOptPlanner; -import org.apache.druid.error.DruidAssertionError; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; @@ -228,13 +229,20 @@ public ResultSet plan() } catch (RelOptPlanner.CannotPlanException e) { // Not sure if this is even thrown here. - throw new DruidAssertionError(e, "Cannot plan SQL query"); + throw DruidException.forPersona(DruidException.Persona.DEVELOPER) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build(e, "Problem planning SQL query"); } catch (RuntimeException e) { state = State.FAILED; reporter.failed(e); throw e; } + catch (AssertionError e) { + state = State.FAILED; + reporter.failed(e); + throw InvalidSqlInput.exception(e, "Calcite assertion violated: [%s]", e.getMessage()); + } } /** diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java index 50df9ab83410..ec914dac7f8e 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/ArraySqlAggregator.java @@ -51,7 +51,6 @@ import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry; import javax.annotation.Nullable; - import java.util.List; import java.util.stream.Collectors; @@ -166,9 +165,6 @@ static class 
ArrayAggReturnTypeInference implements SqlReturnTypeInference public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); - if (type instanceof RowSignatures.ComplexSqlType) { - throw DruidException.unsupportedError("Cannot use ARRAY_AGG on complex inputs %s", type); - } return sqlOperatorBinding.getTypeFactory().createArrayType( type, -1 diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/AvgSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/AvgSqlAggregator.java index e7e252cf8e38..8d84ab30760f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/AvgSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/AvgSqlAggregator.java @@ -32,8 +32,8 @@ import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.post.ArithmeticPostAggregator; import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator; +import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; -import org.apache.druid.segment.column.ValueType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.aggregation.Aggregations; import org.apache.druid.sql.calcite.aggregation.SqlAggregator; @@ -95,12 +95,12 @@ public Aggregation toDruidAggregation( final DruidExpression arg = Iterables.getOnlyElement(arguments); final ExprMacroTable macroTable = plannerContext.getExprMacroTable(); - final ValueType sumType; + final ColumnType sumType; // Use 64-bit sum regardless of the type of the AVG aggregator. 
if (SqlTypeName.INT_TYPES.contains(aggregateCall.getType().getSqlTypeName())) { - sumType = ValueType.LONG; + sumType = ColumnType.LONG; } else { - sumType = ValueType.DOUBLE; + sumType = ColumnType.DOUBLE; } final String fieldName; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java index e184373cc776..0137689a8512 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/EarliestLatestAnySqlAggregator.java @@ -35,9 +35,8 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.DruidAssertionError; -import org.apache.druid.error.SqlUnsupportedError; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.any.DoubleAnyAggregatorFactory; import org.apache.druid.query.aggregation.any.FloatAnyAggregatorFactory; @@ -64,7 +63,6 @@ import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry; import javax.annotation.Nullable; - import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -79,7 +77,13 @@ enum AggregatorType { EARLIEST { @Override - AggregatorFactory createAggregatorFactory(String name, String fieldName, String timeColumn, ColumnType type, int maxStringBytes) + AggregatorFactory createAggregatorFactory( + String name, + String fieldName, + String timeColumn, + ColumnType type, + int maxStringBytes + ) { switch (type.getType()) { case LONG: @@ -92,14 +96,20 @@ AggregatorFactory createAggregatorFactory(String name, String 
fieldName, String case COMPLEX: return new StringFirstAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw SqlUnsupportedError.unsupportedAggType("EARLIEST", type); + throw SimpleSqlAggregator.badTypeException(fieldName, "EARLIEST", type); } } }, LATEST { @Override - AggregatorFactory createAggregatorFactory(String name, String fieldName, String timeColumn, ColumnType type, int maxStringBytes) + AggregatorFactory createAggregatorFactory( + String name, + String fieldName, + String timeColumn, + ColumnType type, + int maxStringBytes + ) { switch (type.getType()) { case LONG: @@ -112,14 +122,20 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case COMPLEX: return new StringLastAggregatorFactory(name, fieldName, timeColumn, maxStringBytes); default: - throw SqlUnsupportedError.unsupportedAggType("LATEST", type); + throw SimpleSqlAggregator.badTypeException(fieldName, "LATEST", type); } } }, ANY_VALUE { @Override - AggregatorFactory createAggregatorFactory(String name, String fieldName, String timeColumn, ColumnType type, int maxStringBytes) + AggregatorFactory createAggregatorFactory( + String name, + String fieldName, + String timeColumn, + ColumnType type, + int maxStringBytes + ) { switch (type.getType()) { case LONG: @@ -131,7 +147,7 @@ AggregatorFactory createAggregatorFactory(String name, String fieldName, String case STRING: return new StringAnyAggregatorFactory(name, fieldName, maxStringBytes); default: - throw SqlUnsupportedError.unsupportedAggType("ANY", type); + throw SimpleSqlAggregator.badTypeException(fieldName, "ANY", type); } } }; @@ -189,21 +205,30 @@ public Aggregation toDruidAggregation( final String aggregatorName = finalizeAggregations ? 
Calcites.makePrefixedName(name, "a") : name; final ColumnType outputType = Calcites.getColumnTypeForRelDataType(aggregateCall.getType()); if (outputType == null) { - throw new DruidAssertionError( - "[${fn}] cannot translate output SQL type [${type}] to a Druid type" - ) - .withValue("fn", aggregateCall.getName()) - .withValue("type", aggregateCall.getType().getSqlTypeName()); + throw DruidException.forPersona(DruidException.Persona.ADMIN) + .ofCategory(DruidException.Category.DEFENSIVE) + .build( + "Cannot convert output SQL type[%s] to a Druid type for function [%s]", + aggregateCall.getName(), + aggregateCall.getType().getSqlTypeName() + ); } final String fieldName = getColumnName(plannerContext, virtualColumnRegistry, args.get(0), rexNodes.get(0)); if (!rowSignature.contains(ColumnHolder.TIME_COLUMN_NAME) && (aggregatorType == AggregatorType.LATEST || aggregatorType == AggregatorType.EARLIEST)) { - plannerContext.setPlanningError("%s() aggregator depends on __time column, the underlying datasource " - + "or extern function you are querying doesn't contain __time column, " - + "Please use %s_BY() and specify the time column you want to use", - aggregatorType.name(), - aggregatorType.name() + // This code is being run as part of the exploratory volcano planner, currently, the definition of these + // aggregators does not tell Calcite that they depend on a __time column being in existence, instead we are + // allowing the volcano planner to explore paths that put projections which eliminate the time column in between + // the table scan and the aggregation and then relying on this check to tell Calcite that the plan is bogus. + // In some future, it would be good to make the aggregator definition capable of telling Calcite that it depends + // on a __time column to be in existence. Or perhaps we should just kill these aggregators and have everything + // move to the _BY aggregators that require an explicit definition. 
Either way, for now, we set this potential + // error and let the volcano planner continue exploring + plannerContext.setPlanningError( + "LATEST and EARLIEST aggregators implicitly depend on the __time column, but the " + + "table queried doesn't contain a __time column. Please use LATEST_BY or EARLIEST_BY " + + "and specify the column explicitly." ); return null; } @@ -219,7 +244,11 @@ public Aggregation toDruidAggregation( maxStringBytes = RexLiteral.intValue(rexNodes.get(1)); } catch (AssertionError ae) { - plannerContext.setPlanningError("The second argument '%s' to function '%s' is not a number", rexNodes.get(1), aggregateCall.getName()); + plannerContext.setPlanningError( + "The second argument '%s' to function '%s' is not a number", + rexNodes.get(1), + aggregateCall.getName() + ); return null; } theAggFactory = aggregatorType.createAggregatorFactory( @@ -231,12 +260,11 @@ public Aggregation toDruidAggregation( ); break; default: - throw new SqlValidationError( - "WrongArgCount", - "[${fn}] expects 1 or 2 arguments but found [${count}]" - ) - .withValue("fn", aggregateCall.getName()) - .withValue("count", args.size()); + throw InvalidSqlInput.exception( + "Function [%s] expects 1 or 2 arguments but found [%s]", + aggregateCall.getName(), + args.size() + ); } return Aggregation.create( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java index 71d4cf3e620b..4f29b276a544 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MaxSqlAggregator.java @@ -22,14 +22,12 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.druid.error.SqlUnsupportedError; import 
org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory; import org.apache.druid.query.aggregation.FloatMaxAggregatorFactory; import org.apache.druid.query.aggregation.LongMaxAggregatorFactory; import org.apache.druid.segment.column.ColumnType; -import org.apache.druid.segment.column.ValueType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.planner.Calcites; @@ -53,17 +51,17 @@ Aggregation getAggregation( if (valueType == null) { return null; } - return Aggregation.create(createMaxAggregatorFactory(valueType.getType(), name, fieldName, macroTable)); + return Aggregation.create(createMaxAggregatorFactory(valueType, name, fieldName, macroTable)); } private static AggregatorFactory createMaxAggregatorFactory( - final ValueType aggregationType, + final ColumnType aggregationType, final String name, final String fieldName, final ExprMacroTable macroTable ) { - switch (aggregationType) { + switch (aggregationType.getType()) { case LONG: return new LongMaxAggregatorFactory(name, fieldName, null, macroTable); case FLOAT: @@ -73,7 +71,7 @@ private static AggregatorFactory createMaxAggregatorFactory( default: // This error refers to the Druid type. But, we're in SQL validation. // It should refer to the SQL type. 
- throw SqlUnsupportedError.unsupportedAggType("MAX", aggregationType); + throw SimpleSqlAggregator.badTypeException(fieldName, "MAX", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java index a6e7bda3e7fe..93b87d376b52 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/MinSqlAggregator.java @@ -22,7 +22,6 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleMinAggregatorFactory; @@ -67,7 +66,7 @@ private static AggregatorFactory createMinAggregatorFactory( case DOUBLE: return new DoubleMinAggregatorFactory(name, fieldName, null, macroTable); default: - throw SqlUnsupportedError.unsupportedAggType("MIN", aggregationType); + throw SimpleSqlAggregator.badTypeException(fieldName, "MIN", aggregationType); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SimpleSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SimpleSqlAggregator.java index 643a99394fec..3ac68483611a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SimpleSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SimpleSqlAggregator.java @@ -23,7 +23,10 @@ import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.rel.core.Project; import org.apache.calcite.rex.RexBuilder; +import org.apache.druid.error.DruidException; +import 
org.apache.druid.error.InvalidSqlInput; import org.apache.druid.math.expr.ExprMacroTable; +import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.aggregation.Aggregations; @@ -45,6 +48,11 @@ */ public abstract class SimpleSqlAggregator implements SqlAggregator { + public static DruidException badTypeException(String columnName, String agg, ColumnType type) + { + return InvalidSqlInput.exception("Aggregation [%s] does not support type [%s], column [%s]", agg, type, columnName); + } + @Nullable @Override public Aggregation toDruidAggregation( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java index 6f0e3daf6561..4411ebd33cdc 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/StringSqlAggregator.java @@ -27,6 +27,7 @@ import org.apache.calcite.rex.RexLiteral; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlAggFunction; +import org.apache.calcite.sql.SqlCallBinding; import org.apache.calcite.sql.SqlFunctionCategory; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlOperatorBinding; @@ -36,7 +37,6 @@ import org.apache.calcite.sql.type.SqlTypeFamily; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.ExprMacroTable; @@ -56,7 +56,6 @@ import org.apache.druid.sql.calcite.table.RowSignatures; import javax.annotation.Nullable; - import java.util.List; import java.util.Objects; 
import java.util.stream.Collectors; @@ -198,7 +197,16 @@ public RelDataType inferReturnType(SqlOperatorBinding sqlOperatorBinding) { RelDataType type = sqlOperatorBinding.getOperandType(0); if (type instanceof RowSignatures.ComplexSqlType) { - throw SqlUnsupportedError.unsupportedAggType("STRING_AGG", type); + String columnName = ""; + if (sqlOperatorBinding instanceof SqlCallBinding) { + columnName = ((SqlCallBinding) sqlOperatorBinding).getCall().operand(0).toString(); + } + + throw SimpleSqlAggregator.badTypeException( + columnName, + "STRING_AGG", + ((RowSignatures.ComplexSqlType) type).getColumnType() + ); } return Calcites.createSqlTypeWithNullability( sqlOperatorBinding.getTypeFactory(), diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java index b9d2d236b3bf..148c4dd0d0c6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/aggregation/builtin/SumSqlAggregator.java @@ -31,14 +31,12 @@ import org.apache.calcite.sql.type.OperandTypes; import org.apache.calcite.sql.type.ReturnTypes; import org.apache.calcite.util.Optionality; -import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory; import org.apache.druid.query.aggregation.FloatSumAggregatorFactory; import org.apache.druid.query.aggregation.LongSumAggregatorFactory; import org.apache.druid.segment.column.ColumnType; -import org.apache.druid.segment.column.ValueType; import org.apache.druid.sql.calcite.aggregation.Aggregation; import org.apache.druid.sql.calcite.planner.Calcites; @@ -70,17 +68,17 @@ Aggregation getAggregation( if (valueType == null) { return null; } - return 
Aggregation.create(createSumAggregatorFactory(valueType.getType(), name, fieldName, macroTable)); + return Aggregation.create(createSumAggregatorFactory(valueType, name, fieldName, macroTable)); } static AggregatorFactory createSumAggregatorFactory( - final ValueType aggregationType, + final ColumnType aggregationType, final String name, final String fieldName, final ExprMacroTable macroTable ) { - switch (aggregationType) { + switch (aggregationType.getType()) { case LONG: return new LongSumAggregatorFactory(name, fieldName, null, macroTable); case FLOAT: @@ -88,7 +86,7 @@ static AggregatorFactory createSumAggregatorFactory( case DOUBLE: return new DoubleSumAggregatorFactory(name, fieldName, null, macroTable); default: - throw SqlUnsupportedError.unsupportedAggType("SUM", aggregationType); + throw SimpleSqlAggregator.badTypeException(fieldName, "SUM", aggregationType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java index f76a092b765c..c5503f7eb85f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/NestedDataOperatorConversions.java @@ -40,8 +40,8 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeTransforms; import org.apache.calcite.sql2rel.SqlRexConvertlet; -import org.apache.druid.error.SqlUnsupportedError; import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.math.expr.Expr; @@ -61,8 +61,8 @@ import org.apache.druid.sql.calcite.planner.convertlet.DruidConvertletFactory; import org.apache.druid.sql.calcite.table.RowSignatures; +import 
javax.annotation.Nonnull; import javax.annotation.Nullable; - import java.util.Collections; import java.util.List; @@ -197,16 +197,7 @@ public DruidExpression toDruidExpression( } // pre-normalize path so that the same expressions with different jq syntax are collapsed final String path = (String) pathExpr.eval(InputBindings.nilBindings()).value(); - final List parts; - try { - parts = NestedPathFinder.parseJsonPath(path); - } - catch (IllegalArgumentException iae) { - throw SqlUnsupportedError.cannotUseOperator( - call.getOperator().getName(), - iae - ); - } + final List parts = extractNestedPathParts(call, path); final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> "json_query(" + args.get(0).getExpression() + ",'" + jsonPath + "')"; @@ -386,16 +377,9 @@ public DruidExpression toDruidExpression( } // pre-normalize path so that the same expressions with different jq syntax are collapsed final String path = (String) pathExpr.eval(InputBindings.nilBindings()).value(); - final List parts; - try { - parts = NestedPathFinder.parseJsonPath(path); - } - catch (IllegalArgumentException iae) { - throw SqlUnsupportedError.cannotUseOperator( - call.getOperator().getName(), - iae - ); - } + + final List parts = extractNestedPathParts(call, path); + final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> "json_value(" + args.get(0).getExpression() + ",'" + jsonPath + "', '" + druidType.asTypeString() + "')"; @@ -520,7 +504,7 @@ public DruidExpression toDruidExpression( parts = NestedPathFinder.parseJsonPath(path); } catch (IllegalArgumentException iae) { - throw new UnsupportedSQLQueryException( + throw InvalidSqlInput.exception( "Cannot use [%s]: [%s]", call.getOperator().getName(), iae.getMessage() @@ -683,17 +667,7 @@ public DruidExpression toDruidExpression( } // pre-normalize path so that the same expressions with 
different jq syntax are collapsed final String path = (String) pathExpr.eval(InputBindings.nilBindings()).value(); - final List parts; - try { - parts = NestedPathFinder.parseJsonPath(path); - } - catch (IllegalArgumentException iae) { - throw new SqlUnsupportedError( - iae, - "JSON path [%s] is not supported", - call.getOperator().getName() - ); - } + final List parts = extractNestedPathParts(call, path); final String jsonPath = NestedPathFinder.toNormalizedJsonPath(parts); final DruidExpression.ExpressionGenerator builder = (args) -> "json_value(" + args.get(0).getExpression() + ",'" + jsonPath + "')"; @@ -896,4 +870,19 @@ public DruidExpression toDruidExpression( ); } } + + @Nonnull + private static List extractNestedPathParts(RexCall call, String path) + { + try { + return NestedPathFinder.parseJsonPath(path); + } + catch (IllegalArgumentException iae) { + final String name = call.getOperator().getName(); + throw DruidException + .forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.INVALID_INPUT) + .build(iae, "Error when processing path [%s], operator [%s] is not useable", path, name); + } + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java index 9425b3d96f81..60fe551c6799 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java @@ -34,7 +34,7 @@ import org.apache.calcite.sql.SqlTimestampLiteral; import org.apache.calcite.tools.ValidationException; import org.apache.druid.error.DruidException; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.java.util.common.granularity.GranularityType; @@ -87,15 
+87,11 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql } } - private static final String PARITION_BY_ERROR = "Encountered [${expr}] after PARTITIONED BY. " - + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or " - + TimeFloorOperatorConversion.SQL_FUNCTION_NAME + " function"; - /** * This method is used to extract the granularity from a SqlNode representing following function calls: * 1. FLOOR(__time TO TimeUnit) * 2. TIME_FLOOR(__time, 'PT1H') - * + *

* Validation on the sqlNode is contingent to following conditions: * 1. sqlNode is an instance of SqlCall * 2. Operator is either one of TIME_FLOOR or FLOOR @@ -103,7 +99,7 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql * 4. First operand is a SimpleIdentifier representing __time * 5. If operator is TIME_FLOOR, the second argument is a literal, and can be converted to the Granularity class * 6. If operator is FLOOR, the second argument is a TimeUnit, and can be mapped using {@link TimeUnits} - * + *

* Since it is to be used primarily while parsing the SqlNode, it is wrapped in {@code convertSqlNodeToGranularityThrowingParseExceptions} * * @param sqlNode SqlNode representing a call to a function @@ -113,11 +109,7 @@ public static Granularity convertSqlNodeToGranularityThrowingParseExceptions(Sql public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws ParseException { if (!(sqlNode instanceof SqlCall)) { - throw new SqlValidationError( - "InvalidPartitionBy", - PARITION_BY_ERROR - ) - .withValue("expr", sqlNode.toString()); + throw makeInvalidPartitionByException(sqlNode); } SqlCall sqlCall = (SqlCall) sqlNode; @@ -166,7 +158,9 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa catch (IllegalArgumentException e) { throw new ParseException(StringUtils.format("%s is an invalid period string", granularitySqlNode.toString())); } - return new PeriodGranularity(period, null, null); + final PeriodGranularity retVal = new PeriodGranularity(period, null, null); + validateSupportedGranularityForPartitionedBy(sqlNode, retVal); + return retVal; } else if ("FLOOR".equalsIgnoreCase(operatorName)) { // If the floor function is of form FLOOR(__time TO DAY) SqlNode granularitySqlNode = operandList.get(1); @@ -187,15 +181,22 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa granularityIntervalQualifier.timeUnitRange.toString() ) ); - return new PeriodGranularity(period, null, null); + final PeriodGranularity retVal = new PeriodGranularity(period, null, null); + validateSupportedGranularityForPartitionedBy(sqlNode, retVal); + return retVal; } // Shouldn't reach here - throw new SqlValidationError( - "InvalidPartitionBy", - PARITION_BY_ERROR - ) - .withValue("expr", sqlNode.toString()); + throw makeInvalidPartitionByException(sqlNode); + } + + private static DruidException makeInvalidPartitionByException(SqlNode sqlNode) + { + return InvalidSqlInput.exception( + "Invalid granularity [%s] after 
PARTITIONED BY. " + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR() or TIME_FLOOR()", + sqlNode + ); } /** @@ -203,7 +204,7 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa * be used in creating an ingestion spec. If the sqlNode is an SqlLiteral of {@link #ALL}, returns a singleton list of * "ALL". Otherwise, it converts and optimizes the query using {@link MoveTimeFiltersToIntervals} into a list of * intervals which contain all valid values of time as per the query. - * + *

* The following validations are performed * 1. Only __time column and timestamp literals are present in the query * 2. The interval after optimization is not empty @@ -211,8 +212,8 @@ public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws Pa * 4. The intervals after adjusting for timezone are aligned with the granularity parameter * * @param replaceTimeQuery Sql node representing the query - * @param granularity granularity of the query for validation - * @param dateTimeZone timezone + * @param granularity granularity of the query for validation + * @param dateTimeZone timezone * @return List of string representation of intervals * @throws ValidationException if the SqlNode cannot be converted to a list of intervals */ @@ -233,30 +234,31 @@ public static List validateQueryAndConvertToIntervals( List intervals = filtration.getIntervals(); if (filtration.getDimFilter() != null) { - throw new SqlValidationError( - "OverwriteWhereIsNotTime", - "Only " + ColumnHolder.TIME_COLUMN_NAME + " column is supported in OVERWRITE WHERE clause" + throw InvalidSqlInput.exception( + "OVERWRITE WHERE clause only supports filtering on the __time column, got [%s]", + filtration.getDimFilter() ); } if (intervals.isEmpty()) { - throw new SqlValidationError( - "OverwriteEmptyIntervals", - "Intervals for REPLACE are empty" + throw InvalidSqlInput.exception( + "The OVERWRITE WHERE clause [%s] produced no time intervals, are the bounds overly restrictive?", + dimFilter, + intervals ); } for (Interval interval : intervals) { DateTime intervalStart = interval.getStart(); DateTime intervalEnd = interval.getEnd(); - if (!granularity.bucketStart(intervalStart).equals(intervalStart) || !granularity.bucketStart(intervalEnd).equals(intervalEnd)) { - throw new SqlValidationError( - "OverwriteUnalignedInterval", - "OVERWRITE WHERE clause contains an interval [${interval}]" + - " which is not aligned with PARTITIONED BY granularity [${granularity}]" - ) - .withValue("interval", 
intervals) - .withValue("granularity", granularity); + if (!granularity.bucketStart(intervalStart).equals(intervalStart) + || !granularity.bucketStart(intervalEnd).equals(intervalEnd)) { + throw InvalidSqlInput.exception( + "OVERWRITE WHERE clause identified interval [%s]" + + " which is not aligned with PARTITIONED BY granularity [%s]", + interval, + granularity + ); } } return intervals @@ -268,7 +270,7 @@ public static List validateQueryAndConvertToIntervals( /** * Extracts and converts the information in the CLUSTERED BY clause to a new SqlOrderBy node. * - * @param query sql query + * @param query sql query * @param clusteredByList List of clustered by columns * @return SqlOrderBy node containing the clusteredByList information * @throws ValidationException if any of the clustered by columns contain DESCENDING order. @@ -330,103 +332,107 @@ public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes * are AND, OR, NOT, >, <, >=, <= and BETWEEN operators in the sql query. 
* * @param replaceTimeQuery Sql node representing the query - * @param dateTimeZone timezone + * @param dateTimeZone timezone * @return Dimfilter for the query * @throws ValidationException if the SqlNode cannot be converted a Dimfilter */ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTimeZone dateTimeZone) { if (!(replaceTimeQuery instanceof SqlBasicCall)) { - log.error("Expected SqlBasicCall during parsing, but found " + replaceTimeQuery.getClass().getName()); - throw new SqlValidationError( - "InvalidOverwriteWhere", - "Invalid OVERWRITE WHERE clause" + throw InvalidSqlInput.exception( + "Invalid OVERWRITE WHERE clause [%s]: expected clause including AND, OR, NOT, >, <, >=, <= OR BETWEEN operators", + replaceTimeQuery ); } - String columnName; - SqlBasicCall sqlBasicCall = (SqlBasicCall) replaceTimeQuery; - List operandList = sqlBasicCall.getOperandList(); - switch (sqlBasicCall.getOperator().getKind()) { - case AND: - List dimFilters = new ArrayList<>(); - for (SqlNode sqlNode : sqlBasicCall.getOperandList()) { - dimFilters.add(convertQueryToDimFilter(sqlNode, dateTimeZone)); - } - return new AndDimFilter(dimFilters); - case OR: - dimFilters = new ArrayList<>(); - for (SqlNode sqlNode : sqlBasicCall.getOperandList()) { - dimFilters.add(convertQueryToDimFilter(sqlNode, dateTimeZone)); - } - return new OrDimFilter(dimFilters); - case NOT: - return new NotDimFilter(convertQueryToDimFilter(sqlBasicCall.getOperandList().get(0), dateTimeZone)); - case GREATER_THAN_OR_EQUAL: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - null, - false, - null, - null, - null, - StringComparators.NUMERIC - ); - case LESS_THAN_OR_EQUAL: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - null, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - null, - false, - null, - null, - 
StringComparators.NUMERIC - ); - case GREATER_THAN: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - null, - true, - null, - null, - null, - StringComparators.NUMERIC - ); - case LESS_THAN: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - null, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - null, - true, - null, - null, - StringComparators.NUMERIC - ); - case BETWEEN: - columnName = parseColumnName(operandList.get(0)); - return new BoundDimFilter( - columnName, - parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), - parseTimeStampWithTimeZone(operandList.get(2), dateTimeZone), - false, - false, - null, - null, - StringComparators.NUMERIC - ); - default: - throw new SqlValidationError( - "OverwriteWhereExpr", - "Unsupported operation in OVERWRITE WHERE clause: [${expr]]" - ) - .withValue("expr", sqlBasicCall.getOperator().getName()); + + try { + String columnName; + SqlBasicCall sqlBasicCall = (SqlBasicCall) replaceTimeQuery; + List operandList = sqlBasicCall.getOperandList(); + switch (sqlBasicCall.getOperator().getKind()) { + case AND: + List dimFilters = new ArrayList<>(); + for (SqlNode sqlNode : sqlBasicCall.getOperandList()) { + dimFilters.add(convertQueryToDimFilter(sqlNode, dateTimeZone)); + } + return new AndDimFilter(dimFilters); + case OR: + dimFilters = new ArrayList<>(); + for (SqlNode sqlNode : sqlBasicCall.getOperandList()) { + dimFilters.add(convertQueryToDimFilter(sqlNode, dateTimeZone)); + } + return new OrDimFilter(dimFilters); + case NOT: + return new NotDimFilter(convertQueryToDimFilter(sqlBasicCall.getOperandList().get(0), dateTimeZone)); + case GREATER_THAN_OR_EQUAL: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + null, + false, + null, + null, + 
null, + StringComparators.NUMERIC + ); + case LESS_THAN_OR_EQUAL: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + null, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + null, + false, + null, + null, + StringComparators.NUMERIC + ); + case GREATER_THAN: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + null, + true, + null, + null, + null, + StringComparators.NUMERIC + ); + case LESS_THAN: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + null, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + null, + true, + null, + null, + StringComparators.NUMERIC + ); + case BETWEEN: + columnName = parseColumnName(operandList.get(0)); + return new BoundDimFilter( + columnName, + parseTimeStampWithTimeZone(operandList.get(1), dateTimeZone), + parseTimeStampWithTimeZone(operandList.get(2), dateTimeZone), + false, + false, + null, + null, + StringComparators.NUMERIC + ); + default: + throw InvalidSqlInput.exception( + "Unsupported operation [%s] in OVERWRITE WHERE clause.", + sqlBasicCall.getOperator().getName() + ); + } + } + catch (DruidException e) { + throw e.prependAndBuild("Invalid OVERWRITE WHERE clause [%s]", replaceTimeQuery); } } @@ -440,10 +446,7 @@ public static DimFilter convertQueryToDimFilter(SqlNode replaceTimeQuery, DateTi public static String parseColumnName(SqlNode sqlNode) { if (!(sqlNode instanceof SqlIdentifier)) { - throw new SqlValidationError( - "OverwriteWhereInvalidForm", - "OVERWRITE WHERE expressions must be of the form __time TIMESTAMP" - ); + throw InvalidSqlInput.exception("Cannot parse column name from SQL expression [%s]", sqlNode); } return ((SqlIdentifier) sqlNode).getSimple(); } @@ -451,18 +454,15 @@ public static String parseColumnName(SqlNode sqlNode) /** * Converts a {@link SqlNode} into a timestamp, 
taking into account the timezone * - * @param sqlNode the SQL node + * @param sqlNode the SQL node * @param timeZone timezone * @return the timestamp string as milliseconds from epoch * @throws DruidException if the SQL node is not a SqlTimestampLiteral */ - public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) + private static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone timeZone) { if (!(sqlNode instanceof SqlTimestampLiteral)) { - throw new SqlValidationError( - "OverwriteWhereInvalidForm", - "OVERWRITE WHERE expressions must be of the form __time TIMESTAMP" - ); + throw InvalidSqlInput.exception("Cannot get a timestamp from sql expression [%s]", sqlNode); } Timestamp sqlTimestamp = Timestamp.valueOf(((SqlTimestampLiteral) sqlNode).toFormattedString()); @@ -470,26 +470,23 @@ public static String parseTimeStampWithTimeZone(SqlNode sqlNode, DateTimeZone ti return String.valueOf(zonedTimestamp.toInstant().toEpochMilli()); } - /** - * Throws an IAE with appropriate message if the granularity supplied is not present in - * {@link org.apache.druid.java.util.common.granularity.Granularities}. 
It also filters out NONE as it is not a valid - * granularity that can be supplied in PARTITIONED BY - */ - public static void throwIfUnsupportedGranularityInPartitionedBy(Granularity granularity) + public static void validateSupportedGranularityForPartitionedBy(SqlNode originalNode, Granularity granularity) { if (!GranularityType.isStandard(granularity)) { - throw new SqlValidationError( - "PartitionedByGrain", - "The granularity specified in PARTITIONED BY is not supported.\nValid granularities: ${supported}" - ) - .withValue( - "supported", - Arrays.stream(GranularityType.values()) - .filter(granularityType -> !granularityType.equals(GranularityType.NONE)) - .map(Enum::name) - .map(StringUtils::toLowerCase) - .collect(Collectors.joining(", ")) - ); + throw InvalidSqlInput.exception( + "The granularity specified in PARTITIONED BY [%s] is not supported. Valid options: [%s]", + originalNode == null ? granularity : originalNode, + Arrays.stream(GranularityType.values()) + .filter(granularityType -> !granularityType.equals(GranularityType.NONE)) + .map(Enum::name) + .map(StringUtils::toLowerCase) + .collect(Collectors.joining(", ")) + ); } } + + public static DruidException problemParsing(String message) + { + return InvalidSqlInput.exception(message); + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index f03bf0bde3cc..fc6c08a04033 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -20,38 +20,37 @@ package org.apache.druid.sql.calcite.planner; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.runtime.CalciteContextException; import org.apache.calcite.schema.SchemaPlus; import 
org.apache.calcite.sql.SqlExplain; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.parser.SqlParseException; +import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.calcite.tools.FrameworkConfig; import org.apache.calcite.tools.ValidationException; -import org.apache.druid.error.DruidAssertionError; import org.apache.druid.error.DruidException; -import org.apache.druid.error.ErrorCode; -import org.apache.druid.error.SqlParseError; -import org.apache.druid.error.SqlUnsupportedError; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.query.QueryContext; import org.apache.druid.server.security.Access; import org.apache.druid.server.security.Resource; import org.apache.druid.server.security.ResourceAction; import org.apache.druid.sql.calcite.parser.DruidSqlInsert; import org.apache.druid.sql.calcite.parser.DruidSqlReplace; +import org.apache.druid.sql.calcite.parser.ParseException; +import org.apache.druid.sql.calcite.parser.Token; import org.apache.druid.sql.calcite.run.SqlEngine; import org.joda.time.DateTimeZone; import java.io.Closeable; +import java.util.ArrayList; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.regex.Matcher; -import java.util.regex.Pattern; /** * Druid SQL planner. 
Wraps the underlying Calcite planner with Druid-specific @@ -65,6 +64,10 @@ */ public class DruidPlanner implements Closeable { + + public static final Joiner SPACE_JOINER = Joiner.on(" "); + public static final Joiner COMMA_JOINER = Joiner.on(", "); + public enum State { START, VALIDATED, PREPARED, PLANNED @@ -173,21 +176,17 @@ private SqlStatementHandler createHandler(final SqlNode node) if (query.isA(SqlKind.QUERY)) { return new QueryHandler.SelectHandler(handlerContext, query, explain); } - throw new SqlUnsupportedError( - "Statement", - "Unsupported SQL statement [${statement}]" - ) - .withValue("statement", node.getKind()); + throw InvalidSqlInput.exception("Unsupported SQL statement [%s]", node.getKind()); } /** * Prepare a SQL query for execution, including some initial parsing and * validation and any dynamic parameter type resolution, to support prepared * statements via JDBC. - * + *

* Prepare reuses the validation done in {@link #validate()} which must be * called first. - * + *

* A query can be prepared on a data source without having permissions on * that data source. This odd state of affairs is necessary because * {@link org.apache.druid.sql.calcite.view.DruidViewMacro} prepares @@ -205,11 +204,10 @@ public PrepareResult prepare() * Authorizes the statement. Done within the planner to enforce the authorization * step within the planner's state machine. * - * @param authorizer a function from resource actions to a {@link Access} result. + * @param authorizer a function from resource actions to a {@link Access} result. * @param extraActions set of additional resource actions beyond those inferred - * from the query itself. Specifically, the set of context keys to - * authorize. - * + * from the query itself. Specifically, the set of context keys to + * authorize. * @return the return value from the authorizer */ public AuthResult authorize( @@ -329,92 +327,176 @@ public static DruidException translateException(Exception e) return inner; } catch (ValidationException inner) { - return parseValidationMessage(inner, false); + return parseValidationMessage(inner); } catch (SqlParseException inner) { - return parseParserMessage(inner); + final Throwable cause = inner.getCause(); + if (cause instanceof DruidException) { + return (DruidException) cause; + } + + if (cause instanceof ParseException) { + ParseException parseException = (ParseException) cause; + final SqlParserPos failurePosition = inner.getPos(); + final String theUnexpectedToken = getUnexpectedTokenString(parseException); + + final String[] tokenDictionary = inner.getTokenImages(); + final int[][] expectedTokenSequences = inner.getExpectedTokenSequences(); + final ArrayList expectedTokens = new ArrayList<>(expectedTokenSequences.length); + for (int[] expectedTokenSequence : expectedTokenSequences) { + String[] strings = new String[expectedTokenSequence.length]; + for (int i = 0; i < expectedTokenSequence.length; ++i) { + strings[i] = tokenDictionary[expectedTokenSequence[i]]; + } 
+ expectedTokens.add(SPACE_JOINER.join(strings)); + } + + return InvalidSqlInput + .exception( + inner, + "Received an unexpected token [%s] (line [%s], column [%s]), acceptable options: [%s]", + theUnexpectedToken, + failurePosition.getLineNum(), + failurePosition.getColumnNum(), + COMMA_JOINER.join(expectedTokens) + ) + .withContext("line", failurePosition.getLineNum()) + .withContext("column", failurePosition.getColumnNum()) + .withContext("endLine", failurePosition.getEndLineNum()) + .withContext("endColumn", failurePosition.getEndColumnNum()) + .withContext("token", theUnexpectedToken) + .withContext("expected", expectedTokens); + + } + + return DruidException.forPersona(DruidException.Persona.DEVELOPER) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build( + inner, + "Unable to parse the SQL, unrecognized error from calcite: [%s]", + inner.getMessage() + ); } catch (RelOptPlanner.CannotPlanException inner) { - return parseValidationMessage(inner, true); + return DruidException.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.INVALID_INPUT) + .build(inner, inner.getMessage()); } catch (Exception inner) { // Anything else. Should not get here. Anything else should already have // been translated to a DruidException unless it is an unexpected exception. - return DruidAssertionError.forCause(inner, e.getMessage()); + return DruidException.forPersona(DruidException.Persona.ADMIN) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build(inner, inner.getMessage()); } } - private static DruidException parseValidationMessage(Exception e, boolean unsupported) + private static DruidException parseValidationMessage(Exception e) { if (e.getCause() instanceof DruidException) { return (DruidException) e.getCause(); } - // Calcite exception that probably includes a position. 
- String msg = e.getMessage(); - Pattern p = Pattern.compile("(?:org\\..*: )From line (\\d+), column (\\d+) to line \\d+, column \\d+: (.*)$"); - Matcher m = p.matcher(msg); - Exception cause; - String errorMsg; - Map values = new LinkedHashMap<>(); - if (m.matches()) { - cause = null; - values.put("line", m.group(1)); - values.put("column", m.group(2)); - values.put(DruidException.MESSAGE_KEY, m.group(3)); - errorMsg = SqlParseError.fullMessage(DruidException.SIMPLE_MESSAGE); - } else { - cause = e; - values.put(DruidException.MESSAGE_KEY, msg); - errorMsg = DruidException.SIMPLE_MESSAGE; + + Throwable maybeContextException = e; + CalciteContextException contextException = null; + while (maybeContextException != null) { + if (maybeContextException instanceof CalciteContextException) { + contextException = (CalciteContextException) maybeContextException; + break; + } + maybeContextException = maybeContextException.getCause(); } - if (unsupported) { - return new SqlUnsupportedError( - cause, - ErrorCode.GENERAL_TAIL, - DruidException.SIMPLE_MESSAGE - ) - .withValues(values); - } else { - return new SqlValidationError( + + if (contextException != null) { + return InvalidSqlInput + .exception( e, - ErrorCode.GENERAL_TAIL, - errorMsg - ) - .withValues(values); + "%s (line [%s], column [%s])", + // the CalciteContextException .getMessage() assumes cause is non-null, so this should be fine + contextException.getCause().getMessage(), + contextException.getPosLine(), + contextException.getPosColumn() + ) + .withContext("line", String.valueOf(contextException.getPosLine())) + .withContext("column", String.valueOf(contextException.getPosColumn())) + .withContext("endLine", String.valueOf(contextException.getEndPosLine())) + .withContext("endColumn", String.valueOf(contextException.getEndPosColumn())); + } else { + return DruidException.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build(e, "Uncategorized calcite error 
message: [%s]", e.getMessage()); } } - private static DruidException parseParserMessage(Exception e) + /** + * Grabs the unexpected token string. This code is borrowed with minimal adjustments from + * {@link ParseException#getMessage()}. It is possible that if that code changes, we need to also + * change this code to match it. + * + * @param parseException the parse exception to extract from + * @return the String representation of the unexpected token string + */ + private static String getUnexpectedTokenString(ParseException parseException) { - if (e.getCause() instanceof DruidException) { - return (DruidException) e.getCause(); + int maxSize = 0; + for (int[] ints : parseException.expectedTokenSequences) { + if (maxSize < ints.length) { + maxSize = ints.length; + } } - // Calcite exception that probably includes a position. The normal parse - // exception is rather cumbersome. Clean it up a bit. - final String msg = e.getMessage(); - Pattern p = Pattern.compile( - "Encountered \"(.*)\" at line (\\d+), column (\\d+).\nWas expecting one of:\n(.*)", - Pattern.MULTILINE | Pattern.DOTALL - ); - Matcher m = p.matcher(msg); - if (!m.matches()) { - return new SqlParseError( - e, - ErrorCode.GENERAL_TAIL, - DruidException.SIMPLE_MESSAGE - ) - .withValue(DruidException.MESSAGE_KEY, e.getMessage()); + + StringBuilder bob = new StringBuilder(); + Token tok = parseException.currentToken.next; + for (int i = 0; i < maxSize; i++) { + if (i != 0) { + bob.append(" "); + } + if (tok.kind == 0) { + bob.append(""); + break; + } + char ch; + for (int i1 = 0; i1 < tok.image.length(); i1++) { + switch (tok.image.charAt(i1)) { + case 0: + continue; + case '\b': + bob.append("\\b"); + continue; + case '\t': + bob.append("\\t"); + continue; + case '\n': + bob.append("\\n"); + continue; + case '\f': + bob.append("\\f"); + continue; + case '\r': + bob.append("\\r"); + continue; + case '\"': + bob.append("\\\""); + continue; + case '\'': + bob.append("\\\'"); + continue; + case '\\': + 
bob.append("\\\\"); + continue; + default: + if ((ch = tok.image.charAt(i1)) < 0x20 || ch > 0x7e) { + String s = "0000" + Integer.toString(ch, 16); + bob.append("\\u").append(s.substring(s.length() - 4, s.length())); + } else { + bob.append(ch); + } + continue; + } + } + tok = tok.next; } - Pattern p2 = Pattern.compile("[ .]*\n\\ s+"); - Matcher m2 = p2.matcher(m.group(4).trim()); - String choices = m2.replaceAll(", "); - return new SqlParseError( - "UnexpectedToken", - SqlParseError.fullMessage("unexpected token [${token}]\nExpected ${expected}") - ) - .withValue("line", m.group(2)) - .withValue("column", m.group(3)) - .withValue("token", m.group(1)) - .withValue("expected", choices); + return bob.toString(); } + } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java index ae2364626a3f..844d9896ae8d 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidRexExecutor.java @@ -23,8 +23,7 @@ import org.apache.calcite.rex.RexExecutor; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.druid.error.SqlUnsupportedError; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.math.expr.Expr; import org.apache.druid.math.expr.ExprEval; @@ -89,12 +88,7 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. 
if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw new SqlValidationError( - "InvalidConstant", - "Illegal ${type} constant [${expr}]" - ) - .withValue("type", "DATE") - .withValue("expr", constExp); + throw InvalidSqlInput.exception("Illegal DATE constant [%s]", constExp); } literal = rexBuilder.makeDateLiteral( @@ -108,12 +102,7 @@ public void reduce( // as a primitive long/float/double. // ExprEval.isNumericNull checks whether the parsed primitive value is null or not. if (!constExp.getType().isNullable() && exprResult.isNumericNull()) { - throw new SqlValidationError( - "InvalidConstant", - "Illegal ${type} constant [${expr}]" - ) - .withValue("type", "TIMESTAMP") - .withValue("expr", constExp); + throw InvalidSqlInput.exception("Illegal TIMESTAMP constant [%s]", constExp); } literal = Calcites.jodaToCalciteTimestampLiteral( @@ -137,15 +126,12 @@ public void reduce( // the query can execute. double exprResultDouble = exprResult.asDouble(); if (Double.isNaN(exprResultDouble) || Double.isInfinite(exprResultDouble)) { - String expression = druidExpression.getExpression(); - throw new SqlUnsupportedError( - "UnsupportedExpr", - "[${expr}] evaluates to [${eval}] that is not supported in SQL. " + - "You can either cast the expression as BIGINT ('CAST(%s as BIGINT)') " + - "or VARCHAR ('CAST(%s as VARCHAR)') or change the expression itself" - ) - .withValue("expr", expression) - .withValue("eval", exprResultDouble); + throw InvalidSqlInput.exception( + "Expression [%s] evaluates to an unsupported value [%s], expected something that" + + " can be a Double. Consider casting with 'CAST(

AS BIGINT)'", + druidExpression.getExpression(), + exprResultDouble + ); } bigDecimal = BigDecimal.valueOf(exprResult.asDouble()); } @@ -175,14 +161,14 @@ public void reduce( if (doubleVal == null) { resultAsBigDecimalList.add(null); } else if (Double.isNaN(doubleVal.doubleValue()) || Double.isInfinite(doubleVal.doubleValue())) { - String expression = druidExpression.getExpression(); - throw new SqlUnsupportedError( - "ArrayElement", - "[${expr}] contains an element that evaluates to [${eval}] which is not supported in SQL. " + - "You can either cast the element in the ARRAY to BIGINT or VARCHAR or change the expression itself" - ) - .withValue("expr", expression) - .withValue("eval", doubleVal.doubleValue()); + throw InvalidSqlInput.exception( + "Expression [%s] was expected to generate values that are all Doubles," + + " but entry at index[%d] was not: [%s]." + + " Consider Casting values to ensure a consistent type.", + druidExpression.getExpression(), + resultAsBigDecimalList.size(), + doubleVal + ); } else { resultAsBigDecimalList.add(BigDecimal.valueOf(doubleVal.doubleValue())); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java index 3b1be7ecb52e..52b4efcfeb07 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/IngestHandler.java @@ -20,7 +20,6 @@ package org.apache.druid.sql.calcite.planner; import com.fasterxml.jackson.core.JsonProcessingException; -import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Iterables; import org.apache.calcite.jdbc.CalciteSchema; import org.apache.calcite.rel.RelRoot; @@ -36,8 +35,8 @@ import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; import org.apache.druid.common.utils.IdUtils; -import org.apache.druid.error.SqlUnsupportedError; -import 
org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; @@ -56,12 +55,6 @@ public abstract class IngestHandler extends QueryHandler { private static final Pattern UNNAMED_COLUMN_PATTERN = Pattern.compile("^EXPR\\$\\d+$", Pattern.CASE_INSENSITIVE); - @VisibleForTesting - public static final String UNNAMED_INGESTION_COLUMN_ERROR = - "Cannot ingest expressions that do not have an alias " - + "or columns with names like EXPR$[digit].\n" - + "E.g. if you are ingesting \"func(X)\", then you can rewrite it as " - + "\"func(X) as myColumn\""; protected final Granularity ingestionGranularity; protected String targetDatasource; @@ -87,12 +80,10 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) SqlOrderBy sqlOrderBy = (SqlOrderBy) query; SqlNodeList orderByList = sqlOrderBy.orderList; if (!(orderByList == null || orderByList.equals(SqlNodeList.EMPTY))) { - String opName = sqlNode.getOperator().getName(); - throw new SqlValidationError( - "InsertOrderBy", - "Cannot use ORDER BY with ${op}, use CLUSTERED BY instead" - ) - .withValue("op", opName); + throw InvalidSqlInput.exception( + "Cannot use an ORDER BY clause on a Query of type [%s], use CLUSTERED BY instead", + sqlNode.getOperator().getName() + ); } } if (sqlNode.getClusteredBy() != null) { @@ -100,11 +91,7 @@ protected static SqlNode convertQuery(DruidSqlIngest sqlNode) } if (!query.isA(SqlKind.QUERY)) { - throw new SqlValidationError( - "Unsupported", - "Cannot execute SQL statement [%{op}]" - ) - .withValue("op", query.getKind()); + throw InvalidSqlInput.exception("Unexpected SQL statement type [%s], expected it to be a QUERY", query.getKind()); } return query; } @@ -120,11 +107,10 @@ protected String operationName() public void validate() { if 
(ingestNode().getPartitionedBy() == null) { - throw new SqlValidationError( - "InsertWithoutPartitionBy", - "${op} statements must specify the PARTITIONED BY clause explicitly" - ) - .withValue("op", operationName()); + throw InvalidSqlInput.exception( + "Operation [%s] requires a PARTITIONED BY to be explicitly defined, but none was found.", + operationName() + ); } try { PlannerContext plannerContext = handlerContext.plannerContext(); @@ -136,22 +122,17 @@ public void validate() } } catch (JsonProcessingException e) { - throw new SqlValidationError( - "PartitionGrain", - "Invalid partition granularity [${grain}]" - ) - .withValue("grain", ingestionGranularity); + throw InvalidSqlInput.exception(e, "Invalid partition granularity [%s]", ingestionGranularity); } super.validate(); // Check if CTX_SQL_OUTER_LIMIT is specified and fail the query if it is. CTX_SQL_OUTER_LIMIT being provided causes // the number of rows inserted to be limited which is likely to be confusing and unintended. if (handlerContext.queryContextMap().get(PlannerContext.CTX_SQL_OUTER_LIMIT) != null) { - throw new SqlValidationError( - "InsertContext", - "Context parameter [%{param}] cannot be provided with [${op}]" - ) - .withValue("param", PlannerContext.CTX_SQL_OUTER_LIMIT) - .withValue("op", operationName()); + throw InvalidSqlInput.exception( + "Context parameter [%s] cannot be provided on operator [%s]", + PlannerContext.CTX_SQL_OUTER_LIMIT, + operationName() + ); } targetDatasource = validateAndGetDataSourceForIngest(); resourceActions.add(new ResourceAction(new Resource(targetDatasource, ResourceType.DATASOURCE), Action.WRITE)); @@ -163,7 +144,8 @@ protected RelDataType returnedRowType() final RelDataTypeFactory typeFactory = rootQueryRel.rel.getCluster().getTypeFactory(); return handlerContext.engine().resultTypeForInsert( typeFactory, - rootQueryRel.validatedRowType); + rootQueryRel.validatedRowType + ); } /** @@ -174,15 +156,15 @@ private String validateAndGetDataSourceForIngest() { 
final SqlInsert insert = ingestNode(); if (insert.isUpsert()) { - throw new SqlUnsupportedError("UPSERT", "UPSERT is not supported."); + throw InvalidSqlInput.exception("UPSERT is not supported."); } if (insert.getTargetColumnList() != null) { - throw new SqlUnsupportedError( - "InsertList", - "[${op}] with a target column list is not supported" - ) - .withValue("op", operationName()); + throw InvalidSqlInput.exception( + "Operation [%s] cannot be run with a target column list, given [%s (%s)]", + operationName(), + insert.getTargetTable(), insert.getTargetColumnList() + ); } final SqlIdentifier tableIdentifier = (SqlIdentifier) insert.getTargetTable(); @@ -190,11 +172,9 @@ private String validateAndGetDataSourceForIngest() if (tableIdentifier.names.isEmpty()) { // I don't think this can happen, but include a branch for it just in case. - throw new SqlValidationError( - "NoInsertTarget", - "[${op}] requires a target table" - ) - .withValue("op", operationName()); + throw DruidException.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.DEFENSIVE) + .build("Operation [%s] requires a target table", operationName()); } else if (tableIdentifier.names.size() == 1) { // Unqualified name. 
dataSource = Iterables.getOnlyElement(tableIdentifier.names); @@ -206,21 +186,15 @@ private String validateAndGetDataSourceForIngest() if (tableIdentifier.names.size() == 2 && defaultSchemaName.equals(tableIdentifier.names.get(0))) { dataSource = tableIdentifier.names.get(1); } else { - throw new SqlValidationError( - "InsertNotDatasource", - "Cannot [${op}] into [${table}] because it is not a Druid datasource" - ) - .withValue("op", operationName()) - .withValue("table", tableIdentifier); + throw InvalidSqlInput.exception( + "Table [%s] does not support operation [%s] because it is not a Druid datasource", + tableIdentifier, + operationName() + ); } } - try { - IdUtils.validateId(operationName() + " dataSource", dataSource); - } - catch (IllegalArgumentException e) { - throw SqlValidationError.forCause(e); - } + IdUtils.validateId("table", dataSource); return dataSource; } @@ -238,15 +212,20 @@ protected QueryMaker buildQueryMaker(final RelRoot rootQueryRel) throws Validati return handlerContext.engine().buildQueryMakerForInsert( targetDatasource, rootQueryRel, - handlerContext.plannerContext()); + handlerContext.plannerContext() + ); } - private void validateColumnsForIngestion(RelRoot rootQueryRel) throws ValidationException + private void validateColumnsForIngestion(RelRoot rootQueryRel) { // Check that there are no unnamed columns in the insert. for (Pair field : rootQueryRel.fields) { if (UNNAMED_COLUMN_PATTERN.matcher(field.right).matches()) { - throw new ValidationException(UNNAMED_INGESTION_COLUMN_ERROR); + throw InvalidSqlInput.exception( + "Insertion requires columns to be named, but at least one of the columns was unnamed. This is usually " + + "the result of applying a function without having an AS clause, please ensure that all function calls" + + "are named with an AS clause as in \"func(X) as myColumn\"." 
+ ); } } } @@ -268,7 +247,8 @@ public InsertHandler( handlerContext, sqlNode, convertQuery(sqlNode), - explain); + explain + ); this.sqlNode = sqlNode; } @@ -282,12 +262,10 @@ protected DruidSqlIngest ingestNode() public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_INSERT)) { - throw new SqlUnsupportedError( - "UnsupportedEngineOp", - "Cannot execute ${op} with SQL engine [${engine}]" - ) - .withValue("op", "INSERT") - .withValue("engine", handlerContext.engine().name()); + throw InvalidSqlInput.exception( + "INSERT operations are not supported by requested SQL engine [%s], consider using MSQ.", + handlerContext.engine().name() + ); } super.validate(); } @@ -338,17 +316,14 @@ protected DruidSqlIngest ingestNode() public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_REPLACE)) { - throw new SqlUnsupportedError( - "UnsupportedEngineOp", - "Cannot execute ${op} with SQL engine [${engine}]" - ) - .withValue("op", "REPLACE") - .withValue("engine", handlerContext.engine().name()); + throw InvalidSqlInput.exception( + "REPLACE operations are not supported by the requested SQL engine [%s]. Consider using MSQ.", + handlerContext.engine().name() + ); } SqlNode replaceTimeQuery = sqlNode.getReplaceTimeQuery(); if (replaceTimeQuery == null) { - throw new SqlValidationError( - "OverwriteTimeRange", + throw InvalidSqlInput.exception( "Missing time chunk information in OVERWRITE clause for REPLACE. Use " + "OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." 
); @@ -357,7 +332,8 @@ public void validate() replaceIntervals = DruidSqlParserUtils.validateQueryAndConvertToIntervals( replaceTimeQuery, ingestionGranularity, - handlerContext.timeZone()); + handlerContext.timeZone() + ); super.validate(); if (replaceIntervals != null) { handlerContext.queryContextMap().put( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java index f9130dd51572..c11d600e2622 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/QueryHandler.java @@ -55,8 +55,7 @@ import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; import org.apache.druid.error.DruidException; -import org.apache.druid.error.SqlUnsupportedError; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequences; import org.apache.druid.java.util.emitter.EmittingLogger; @@ -202,14 +201,13 @@ public PlannerResult plan() // Consider BINDABLE convention when necessary. Used for metadata tables. 
if (!handlerContext.plannerContext().featureAvailable(EngineFeature.ALLOW_BINDABLE_PLAN)) { - throw new SqlValidationError( - "WrongEngineForTable", - "Cannot query table(s) [%{tables}] with SQL engine [${engine}]" - ) - .withValue("tables", bindableTables.stream() - .map(table -> Joiner.on(".").join(table.getQualifiedName())) - .collect(Collectors.joining(", "))) - .withValue("engine", handlerContext.engine().name()); + throw InvalidSqlInput.exception( + "Cannot query table(s) [%s] with SQL engine [%s]", + bindableTables.stream() + .map(table -> Joiner.on(".").join(table.getQualifiedName())) + .collect(Collectors.joining(", ")), + handlerContext.engine().name() + ); } return planWithBindableConvention(); @@ -222,6 +220,10 @@ public PlannerResult plan() throw buildSQLPlanningError(e); } catch (RuntimeException e) { + if (e instanceof DruidException) { + throw e; + } + // Calcite throws a Runtime exception as the result of an IllegalTargetException // as the result of invoking a method dynamically, when that method throws an // exception. Unwrap the exception if this exception is from Calcite. @@ -284,10 +286,10 @@ public void visit(RelNode node, int ordinal, RelNode parent) * things that are not directly translatable to native Druid queries such * as system tables and just a general purpose (but definitely not optimized) * fall-back. - * + *

* See {@link #planWithDruidConvention} which will handle things which are * directly translatable to native Druid queries. - * + *

* The bindable path handles parameter substitution of any values not * bound by the earlier steps. */ @@ -323,43 +325,43 @@ private PlannerResult planWithBindableConvention() } else { final BindableRel theRel = bindableRel; final DataContext dataContext = plannerContext.createDataContext( - planner.getTypeFactory(), - plannerContext.getParameters() + planner.getTypeFactory(), + plannerContext.getParameters() ); final Supplier> resultsSupplier = () -> { final Enumerable enumerable = theRel.bind(dataContext); final Enumerator enumerator = enumerable.enumerator(); return QueryResponse.withEmptyContext( Sequences.withBaggage(new BaseSequence<>( - new BaseSequence.IteratorMaker>() - { - @Override - public QueryHandler.EnumeratorIterator make() + new BaseSequence.IteratorMaker>() { - return new QueryHandler.EnumeratorIterator<>(new Iterator() + @Override + public QueryHandler.EnumeratorIterator make() { - @Override - public boolean hasNext() + return new QueryHandler.EnumeratorIterator<>(new Iterator() { - return enumerator.moveNext(); - } - - @Override - public Object[] next() - { - return (Object[]) enumerator.current(); - } - }); - } - - @Override - public void cleanup(QueryHandler.EnumeratorIterator iterFromMake) - { + @Override + public boolean hasNext() + { + return enumerator.moveNext(); + } + + @Override + public Object[] next() + { + return (Object[]) enumerator.current(); + } + }); + } + + @Override + public void cleanup(QueryHandler.EnumeratorIterator iterFromMake) + { + } } - } - ), enumerator::close) - ); + ), enumerator::close) + ); }; return new PlannerResult(resultsSupplier, rootQueryRel.validatedRowType); } @@ -572,12 +574,11 @@ protected PlannerResult planWithDruidConvention() throws ValidationException * This method wraps the root with a {@link LogicalSort} that applies a limit (no ordering change). 
If the outer rel * is already a {@link Sort}, we can merge our outerLimit into it, similar to what is going on in * {@link org.apache.druid.sql.calcite.rule.SortCollapseRule}. - * + *

* The {@link PlannerContext#CTX_SQL_OUTER_LIMIT} flag that controls this wrapping is meant for internal use only by * the web console, allowing it to apply a limit to queries without rewriting the original SQL. * * @param root root node - * * @return root node wrapped with a limiting logical sort if a limit is specified in the query context. */ @Nullable @@ -628,19 +629,20 @@ private DruidException buildSQLPlanningError(RelOptPlanner.CannotPlanException e errorMessage = exception.getMessage(); } if (errorMessage == null) { - return new SqlUnsupportedError( - exception, - "Query", - "Query not supported. Please check Broker logs for additional details." - ); + throw DruidException.forPersona(DruidException.Persona.OPERATOR) + .ofCategory(DruidException.Category.UNSUPPORTED) + .build(exception, "Unhandled Query Planning Failure, see broker logs for details"); } else { // Planning errors are more like hints: it isn't guaranteed that the planning error is actually what went wrong. - return new SqlUnsupportedError( - exception, - "QueryWithReason", - "Query not supported. Possible error: ${message}" - ) - .withValue("message", errorMessage); + // For this reason, we consider these as targetting a more expert persona, i.e. the admin instead of the actual + // user. 
+ throw DruidException.forPersona(DruidException.Persona.ADMIN) + .ofCategory(DruidException.Category.INVALID_INPUT) + .build( + exception, + "Query planning failed for unknown reason, our best guess is this [%s]", + errorMessage + ); } } @@ -649,7 +651,8 @@ public static class SelectHandler extends QueryHandler public SelectHandler( HandlerContext handlerContext, SqlNode sqlNode, - SqlExplain explain) + SqlExplain explain + ) { super(handlerContext, sqlNode, explain); } @@ -658,11 +661,7 @@ public SelectHandler( public void validate() { if (!handlerContext.plannerContext().featureAvailable(EngineFeature.CAN_SELECT)) { - throw new SqlValidationError( - "WrongEngineForSelect", - "Cannot execute SELECT with SQL engine [${engine}]" - ) - .withValue("engine", handlerContext.engine().name()); + throw InvalidSqlInput.exception("Cannot execute SELECT with SQL engine [%s]", handlerContext.engine().name()); } super.validate(); } @@ -688,7 +687,8 @@ protected QueryMaker buildQueryMaker(final RelRoot rootQueryRel) throws Validati { return handlerContext.engine().buildQueryMakerForSelect( rootQueryRel, - handlerContext.plannerContext()); + handlerContext.plannerContext() + ); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java index ca47ffacfd61..4fc27d3af403 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/RelParameterizerShuttle.java @@ -44,7 +44,7 @@ import org.apache.calcite.rex.RexShuttle; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.druid.error.DruidException; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.InvalidSqlInput; /** * Traverse {@link RelNode} tree and replaces all {@link RexDynamicParam} with {@link org.apache.calcite.rex.RexLiteral} @@ -220,10 +220,6 @@ private 
RexNode bind(RexNode node, RexBuilder builder, RelDataTypeFactory typeFa private static DruidException unbound(RexDynamicParam dynamicParam) { - return new SqlValidationError( - "UnboundParameter", - "Parameter at position [${index}] is not bound" - ) - .withValue("index", dynamicParam.getIndex() + 1); + return InvalidSqlInput.exception("No value bound for parameter (position [%s])", dynamicParam.getIndex() + 1); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java index bdaa019c0582..8420912742df 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java @@ -29,7 +29,7 @@ import org.apache.calcite.sql.util.SqlShuttle; import org.apache.calcite.util.TimestampString; import org.apache.druid.error.DruidException; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.InvalidSqlInput; import java.util.ArrayList; import java.util.Arrays; @@ -39,7 +39,7 @@ * Replaces all {@link SqlDynamicParam} encountered in an {@link SqlNode} tree * with a {@link SqlLiteral} if a value binding exists for the parameter, if * possible. This is used in tandem with {@link RelParameterizerShuttle}. - * + *

* It is preferable that all parameters are placed here to pick up as many * optimizations as possible, but the facilities to convert jdbc types to * {@link SqlLiteral} are a bit less rich here than exist for converting a @@ -47,7 +47,7 @@ * {@link org.apache.calcite.rex.RexLiteral}, which is why * {@link SqlParameterizerShuttle} and {@link RelParameterizerShuttle} * both exist. - * + *

* As it turns out, most parameters will be replaced in this shuttle. * The one exception are DATE types expressed as integers. For reasons * known only to Calcite, the {@code RexBuilder.clean()} method, used by @@ -108,11 +108,7 @@ public SqlNode visit(SqlDynamicParam param) private static DruidException unbound(SqlDynamicParam param) { - return new SqlValidationError( - "UnboundParameter", - "Parameter at position [${index}] is not bound" - ) - .withValue("index", param.getIndex() + 1); + return InvalidSqlInput.exception("No value bound for parameter (position [%s])", param.getIndex() + 1); } /** @@ -133,11 +129,7 @@ private SqlNode createArrayLiteral(Object value, int posn) List args = new ArrayList<>(list.size()); for (Object element : list) { if (element == null) { - throw new SqlValidationError( - "NullParameter", - "Parameter [${posn}]: An array parameter cannot contain null values" - ) - .withValue("posn", posn + 1); + throw InvalidSqlInput.exception("An array parameter [%s] cannot contain null values", posn + 1); } SqlNode node; if (element instanceof String) { @@ -149,12 +141,11 @@ private SqlNode createArrayLiteral(Object value, int posn) } else if (element instanceof Boolean) { node = SqlLiteral.createBoolean((Boolean) value, SqlParserPos.ZERO); } else { - throw new SqlValidationError( - "InvalidParameter", - "Parameter [${posn}]: An array parameter does not allow values of type[${type}]" - ) - .withValue("posn", posn + 1) - .withValue("type", value.getClass().getSimpleName()); + throw InvalidSqlInput.exception( + "An array parameter [%s] cannot contain values of type [%s]", + posn + 1, + value.getClass() + ); } args.add(node); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java index 40eeb326f8ad..267feae1f0f3 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlStatementHandler.java @@ -21,8 +21,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.calcite.schema.SchemaPlus; -import org.apache.calcite.sql.SqlNode; -import org.apache.druid.error.DruidException; import org.apache.druid.query.QueryContext; import org.apache.druid.server.security.ResourceAction; import org.apache.druid.sql.calcite.run.SqlEngine; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java index 0cc17dcb2679..4ea14c3a7414 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java @@ -38,7 +38,7 @@ import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlKind; -import org.apache.druid.error.SqlUnsupportedError; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.StringUtils; @@ -360,11 +360,10 @@ public static JoinType toDruidJoinType(JoinRelType calciteJoinType) case INNER: return JoinType.INNER; default: - throw new SqlUnsupportedError( - "JoinType", - "Cannot handle joinType [${type}]" - ) - .withValue("type", calciteJoinType); + throw InvalidSqlInput.exception( + "Cannot handle joinType [%s]", + calciteJoinType + ); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java index 1f71a9212cbb..d0fcacf5df26 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java @@ -1430,12 +1430,14 @@ private ScanQuery toScanQuery() } if 
(!plannerContext.featureAvailable(EngineFeature.SCAN_ORDER_BY_NON_TIME) && !orderByColumns.isEmpty()) { - if (orderByColumns.size() > 1 || orderByColumns.stream() - .anyMatch(orderBy -> !orderBy.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME))) { - // Cannot handle this ordering. - // Scan cannot ORDER BY non-time columns. + if (orderByColumns.size() > 1 || !ColumnHolder.TIME_COLUMN_NAME.equals(orderByColumns.get(0).getColumnName())) { + // We cannot handle this ordering, but we encounter this ordering as part of the exploration of the volcano + // planner, which means that the query that we are looking right now might only be doing this as one of the + // potential branches of exploration rather than being a semantic requirement of the query itself. So, it is + // not safe to send an error message telling the end-user exactly what is happening, instead we need to set the + // planning error and hope. plannerContext.setPlanningError( - "SQL query requires order by non-time column %s, which is not supported.", + "SQL query requires order by non-time column [%s], which is not supported.", orderByColumns ); return null; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java index 945f05e5522f..ea71dfd90986 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java @@ -25,7 +25,7 @@ import org.apache.calcite.plan.RelOptRuleCall; import org.apache.calcite.rel.logical.LogicalValues; import org.apache.calcite.rex.RexLiteral; -import org.apache.druid.error.SqlUnsupportedError; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.query.InlineDataSource; import org.apache.druid.segment.column.RowSignature; import org.apache.druid.sql.calcite.planner.Calcites; @@ -42,7 +42,7 @@ * This rule is used when the query 
directly reads in-memory tuples. For example, given a query of * `SELECT 1 + 1`, the query planner will create {@link LogicalValues} that contains one tuple, * which in turn containing one column of value 2. - * + *

* The query planner can sometimes reduce a regular query to a query that reads in-memory tuples. * For example, `SELECT count(*) FROM foo WHERE 1 = 0` is reduced to `SELECT 0`. This rule will * be used for this case as well. @@ -126,23 +126,18 @@ static Object getValueFromLiteral(RexLiteral literal, PlannerContext plannerCont return Calcites.calciteDateTimeLiteralToJoda(literal, plannerContext.getTimeZone()).getMillis(); case NULL: if (!literal.isNull()) { - throw new SqlUnsupportedError( - "NonNullConst", - "Non-null constant [${expr}] for a NULL literal" - ) - .withValue("expr", literal); + throw InvalidSqlInput.exception("Expected a NULL literal, but got non-null constant [%s]", literal); } return null; case TIMESTAMP_WITH_LOCAL_TIME_ZONE: case TIME: case TIME_WITH_LOCAL_TIME_ZONE: default: - throw new SqlUnsupportedError( - "Literal", - "Literal [${expr}] type [${type}] is not supported" - ) - .withValue("expr", literal) - .withValue("type", literal.getType().getSqlTypeName()); + throw InvalidSqlInput.exception( + "Cannot handle literal [%s] of unsupported type [%s].", + literal, + literal.getType().getSqlTypeName() + ); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java index 722e1e3ce63b..f5d9056246b3 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeSqlEngine.java @@ -25,9 +25,9 @@ import org.apache.calcite.rel.RelRoot; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.druid.error.InvalidSqlInput; import org.apache.druid.guice.LazySingleton; import org.apache.druid.java.util.common.IAE; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.query.groupby.GroupByQuery; import org.apache.druid.query.timeboundary.TimeBoundaryQuery; import 
org.apache.druid.server.QueryLifecycleFactory; @@ -145,18 +145,12 @@ public QueryMaker buildQueryMakerForInsert( * Validates that {@link PlannerContext#CTX_SQL_JOIN_ALGORITHM} is {@link JoinAlgorithm#BROADCAST}. This is the * only join algorithm supported by native queries. */ - private static void validateJoinAlgorithm(final Map queryContext) throws ValidationException + private static void validateJoinAlgorithm(final Map queryContext) { final JoinAlgorithm joinAlgorithm = PlannerContext.getJoinAlgorithm(queryContext); if (joinAlgorithm != JoinAlgorithm.BROADCAST) { - throw new ValidationException( - StringUtils.format( - "Join algorithm [%s] is not supported by engine [%s]", - joinAlgorithm, - NAME - ) - ); + throw InvalidSqlInput.exception("Join algorithm [%s] is not supported by engine [%s]", joinAlgorithm, NAME); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java index 5d6d8f3b995b..cc7bef80f712 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/SqlEngines.java @@ -20,7 +20,7 @@ package org.apache.druid.sql.calcite.run; import org.apache.calcite.tools.ValidationException; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.InvalidInput; import java.util.Map; import java.util.Set; @@ -42,11 +42,7 @@ public static void validateNoSpecialContextKeys( { for (String contextParameterName : queryContext.keySet()) { if (specialContextKeys.contains(contextParameterName)) { - throw new SqlValidationError( - "IllegalContext", - "Query context parameter [${param}] is not allowed" - ) - .withValue("param", contextParameterName); + throw InvalidInput.exception("Query context parameter [%s] is not allowed", contextParameterName); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java 
b/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java index 13e2c268d06b..32abe56ee8d6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java @@ -220,6 +220,11 @@ protected void generateTypeString(StringBuilder sb, boolean withDetail) sb.append(columnType.asTypeString()); } + public ColumnType getColumnType() + { + return columnType; + } + public String getComplexTypeName() { return columnType.getComplexTypeName(); diff --git a/sql/src/main/java/org/apache/druid/sql/guice/SqlModule.java b/sql/src/main/java/org/apache/druid/sql/guice/SqlModule.java index d8ac8178a89d..56d0d2d5d41f 100644 --- a/sql/src/main/java/org/apache/druid/sql/guice/SqlModule.java +++ b/sql/src/main/java/org/apache/druid/sql/guice/SqlModule.java @@ -27,8 +27,6 @@ import com.google.inject.Module; import com.google.inject.Provides; import org.apache.druid.catalog.model.TableDefnRegistry; -import org.apache.druid.error.RestExceptionEncoder; -import org.apache.druid.error.StandardRestExceptionEncoder; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.PolyBind; import org.apache.druid.guice.annotations.NativeQuery; @@ -126,9 +124,6 @@ public void configure(Binder binder) // Default do-nothing catalog resolver binder.bind(CatalogResolver.class).toInstance(CatalogResolver.NULL_RESOLVER); - - // Default exception encoder - binder.bind(RestExceptionEncoder.class).toInstance(StandardRestExceptionEncoder.instance()); } private boolean isEnabled() diff --git a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java index a664966e3de4..4adea5d8d84e 100644 --- a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java +++ b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java @@ -23,7 +23,6 @@ import com.google.common.base.Preconditions; import com.google.inject.Inject; import 
org.apache.druid.common.exception.SanitizableException; -import org.apache.druid.error.RestExceptionEncoder; import org.apache.druid.guice.annotations.NativeQuery; import org.apache.druid.guice.annotations.Self; import org.apache.druid.java.util.common.StringUtils; @@ -57,7 +56,6 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; - import java.io.IOException; import java.io.OutputStream; import java.util.LinkedHashMap; @@ -82,7 +80,6 @@ public class SqlResource private final ServerConfig serverConfig; private final ResponseContextConfig responseContextConfig; private final DruidNode selfNode; - private final RestExceptionEncoder exceptionEncoder; @Inject SqlResource( @@ -92,8 +89,7 @@ public class SqlResource final SqlLifecycleManager sqlLifecycleManager, final ServerConfig serverConfig, ResponseContextConfig responseContextConfig, - @Self DruidNode selfNode, - RestExceptionEncoder exceptionEncoder + @Self DruidNode selfNode ) { this.jsonMapper = Preconditions.checkNotNull(jsonMapper, "jsonMapper"); @@ -103,7 +99,6 @@ public class SqlResource this.serverConfig = Preconditions.checkNotNull(serverConfig, "serverConfig"); this.responseContextConfig = responseContextConfig; this.selfNode = selfNode; - this.exceptionEncoder = exceptionEncoder; } @POST @@ -231,8 +226,7 @@ public SqlResourceQueryResultPusher( SqlResource.QUERY_METRIC_COUNTER, sqlQueryId, MediaType.APPLICATION_JSON_TYPE, - headers, - exceptionEncoder + headers ); this.sqlQueryId = sqlQueryId; this.stmt = stmt; @@ -346,6 +340,5 @@ public void writeException(Exception ex, OutputStream out) throws IOException } out.write(jsonMapper.writeValueAsBytes(ex)); } - } } diff --git a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java index 45af1d6d7044..ac1364848aa5 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java @@ -25,8 +25,8 @@ import com.google.common.util.concurrent.MoreExecutors; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; -import org.apache.druid.error.SqlParseError; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.concurrent.Execs; import org.apache.druid.java.util.common.guava.LazySequence; @@ -36,7 +36,6 @@ import org.apache.druid.query.DefaultQueryConfig; import org.apache.druid.query.Query; import org.apache.druid.query.QueryContexts; -import org.apache.druid.query.QueryException; import org.apache.druid.query.QueryRunnerFactoryConglomerate; import org.apache.druid.segment.join.JoinableFactoryWrapper; import org.apache.druid.server.QueryScheduler; @@ -62,17 +61,18 @@ import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker; import org.apache.druid.sql.http.SqlQuery; import org.easymock.EasyMock; +import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import javax.servlet.http.HttpServletRequest; - import java.io.IOException; import java.util.Collections; import java.util.List; @@ -88,12 +88,12 @@ public class SqlStatementTest { private static QueryRunnerFactoryConglomerate conglomerate; + private static SpecificSegmentsQuerySegmentWalker walker; private static Closer resourceCloser; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); + @ClassRule + public static TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); - private 
SpecificSegmentsQuerySegmentWalker walker; private TestRequestLogger testRequestLogger; private ListeningExecutorService executorService; private SqlStatementFactory sqlStatementFactory; @@ -101,21 +101,11 @@ public class SqlStatementTest ImmutableMap.of("DEFAULT_KEY", "DEFAULT_VALUE")); @BeforeClass - public static void setUpClass() + public static void setUpClass() throws Exception { resourceCloser = Closer.create(); conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser); - } - - @AfterClass - public static void tearDownClass() throws IOException - { - resourceCloser.close(); - } - @Before - public void setUp() throws Exception - { final QueryScheduler scheduler = new QueryScheduler( 5, ManualQueryPrioritizationStrategy.INSTANCE, @@ -128,15 +118,25 @@ public Sequence run(Query query, Sequence resultSequence) { return super.run( query, - new LazySequence(() -> { - return resultSequence; - }) + new LazySequence(() -> resultSequence) ); } }; - executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s")); walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder(), scheduler); + resourceCloser.register(walker); + } + + @AfterClass + public static void tearDownClass() throws IOException + { + resourceCloser.close(); + } + + @Before + public void setUp() throws Exception + { + executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s")); final PlannerConfig plannerConfig = PlannerConfig.builder().serializeComplexValues(false).build(); final DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema( @@ -181,8 +181,6 @@ public Sequence run(Query query, Sequence resultSequence) @After public void tearDown() throws Exception { - walker.close(); - walker = null; executorService.shutdownNow(); executorService.awaitTermination(2, TimeUnit.SECONDS); } @@ -225,7 +223,8 @@ public void testDirectHappyPath() { SqlQueryPlus sqlReq = queryPlus( 
"SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); ResultSet resultSet = stmt.plan(); assertTrue(resultSet.runnable()); @@ -246,7 +245,8 @@ public void testDirectPlanTwice() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); stmt.plan(); try { @@ -263,7 +263,8 @@ public void testDirectExecTwice() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); ResultSet resultSet = stmt.plan(); resultSet.run(); @@ -281,21 +282,19 @@ public void testDirectSyntaxError() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); try { stmt.execute(); fail(); } - catch (SqlParseError e) { - // Expected - Assert.assertEquals( - "SQL-Parse-UnexpectedToken", - e.errorCode() - ); - Assert.assertEquals( - QueryException.SQL_PARSE_FAILED_ERROR_CODE, - e.getErrorCode() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Received an unexpected token [AS ]") ); } } @@ -305,21 +304,19 @@ public void testDirectValidationError() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.bogus", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); try { stmt.execute(); fail(); } 
- catch (SqlValidationError e) { - // Expected - Assert.assertEquals( - "SQL-Validation-General", - e.errorCode() - ); - Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Object 'bogus' not found within 'druid'") ); } } @@ -329,7 +326,8 @@ public void testDirectPermissionError() { SqlQueryPlus sqlReq = queryPlus( "select count(*) from forbiddenDatasource", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); DirectStatement stmt = sqlStatementFactory.directStatement(sqlReq); try { stmt.execute(); @@ -353,7 +351,7 @@ private SqlQuery makeQuery(String sql) false, null, null - ); + ); } @Test @@ -362,7 +360,7 @@ public void testHttpHappyPath() HttpStatement stmt = sqlStatementFactory.httpStatement( makeQuery("SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo"), request(true) - ); + ); List results = stmt.execute().getResults().toList(); assertEquals(1, results.size()); assertEquals(6L, results.get(0)[0]); @@ -375,16 +373,17 @@ public void testHttpSyntaxError() HttpStatement stmt = sqlStatementFactory.httpStatement( makeQuery("SELECT COUNT(*) AS cnt, 'foo' AS"), request(true) - ); + ); try { stmt.execute(); fail(); } - catch (SqlParseError e) { - // Expected - Assert.assertEquals( - QueryException.SQL_PARSE_FAILED_ERROR_CODE, - e.getErrorCode() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Received an unexpected token [AS ]") ); } } @@ -395,20 +394,17 @@ public void testHttpValidationError() HttpStatement stmt = sqlStatementFactory.httpStatement( makeQuery("SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.bogus"), request(true) - ); + ); try { stmt.execute(); fail(); } - catch (SqlValidationError e) { - // Expected - Assert.assertEquals( - "SQL-Validation-General", - 
e.errorCode() - ); - Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Object 'bogus' not found within 'druid'") ); } } @@ -419,7 +415,7 @@ public void testHttpPermissionError() HttpStatement stmt = sqlStatementFactory.httpStatement( makeQuery("select count(*) from forbiddenDatasource"), request(false) - ); + ); try { stmt.execute(); fail(); @@ -437,7 +433,8 @@ public void testPreparedHappyPath() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.foo", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); PreparedStatement stmt = sqlStatementFactory.preparedStatement(sqlReq); PrepareResult prepareResult = stmt.prepare(); @@ -467,17 +464,19 @@ public void testPrepareSyntaxError() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); PreparedStatement stmt = sqlStatementFactory.preparedStatement(sqlReq); try { stmt.prepare(); fail(); } - catch (SqlParseError e) { - // Expected - Assert.assertEquals( - QueryException.SQL_PARSE_FAILED_ERROR_CODE, - e.getErrorCode() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Received an unexpected token [AS ]") ); } } @@ -487,21 +486,19 @@ public void testPrepareValidationError() { SqlQueryPlus sqlReq = queryPlus( "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo FROM druid.bogus", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); PreparedStatement stmt = sqlStatementFactory.preparedStatement(sqlReq); try { stmt.prepare(); fail(); } - catch (SqlValidationError e) { - // Expected - Assert.assertEquals( - "SQL-Validation-General", - e.errorCode() - ); - Assert.assertEquals( - 
QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageContains("Object 'bogus' not found within 'druid'") ); } } @@ -511,7 +508,8 @@ public void testPreparePermissionError() { SqlQueryPlus sqlReq = queryPlus( "select count(*) from forbiddenDatasource", - CalciteTests.REGULAR_USER_AUTH_RESULT); + CalciteTests.REGULAR_USER_AUTH_RESULT + ); PreparedStatement stmt = sqlStatementFactory.preparedStatement(sqlReq); try { stmt.prepare(); diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java index cd036d04c3ac..88a237bd42d1 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java @@ -45,6 +45,7 @@ import org.apache.druid.initialization.CoreInjectorBuilder; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.RE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.concurrent.Execs; import org.apache.druid.java.util.common.guava.Yielder; @@ -96,6 +97,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -123,6 +125,7 @@ import java.util.Map; import java.util.Properties; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.ScheduledExecutorService; @@ -144,21 +147,27 @@ public class DruidAvaticaHandlerTest extends CalciteTestBase // This must match the number of Connection objects created in testTooManyStatements() AVATICA_CONFIG.maxConnections = 
CONNECTION_LIMIT; AVATICA_CONFIG.maxStatementsPerConnection = STATEMENT_LIMIT; + System.setProperty("user.timezone", "UTC"); } private static final String DUMMY_SQL_QUERY_ID = "dummy"; + @ClassRule + public static TemporaryFolder temporaryFolder = new TemporaryFolder(); + private static QueryRunnerFactoryConglomerate conglomerate; + private static SpecificSegmentsQuerySegmentWalker walker; private static Closer resourceCloser; private final boolean nullNumeric = !NullHandling.replaceWithDefault(); @BeforeClass - public static void setUpClass() + public static void setUpClass() throws Exception { resourceCloser = Closer.create(); conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser); - System.setProperty("user.timezone", "UTC"); + walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder()); + resourceCloser.register(walker); } @AfterClass @@ -167,16 +176,12 @@ public static void tearDownClass() throws IOException resourceCloser.close(); } - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); private final PlannerConfig plannerConfig = new PlannerConfig(); private final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable(); private final ExprMacroTable macroTable = CalciteTests.createExprMacroTable(); - private SpecificSegmentsQuerySegmentWalker walker; private ServerWrapper server; private Connection client; private Connection clientNoTrailingSlash; @@ -249,60 +254,66 @@ protected String getJdbcUrlTail() protected AbstractAvaticaHandler getAvaticaHandler(final DruidMeta druidMeta) { return new DruidAvaticaJsonHandler( - druidMeta, - new DruidNode("dummy", "dummy", false, 1, null, true, false), - new AvaticaMonitor() + druidMeta, + new DruidNode("dummy", "dummy", false, 1, null, true, false), + new AvaticaMonitor() ); } @Before public void setUp() throws Exception { - walker = 
CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder()); final DruidSchemaCatalog rootSchema = makeRootSchema(); testRequestLogger = new TestRequestLogger(); injector = new CoreInjectorBuilder(new StartupInjectorBuilder().build()) - .addModule(binder -> { - binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test"); - binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0); - binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(-1); - binder.bind(AuthenticatorMapper.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_MAPPER); - binder.bind(AuthorizerMapper.class).toInstance(CalciteTests.TEST_AUTHORIZER_MAPPER); - binder.bind(Escalator.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_ESCALATOR); - binder.bind(RequestLogger.class).toInstance(testRequestLogger); - binder.bind(DruidSchemaCatalog.class).toInstance(rootSchema); - for (NamedSchema schema : rootSchema.getNamedSchemas().values()) { - Multibinder.newSetBinder(binder, NamedSchema.class).addBinding().toInstance(schema); + .addModule( + binder -> { + binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test"); + binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0); + binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(-1); + binder.bind(AuthenticatorMapper.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_MAPPER); + binder.bind(AuthorizerMapper.class).toInstance(CalciteTests.TEST_AUTHORIZER_MAPPER); + binder.bind(Escalator.class).toInstance(CalciteTests.TEST_AUTHENTICATOR_ESCALATOR); + binder.bind(RequestLogger.class).toInstance(testRequestLogger); + binder.bind(DruidSchemaCatalog.class).toInstance(rootSchema); + for (NamedSchema schema : rootSchema.getNamedSchemas().values()) { + Multibinder.newSetBinder(binder, NamedSchema.class).addBinding().toInstance(schema); + } + binder.bind(QueryLifecycleFactory.class) + .toInstance(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate)); + 
binder.bind(DruidOperatorTable.class).toInstance(operatorTable); + binder.bind(ExprMacroTable.class).toInstance(macroTable); + binder.bind(PlannerConfig.class).toInstance(plannerConfig); + binder.bind(String.class) + .annotatedWith(DruidSchemaName.class) + .toInstance(CalciteTests.DRUID_SCHEMA_NAME); + binder.bind(AvaticaServerConfig.class).toInstance(AVATICA_CONFIG); + binder.bind(ServiceEmitter.class).to(NoopServiceEmitter.class); + binder.bind(QuerySchedulerProvider.class).in(LazySingleton.class); + binder.bind(QueryScheduler.class) + .toProvider(QuerySchedulerProvider.class) + .in(LazySingleton.class); + binder.install(new SqlModule.SqlStatementFactoryModule()); + binder.bind(new TypeLiteral>() + { + }).toInstance(Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))); + binder.bind(CalciteRulesManager.class).toInstance(new CalciteRulesManager(ImmutableSet.of())); + binder.bind(JoinableFactoryWrapper.class).toInstance(CalciteTests.createJoinableFactoryWrapper()); + binder.bind(CatalogResolver.class).toInstance(CatalogResolver.NULL_RESOLVER); } - binder.bind(QueryLifecycleFactory.class) - .toInstance(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate)); - binder.bind(DruidOperatorTable.class).toInstance(operatorTable); - binder.bind(ExprMacroTable.class).toInstance(macroTable); - binder.bind(PlannerConfig.class).toInstance(plannerConfig); - binder.bind(String.class) - .annotatedWith(DruidSchemaName.class) - .toInstance(CalciteTests.DRUID_SCHEMA_NAME); - binder.bind(AvaticaServerConfig.class).toInstance(AVATICA_CONFIG); - binder.bind(ServiceEmitter.class).to(NoopServiceEmitter.class); - binder.bind(QuerySchedulerProvider.class).in(LazySingleton.class); - binder.bind(QueryScheduler.class) - .toProvider(QuerySchedulerProvider.class) - .in(LazySingleton.class); - binder.install(new SqlModule.SqlStatementFactoryModule()); - binder.bind(new TypeLiteral>(){}).toInstance(Suppliers.ofInstance(new DefaultQueryConfig(ImmutableMap.of()))); - 
binder.bind(CalciteRulesManager.class).toInstance(new CalciteRulesManager(ImmutableSet.of())); - binder.bind(JoinableFactoryWrapper.class).toInstance(CalciteTests.createJoinableFactoryWrapper()); - binder.bind(CatalogResolver.class).toInstance(CatalogResolver.NULL_RESOLVER); - } - ) + ) .build(); DruidMeta druidMeta = injector.getInstance(DruidMeta.class); server = new ServerWrapper(druidMeta); client = server.getUserConnection(); superuserClient = server.getConnection(CalciteTests.TEST_SUPERUSER_NAME, "druid"); - clientNoTrailingSlash = DriverManager.getConnection(StringUtils.maybeRemoveTrailingSlash(server.url), CalciteTests.TEST_SUPERUSER_NAME, "druid"); + clientNoTrailingSlash = DriverManager.getConnection( + StringUtils.maybeRemoveTrailingSlash(server.url), + CalciteTests.TEST_SUPERUSER_NAME, + "druid" + ); final Properties propertiesLosAngeles = new Properties(); propertiesLosAngeles.setProperty("sqlTimeZone", "America/Los_Angeles"); @@ -324,8 +335,6 @@ public void tearDown() throws Exception clientNoTrailingSlash = null; server = null; } - walker.close(); - walker = null; } @Test @@ -820,35 +829,48 @@ public void testDatabaseMetaDataColumnsWithSuperuser() throws SQLException } @Test(timeout = 90_000L) - public void testConcurrentQueries() throws InterruptedException, ExecutionException + public void testConcurrentQueries() { - final List> futures = new ArrayList<>(); - final ListeningExecutorService exec = MoreExecutors.listeningDecorator( - Execs.multiThreaded(AVATICA_CONFIG.getMaxStatementsPerConnection(), "DruidAvaticaHandlerTest-%d") - ); - for (int i = 0; i < 2000; i++) { - final String query = StringUtils.format("SELECT COUNT(*) + %s AS ci FROM foo", i); - futures.add( - exec.submit(() -> { - try ( - final Statement statement = client.createStatement(); - final ResultSet resultSet = statement.executeQuery(query) - ) { - final List> rows = getRows(resultSet); - return ((Number) Iterables.getOnlyElement(rows).get("ci")).intValue(); - } - catch 
(SQLException e) { - throw new RuntimeException(e); - } - }) - ); - } + queryLogHook.withSkippedLog( + v -> { + final List> futures = new ArrayList<>(); + final ListeningExecutorService exec = MoreExecutors.listeningDecorator( + Execs.multiThreaded(AVATICA_CONFIG.getMaxStatementsPerConnection(), "DruidAvaticaHandlerTest-%d") + ); + for (int i = 0; i < 2000; i++) { + final String query = StringUtils.format("SELECT COUNT(*) + %s AS ci FROM foo", i); + futures.add( + exec.submit(() -> { + try ( + final Statement statement = client.createStatement(); + final ResultSet resultSet = statement.executeQuery(query) + ) { + final List> rows = getRows(resultSet); + return ((Number) Iterables.getOnlyElement(rows).get("ci")).intValue(); + } + catch (SQLException e) { + throw new RuntimeException(e); + } + }) + ); + } - final List integers = Futures.allAsList(futures).get(); - for (int i = 0; i < 2000; i++) { - Assert.assertEquals(i + 6, (int) integers.get(i)); - } - exec.shutdown(); + final List integers; + try { + integers = Futures.allAsList(futures).get(); + } + catch (InterruptedException e) { + throw new RE(e); + } + catch (ExecutionException e) { + throw new RE(e); + } + for (int i = 0; i < 2000; i++) { + Assert.assertEquals(i + 6, (int) integers.get(i)); + } + exec.shutdown(); + } + ); } @Test @@ -1246,7 +1268,8 @@ public void testSqlRequestLogPrepared() throws SQLException @Test public void testParameterBinding() throws SQLException { - try (PreparedStatement statement = client.prepareStatement("SELECT COUNT(*) AS cnt FROM druid.foo WHERE dim1 = ? OR dim1 = ?")) { + try (PreparedStatement statement = client.prepareStatement( + "SELECT COUNT(*) AS cnt FROM druid.foo WHERE dim1 = ? 
OR dim1 = ?")) { statement.setString(1, "abc"); statement.setString(2, "def"); final ResultSet resultSet = statement.executeQuery(); @@ -1264,7 +1287,7 @@ public void testParameterBinding() throws SQLException public void testSysTableParameterBindingRegularUser() throws SQLException { try (PreparedStatement statement = - client.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { + client.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { statement.setString(1, "dummy"); Assert.assertThrows( @@ -1279,7 +1302,7 @@ public void testSysTableParameterBindingRegularUser() throws SQLException public void testSysTableParameterBindingSuperUser() throws SQLException { try (PreparedStatement statement = - superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { + superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { statement.setString(1, "dummy"); Assert.assertEquals( ImmutableList.of( @@ -1294,7 +1317,7 @@ public void testSysTableParameterBindingSuperUser() throws SQLException public void testExecuteMany() throws SQLException { try (PreparedStatement statement = - superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { + superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?")) { statement.setString(1, "dummy"); Assert.assertEquals( ImmutableList.of( @@ -1586,7 +1609,7 @@ public void testUnauthorizedTable() { final String query = "SELECT * FROM " + CalciteTests.FORBIDDEN_DATASOURCE; final String expectedError = "Error 2 (00002) : Error while executing SQL \"" + - query + "\": Remote driver error: " + Access.DEFAULT_ERROR_MESSAGE; + query + "\": Remote driver error: " + Access.DEFAULT_ERROR_MESSAGE; try (Statement statement = client.createStatement()) { statement.executeQuery(query); } @@ -1624,10 +1647,11 @@ public Meta.Frame 
call() } /** - * Test the async aspect of the Avatica implementation. The fetch of the - * first batch takes 3 seconds (due to a sleep). However, the client will - * wait only 1 second. So, we should get ~3 empty batches before we get - * the first batch with rows. + * Test the async aspect of the Avatica implementation. Uses a countdown latches to provide + * deterministic asynchronous behavior of not having results ready for the first 3 fetches. + *

+ * We set the fetch timeout to a small 1ms value because we want the test to complete fast and + * are ensuring the proper happens-before relationships with latches instead of time. */ @Test public void testAsync() throws Exception @@ -1636,24 +1660,57 @@ public void testAsync() throws Exception config.maxConnections = CONNECTION_LIMIT; config.maxStatementsPerConnection = STATEMENT_LIMIT; config.maxRowsPerFrame = 2; - config.fetchTimeoutMs = 1000; + config.fetchTimeoutMs = 1; final List frames = new ArrayList<>(); final ScheduledExecutorService exec = Execs.scheduledSingleThreaded("testMaxRowsPerFrame"); + final CountDownLatch startLatch = new CountDownLatch(1); + final CountDownLatch resultsLatch = new CountDownLatch(1); DruidMeta druidMeta = new DruidMeta( makeStatementFactory(), config, new ErrorHandler(new ServerConfig()), exec, injector.getInstance(AuthenticatorMapper.class).getAuthenticatorChain(), - new ResultFetcherFactory(config.fetchTimeoutMs) { + new ResultFetcherFactory(config.fetchTimeoutMs) + { + + @Override + public int fetchTimeoutMs() + { + // We override fetchTimeoutMs because the constructor here is enforcing a minimum timeout of 1s, so we + // have to workaround the constructor code by overriding this method. Luckily the internal field is + // not actually being referenced internally and is instead routing through this method. In a future + // refactoring of this code, we should move such enforcement onto the configuration layer and now + // squirreled away inside a constructor. 
+ return config.fetchTimeoutMs; + } + @Override public ResultFetcher newFetcher( final int limit, final Yielder yielder ) { - return new TestResultFetcher(limit, yielder); + return new ResultFetcher(limit, yielder) + { + @Override + public Meta.Frame call() + { + try { + if (offset() == 0) { + startLatch.await(); + } + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + + final Meta.Frame retVal = super.call(); + resultsLatch.countDown(); + return retVal; + } + }; } } ) @@ -1665,6 +1722,15 @@ public Frame fetch( final int fetchMaxRowCount ) throws NoSuchStatementException, MissingResultsException { + if (frames.size() == 3) { + startLatch.countDown(); + try { + resultsLatch.await(); + } + catch (InterruptedException e) { + throw new RE(e); + } + } Frame frame = super.fetch(statement, offset, fetchMaxRowCount); frames.add(frame); return frame; @@ -1679,10 +1745,14 @@ public Frame fetch( "SELECT dim1 FROM druid.foo")) { List> rows = getRows(resultSet); Assert.assertEquals(6, rows.size()); - Assert.assertTrue(frames.size() > 3); + Assert.assertEquals(6, frames.size()); // 3 empty frames and then 3 frames of 2 rows each - // There should be at least one empty frame due to timeout Assert.assertFalse(frames.get(0).rows.iterator().hasNext()); + Assert.assertFalse(frames.get(1).rows.iterator().hasNext()); + Assert.assertFalse(frames.get(2).rows.iterator().hasNext()); + Assert.assertTrue(frames.get(3).rows.iterator().hasNext()); + Assert.assertTrue(frames.get(4).rows.iterator().hasNext()); + Assert.assertTrue(frames.get(5).rows.iterator().hasNext()); } } diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java index 618665c25f66..96b21619c99e 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java @@ -53,6 +53,7 @@ import org.junit.Assert; import 
org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -72,20 +73,23 @@ public class DruidStatementTest extends CalciteTestBase private static String SELECT_STAR_FROM_FOO = "SELECT * FROM druid.foo"; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); + @ClassRule + public static TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); + private static SpecificSegmentsQuerySegmentWalker walker; private static QueryRunnerFactoryConglomerate conglomerate; private static Closer resourceCloser; @BeforeClass - public static void setUpClass() + public static void setUpClass() throws Exception { resourceCloser = Closer.create(); conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser); + walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder()); + resourceCloser.register(walker); } @AfterClass @@ -94,13 +98,11 @@ public static void tearDownClass() throws IOException resourceCloser.close(); } - private SpecificSegmentsQuerySegmentWalker walker; private SqlStatementFactory sqlStatementFactory; @Before public void setUp() throws Exception { - walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder()); final PlannerConfig plannerConfig = new PlannerConfig(); final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable(); final ExprMacroTable macroTable = CalciteTests.createExprMacroTable(); @@ -129,8 +131,7 @@ public void setUp() throws Exception @After public void tearDown() throws Exception { - walker.close(); - walker = null; + } //----------------------------------------------------------------- diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java index 1d9e76df15cc..6c570661bb71 100644 
--- a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java @@ -26,11 +26,11 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.inject.Injector; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.commons.text.StringEscapeUtils; import org.apache.druid.annotations.UsedByJUnitParamsRunner; import org.apache.druid.common.config.NullHandling; import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.guice.DruidInjectorBuilder; import org.apache.druid.hll.VersionOneHyperLogLogCollector; import org.apache.druid.java.util.common.DateTimes; @@ -103,6 +103,7 @@ import org.apache.druid.sql.calcite.util.SqlTestFramework.StandardPlannerComponentSupplier; import org.apache.druid.sql.calcite.view.ViewManager; import org.apache.druid.sql.http.SqlParameter; +import org.hamcrest.MatcherAssert; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.Interval; @@ -115,7 +116,6 @@ import org.junit.rules.TemporaryFolder; import javax.annotation.Nullable; - import java.io.IOException; import java.io.PrintStream; import java.util.Arrays; @@ -489,6 +489,16 @@ protected static void resetFramework() queryFramework = null; } + protected static DruidExceptionMatcher invalidSqlIs(String s) + { + return DruidExceptionMatcher.invalidSqlInput().expectMessageIs(s); + } + + protected static DruidExceptionMatcher invalidSqlContains(String s) + { + return DruidExceptionMatcher.invalidSqlInput().expectMessageContains(s); + } + @Rule public QueryLogHook getQueryLogHook() { @@ -639,10 +649,15 @@ public void assertQueryIsUnplannable(final PlannerConfig plannerConfig, final St testQuery(plannerConfig, sql, CalciteTests.REGULAR_USER_AUTH_RESULT, ImmutableList.of(), ImmutableList.of()); } catch (DruidException e) { - Assert.assertEquals( 
- sql, - StringUtils.format("Query not supported. Possible error: %s", expectedError), - e.toString() + MatcherAssert.assertThat( + e, + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "adhoc") + .expectMessageIs( + StringUtils.format( + "Query planning failed for unknown reason, our best guess is this [%s]", + expectedError + ) + ) ); } catch (Exception e) { @@ -985,7 +1000,7 @@ public SqlStatementFactory getSqlStatementFactory( return getSqlStatementFactory( plannerConfig, new AuthConfig() - ); + ); } /** @@ -1027,6 +1042,7 @@ protected static boolean isRewriteJoinToFilter(final Map queryCo /** * Override not just the outer query context, but also the contexts of all subqueries. + * * @return */ public static Query recursivelyClearContext(final Query query, ObjectMapper queryJsonMapper) @@ -1149,7 +1165,10 @@ protected Map withTimestampResultContext( output.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD, timestampResultField); try { - output.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_GRANULARITY, queryFramework().queryJsonMapper().writeValueAsString(granularity)); + output.put( + GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_GRANULARITY, + queryFramework().queryJsonMapper().writeValueAsString(granularity) + ); } catch (JsonProcessingException e) { throw new RuntimeException(e); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index e9bf9cbca907..5ae0fa004a1a 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -26,9 +26,8 @@ import org.apache.druid.data.input.InputSource; import org.apache.druid.data.input.impl.CsvInputFormat; import org.apache.druid.data.input.impl.InlineInputSource; -import org.apache.druid.error.SqlParseError; -import org.apache.druid.error.SqlUnsupportedError; -import 
org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.granularity.Granularity; @@ -49,10 +48,11 @@ import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.parser.DruidSqlInsert; import org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.IngestHandler; +import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.util.CalciteTests; import org.hamcrest.CoreMatchers; +import org.hamcrest.MatcherAssert; import org.junit.Assert; import org.junit.Test; import org.junit.internal.matchers.ThrowableMessageMatcher; @@ -201,7 +201,11 @@ public void testInsertIntoInvalidDataSourceName() { testIngestionQuery() .sql("INSERT INTO \"in/valid\" SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "SQL-Validation-General: message=[INSERT dataSource cannot contain the '/' character.]") + .expectValidationError( + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [table]: Value [in/valid] cannot contain '/'." 
+ ) + ) .verify(); } @@ -210,7 +214,9 @@ public void testInsertUsingColumnList() { testIngestionQuery() .sql("INSERT INTO dst (foo, bar) SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlUnsupportedError.class, "SQL-Unsupported-InsertList: op=[INSERT]") + .expectValidationError( + invalidSqlIs("Operation [INSERT] cannot be run with a target column list, given [dst (`foo`, `bar`)]") + ) .verify(); } @@ -219,7 +225,7 @@ public void testUpsert() { testIngestionQuery() .sql("UPSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlUnsupportedError.class, "SQL-Unsupported-UPSERT") + .expectValidationError(invalidSqlIs("UPSERT is not supported.")) .verify(); } @@ -231,8 +237,8 @@ public void testSelectFromSystemTable() testIngestionQuery() .sql("INSERT INTO dst SELECT * FROM INFORMATION_SCHEMA.COLUMNS PARTITIONED BY ALL TIME") .expectValidationError( - SqlValidationError.class, - "SQL-Validation-WrongEngineForTable: tables=[INFORMATION_SCHEMA.COLUMNS], engine=[ingestion-test]" + DruidException.class, + "Cannot query table(s) [INFORMATION_SCHEMA.COLUMNS] with SQL engine [ingestion-test]" ) .verify(); } @@ -242,10 +248,9 @@ public void testInsertIntoSystemTable() { testIngestionQuery() .sql("INSERT INTO INFORMATION_SCHEMA.COLUMNS SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-InsertNotDatasource: op=[INSERT], table=[INFORMATION_SCHEMA.COLUMNS]" - ) + .expectValidationError(invalidSqlIs( + "Table [INFORMATION_SCHEMA.COLUMNS] does not support operation [INSERT] because it is not a Druid datasource" + )) .verify(); } @@ -255,8 +260,7 @@ public void testInsertIntoView() testIngestionQuery() .sql("INSERT INTO view.aview SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlValidationError.class, - "SQL-Validation-InsertNotDatasource: op=[INSERT], table=[view.aview]" + invalidSqlIs("Table [view.aview] does not support operation 
[INSERT] because it is not a Druid datasource") ) .verify(); } @@ -284,10 +288,9 @@ public void testInsertIntoNonexistentSchema() { testIngestionQuery() .sql("INSERT INTO nonexistent.dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-InsertNotDatasource: op=[INSERT], table=[nonexistent.dst]" - ) + .expectValidationError(invalidSqlIs( + "Table [nonexistent.dst] does not support operation [INSERT] because it is not a Druid datasource" + )) .verify(); } @@ -368,11 +371,12 @@ public void testInsertFromExternalWithSchema() throw new RuntimeException(e); } testIngestionQuery() - .sql("INSERT INTO dst SELECT * FROM %s\n" + - " (x VARCHAR, y VARCHAR, z BIGINT)\n" + - "PARTITIONED BY ALL TIME", - extern - ) + .sql( + "INSERT INTO dst SELECT * FROM %s\n" + + " (x VARCHAR, y VARCHAR, z BIGINT)\n" + + "PARTITIONED BY ALL TIME", + extern + ) .authentication(CalciteTests.SUPER_USER_AUTH_RESULT) .expectTarget("dst", externalDataSource.getSignature()) .expectResources(dataSourceWrite("dst"), Externals.EXTERNAL_RESOURCE_ACTION) @@ -821,10 +825,9 @@ public void testInsertWithoutPartitionedByWithClusteredBy() + "SELECT __time, FLOOR(m1) as floor_m1, dim1, CEIL(m2) as ceil_m2 FROM foo " + "CLUSTERED BY 2, dim1 DESC, CEIL(m2)" ) - .expectValidationError( - SqlParseError.class, - "SQL-Parse-General: message=[CLUSTERED BY found before PARTITIONED BY. 
In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause]" - ) + .expectValidationError(invalidSqlIs( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + )) .verify(); } @@ -903,15 +906,10 @@ public void testInsertWithClusteredByAndOrderBy() ); Assert.fail("Exception should be thrown"); } - catch (SqlValidationError e) { - Assert.assertEquals( - "SQL-Validation-InsertOrderBy: op=[INSERT]", - e.getMessage() - ); - Assert.assertEquals( - "Cannot use ORDER BY with INSERT, use CLUSTERED BY instead", - e.toString() - ); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs( + "Cannot use an ORDER BY clause on a Query of type [INSERT], use CLUSTERED BY instead" + )); } didTest = true; } @@ -928,15 +926,13 @@ public void testInsertWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (SqlValidationError e) { - Assert.assertEquals( - "SQL-Validation-InvalidPartitionBy: expr=['invalid_granularity']", - e.getMessage() - ); - Assert.assertEquals( - "Encountered ['invalid_granularity'] after PARTITIONED BY. Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", - e.toString() - ); + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs( + "Invalid granularity ['invalid_granularity'] after PARTITIONED BY. 
" + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR() or TIME_FLOOR()" + )); } didTest = true; } @@ -955,14 +951,10 @@ public void testInsertWithOrderBy() ); Assert.fail("Exception should be thrown"); } - catch (SqlValidationError e) { - Assert.assertEquals( - "SQL-Validation-InsertOrderBy: op=[INSERT]", - e.getMessage() - ); - Assert.assertEquals( - "Cannot use ORDER BY with INSERT, use CLUSTERED BY instead", - e.toString() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs("Cannot use an ORDER BY clause on a Query of type [INSERT], use CLUSTERED BY instead") ); } finally { @@ -973,8 +965,8 @@ public void testInsertWithOrderBy() @Test public void testInsertWithoutPartitionedBy() { - SqlValidationError e = Assert.assertThrows( - SqlValidationError.class, + DruidException e = Assert.assertThrows( + DruidException.class, () -> testQuery( StringUtils.format("INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)), @@ -982,10 +974,10 @@ public void testInsertWithoutPartitionedBy() ImmutableList.of() ) ); - Assert.assertEquals("SQL-Validation-InsertWithoutPartitionBy: op=[INSERT]", e.getMessage()); - Assert.assertEquals( - "INSERT statements must specify the PARTITIONED BY clause explicitly", - e.toString() + + MatcherAssert.assertThat( + e, + invalidSqlIs("Operation [INSERT] requires a PARTITIONED BY to be explicitly defined, but none was found.") ); didTest = true; } @@ -1326,14 +1318,13 @@ public void testInsertFromExternalAggregateAll() @Test public void testInsertWithInvalidSelectStatement() { + // This test fails because "count" is a reserved word and it is being used without quotes. So SQL is considering + // it a token instead of a name. It would be nice if our message was more direct telling the person that they + // used a reserved word instead of making them know that a "token" means Calcite is seeing a reserved word. But, + // that's an improvement for another day. 
testIngestionQuery() .sql("INSERT INTO t SELECT channel, added as count FROM foo PARTITIONED BY ALL") // count is a keyword - .expectValidationError( - CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlParseError.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith("SQL-Parse-UnexpectedToken: line=[1], column=[37], token=[as count]")) - ) - ) + .expectValidationError(invalidSqlContains("Received an unexpected token [as count]")) .verify(); } @@ -1342,10 +1333,7 @@ public void testInsertWithUnnamedColumnInSelectStatement() { testIngestionQuery() .sql("INSERT INTO t SELECT dim1, dim2 || '-lol' FROM foo PARTITIONED BY ALL") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-General: message=[" + IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + "]" - ) + .expectValidationError(invalidSqlContains("Insertion requires columns to be named")) .verify(); } @@ -1354,10 +1342,7 @@ public void testInsertWithInvalidColumnNameInIngest() { testIngestionQuery() .sql("INSERT INTO t SELECT __time, dim1 AS EXPR$0 FROM foo PARTITIONED BY ALL") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-General: message=[" + IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + "]" - ) + .expectValidationError(invalidSqlContains("Insertion requires columns to be named")) .verify(); } @@ -1368,10 +1353,7 @@ public void testInsertWithUnnamedColumnInNestedSelectStatement() .sql("INSERT INTO test " + "SELECT __time, * FROM " + "(SELECT __time, LOWER(dim1) FROM foo) PARTITIONED BY ALL TIME") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-General: message=[" + IngestHandler.UNNAMED_INGESTION_COLUMN_ERROR + "]" - ) + .expectValidationError(invalidSqlContains("Insertion requires columns to be named")) .verify(); } @@ -1382,11 +1364,13 @@ public void testInsertQueryWithInvalidGranularity() .sql("insert into foo1 select __time, dim1 FROM foo partitioned by time_floor(__time, 'PT2H')") .expectValidationError( CoreMatchers.allOf( - 
CoreMatchers.instanceOf(SqlValidationError.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "SQL-Validation-PartitionedByGrain")) + "The granularity specified in PARTITIONED BY [`time_floor`(`__time`, 'PT2H')] is not supported. " + + "Valid options: [second, minute, five_minute, ten_minute, fifteen_minute, thirty_minute, hour, " + + "six_hour, eight_hour, day, week, month, quarter, year, all]")) ) - ) + ) .verify(); } @@ -1407,7 +1391,7 @@ public void testInsertOnExternalDataSourceWithIncompatibleTimeColumnSignature() ) .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlValidationError.class), + CoreMatchers.instanceOf(DruidException.class), ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( "EXTERN function with __time column can be used when __time column is of type long")) ) @@ -1425,8 +1409,7 @@ public void testInsertWithSqlOuterLimit() .context(context) .sql("INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") .expectValidationError( - SqlValidationError.class, - "SQL-Validation-InsertContext: param=[sqlOuterLimit], op=[INSERT]" + invalidSqlIs("Context parameter [sqlOuterLimit] cannot be provided on operator [INSERT]") ) .verify(); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java index 1f45aebc8b52..5714cb538387 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java @@ -25,6 +25,8 @@ import junitparams.JUnitParamsRunner; import junitparams.Parameters; import org.apache.druid.common.config.NullHandling; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import 
org.apache.druid.java.util.common.JodaUtils; @@ -89,8 +91,8 @@ import org.apache.druid.sql.calcite.expression.DruidExpression; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.planner.PlannerConfig; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.apache.druid.sql.calcite.util.CalciteTests; +import org.hamcrest.MatcherAssert; import org.joda.time.DateTimeZone; import org.joda.time.Period; import org.junit.Assert; @@ -1032,7 +1034,6 @@ public void testLeftJoinTwoLookupsUsingJoinOperator(Map queryCon ); } - @Test @Parameters(source = QueryContextForJoinProvider.class) public void testInnerJoinTableLookupLookupWithFilterWithOuterLimit(Map queryContext) @@ -1483,16 +1484,31 @@ public void testInnerJoinQueryOfLookup(Map queryContext) ); } - @Test(expected = UnsupportedSQLQueryException.class) + @Test @Parameters(source = QueryContextForJoinProvider.class) public void testTimeColumnAggregationsOnLookups(Map queryContext) { - testQuery( - "SELECT k, LATEST(v) v FROM lookup.lookyloo GROUP BY k", - queryContext, - ImmutableList.of(), - ImmutableList.of() - ); + try { + testQuery( + "SELECT k, LATEST(v) v FROM lookup.lookyloo GROUP BY k", + queryContext, + ImmutableList.of(), + ImmutableList.of() + ); + Assert.fail("Expected exception to be thrown."); + } + catch (DruidException e) { + MatcherAssert.assertThat( + e, + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "adhoc") + .expectMessageIs( + "Query planning failed for unknown reason, our best guess is this " + + "[LATEST and EARLIEST aggregators implicitly depend on the __time column, " + + "but the table queried doesn't contain a __time column. 
" + + "Please use LATEST_BY or EARLIEST_BY and specify the column explicitly.]" + ) + ); + } } @Test @@ -3342,7 +3358,7 @@ public void testJoinOnConstantShouldFail(Map queryContext) { assertQueryIsUnplannable( "SELECT t1.dim1 from foo as t1 LEFT JOIN foo as t2 on t1.dim1 = '10.1'", - "Possible error: SQL is resulting in a join that has unsupported operand types." + "SQL is resulting in a join that has unsupported operand types." ); } @@ -3498,6 +3514,9 @@ public void testLeftJoinSubqueryWithNullKeyFilter(Map queryConte .context(queryContext) .build(); + boolean isJoinFilterRewriteEnabled = queryContext.getOrDefault(JOIN_FILTER_REWRITE_ENABLE_KEY, true) + .toString() + .equals("true"); testQuery( "SELECT dim1, l1.k\n" + "FROM foo\n" @@ -3505,7 +3524,16 @@ public void testLeftJoinSubqueryWithNullKeyFilter(Map queryConte + "WHERE l1.k IS NOT NULL\n", queryContext, ImmutableList.of(NullHandling.sqlCompatible() ? nullCompatibleModePlan : nonNullCompatibleModePlan), - ImmutableList.of(new Object[]{"abc", "abc"}) + NullHandling.sqlCompatible() || !isJoinFilterRewriteEnabled + ? ImmutableList.of(new Object[]{"abc", "abc"}) + : ImmutableList.of( + new Object[]{"10.1", ""}, + // this result is incorrect. 
TODO : fix this result when the JoinFilterAnalyzer bug is fixed + new Object[]{"2", ""}, + new Object[]{"1", ""}, + new Object[]{"def", ""}, + new Object[]{"abc", "abc"} + ) ); } @@ -5406,7 +5434,7 @@ public void testRegressionFilteredAggregatorsSubqueryJoins(Map q .dimension(new DefaultDimensionSpec("v0", "d0", ColumnType.LONG)) .metric(new InvertedTopNMetricSpec(new DimensionTopNMetricSpec( null, - StringComparators.NUMERIC + StringComparators.LEXICOGRAPHIC ))) .aggregators(new CountAggregatorFactory("a0")) .threshold(1) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java index 014dc7c2926a..d016f1795326 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java @@ -22,7 +22,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.math.expr.ExpressionProcessing; @@ -801,7 +801,11 @@ public void testMultiValueStringOrdinal() .setDataSource(CalciteTests.DATASOURCE3) .setInterval(querySegmentSpec(Filtration.eternity())) .setGranularity(Granularities.ALL) - .setVirtualColumns(expressionVirtualColumn("v0", "array_ordinal(\"dim3\",2)", ColumnType.STRING)) + .setVirtualColumns(expressionVirtualColumn( + "v0", + "array_ordinal(\"dim3\",2)", + ColumnType.STRING + )) .setDimensions( dimensions( new DefaultDimensionSpec("v0", "_d0", ColumnType.STRING) @@ -1797,7 +1801,7 @@ public void testMultiValueToArrayMoreArgs() testQueryThrows( "SELECT MV_TO_ARRAY(dim3,dim3) FROM druid.numfoo", 
exception -> { - exception.expect(SqlValidationError.class); + exception.expect(DruidException.class); exception.expectMessage("Invalid number of arguments to function"); } ); @@ -1809,7 +1813,7 @@ public void testMultiValueToArrayNoArgs() testQueryThrows( "SELECT MV_TO_ARRAY() FROM druid.numfoo", exception -> { - exception.expect(SqlValidationError.class); + exception.expect(DruidException.class); exception.expectMessage("Invalid number of arguments to function"); } ); @@ -2008,8 +2012,7 @@ public void testMultiValueStringOverlapFilterInconsistentUsage() "SELECT COALESCE(dim3, 'other') FROM druid.numfoo " + "WHERE MV_OVERLAP(COALESCE(dim3, ARRAY['other']), ARRAY['a', 'b', 'other']) LIMIT 5", e -> { - e.expect(SqlPlanningException.class); - e.expectMessage("Illegal mixing of types in CASE or COALESCE statement"); + e.expect(invalidSqlContains("Illegal mixing of types in CASE or COALESCE statement")); } ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java index eefd2bc7eb53..83861485960b 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteNestedDataQueryTest.java @@ -32,7 +32,7 @@ import org.apache.druid.data.input.impl.LongDimensionSchema; import org.apache.druid.data.input.impl.StringDimensionSchema; import org.apache.druid.data.input.impl.TimestampSpec; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.guice.DruidInjectorBuilder; import org.apache.druid.guice.NestedDataModule; import org.apache.druid.java.util.common.HumanReadableBytes; @@ -4172,9 +4172,11 @@ public void testGroupByInvalidPath() + "SUM(cnt) " + "FROM druid.nested GROUP BY 1", (expected) -> { - expected.expect(SqlValidationError.class); - expected.expectMessage( - "Cannot use [JSON_VALUE_VARCHAR]: 
[Bad format, '.array.[1]' is not a valid JSONPath path: must start with '$']"); + expected.expect( + DruidExceptionMatcher + .invalidInput() + .expectMessageIs("JSONPath [.array.[1]] is invalid, it must start with '$'") + ); } ); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java index ac3b60bf0c3b..2266b8d6b825 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java @@ -22,7 +22,7 @@ import com.google.common.collect.ImmutableList; import org.apache.calcite.avatica.SqlType; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; @@ -577,8 +577,9 @@ public void testLongs() @Test public void testMissingParameter() { - expectedException.expect(SqlValidationError.class); - expectedException.expectMessage("SQL-Validation-UnboundParameter: index=[1]"); + expectedException.expect( + DruidExceptionMatcher.invalidSqlInput().expectMessageIs("No value bound for parameter (position [1])") + ); testQuery( "SELECT COUNT(*)\n" + "FROM druid.numfoo\n" @@ -592,8 +593,9 @@ public void testMissingParameter() @Test public void testPartiallyMissingParameter() { - expectedException.expect(SqlValidationError.class); - expectedException.expectMessage("SQL-Validation-UnboundParameter: index=[2]"); + expectedException.expect( + DruidExceptionMatcher.invalidSqlInput().expectMessageIs("No value bound for parameter (position [2])") + ); testQuery( "SELECT COUNT(*)\n" + "FROM druid.numfoo\n" @@ -610,8 +612,9 @@ public void testPartiallyMissingParameterInTheMiddle() List params = new 
ArrayList<>(); params.add(null); params.add(new SqlParameter(SqlType.INTEGER, 1)); - expectedException.expect(SqlValidationError.class); - expectedException.expectMessage("SQL-Validation-UnboundParameter: index=[1]"); + expectedException.expect( + DruidExceptionMatcher.invalidSqlInput().expectMessageIs("No value bound for parameter (position [1])") + ); testQuery( "SELECT 1 + ?, dim1 FROM foo LIMIT ?", ImmutableList.of(), diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 374235386bf7..065355a2c284 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -25,8 +25,7 @@ import com.google.common.collect.ImmutableSet; import org.apache.calcite.runtime.CalciteContextException; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.SqlUnsupportedError; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.HumanReadableBytes; import org.apache.druid.java.util.common.Intervals; @@ -43,7 +42,6 @@ import org.apache.druid.query.Query; import org.apache.druid.query.QueryContexts; import org.apache.druid.query.QueryDataSource; -import org.apache.druid.query.QueryException; import org.apache.druid.query.ResourceLimitExceededException; import org.apache.druid.query.TableDataSource; import org.apache.druid.query.UnionDataSource; @@ -372,8 +370,8 @@ public void testInformationSchemaColumnsOnAnotherView() public void testCannotInsertWithNativeEngine() { notMsqCompatible(); - final SqlUnsupportedError e = Assert.assertThrows( - SqlUnsupportedError.class, + final DruidException e = Assert.assertThrows( + DruidException.class, () -> testQuery( "INSERT INTO dst SELECT * FROM foo PARTITIONED BY ALL", 
ImmutableList.of(), @@ -383,9 +381,7 @@ public void testCannotInsertWithNativeEngine() MatcherAssert.assertThat( e, - ThrowableMessageMatcher.hasMessage( - CoreMatchers.equalTo("SQL-Unsupported-UnsupportedEngineOp: op=[INSERT], engine=[native]") - ) + invalidSqlIs("INSERT operations are not supported by requested SQL engine [native], consider using MSQ.") ); } @@ -393,8 +389,8 @@ public void testCannotInsertWithNativeEngine() public void testCannotReplaceWithNativeEngine() { notMsqCompatible(); - final SqlUnsupportedError e = Assert.assertThrows( - SqlUnsupportedError.class, + final DruidException e = Assert.assertThrows( + DruidException.class, () -> testQuery( "REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL", ImmutableList.of(), @@ -404,9 +400,7 @@ public void testCannotReplaceWithNativeEngine() MatcherAssert.assertThat( e, - ThrowableMessageMatcher.hasMessage( - CoreMatchers.equalTo("SQL-Unsupported-UnsupportedEngineOp: op=[REPLACE], engine=[native]") - ) + invalidSqlIs("REPLACE operations are not supported by the requested SQL engine [native]. 
Consider using MSQ.") ); } @@ -814,7 +808,7 @@ public void testLatestAggregators() @Test public void testEarliestByInvalidTimestamp() { - expectedException.expect(SqlValidationError.class); + expectedException.expect(DruidException.class); expectedException.expectMessage("Cannot apply 'EARLIEST_BY' to arguments of type 'EARLIEST_BY(, )"); testQuery( @@ -827,8 +821,9 @@ public void testEarliestByInvalidTimestamp() @Test public void testLatestByInvalidTimestamp() { - expectedException.expect(SqlValidationError.class); - expectedException.expectMessage("Cannot apply 'LATEST_BY' to arguments of type 'LATEST_BY(, )"); + expectedException.expect( + invalidSqlContains("Cannot apply 'LATEST_BY' to arguments of type 'LATEST_BY(, )") + ); testQuery( "SELECT LATEST_BY(m1, l1) FROM druid.numfoo", @@ -1070,19 +1065,19 @@ public void testStringLatestGroupByWithAlwaysFalseCondition() "SELECT LATEST(dim4, 10), dim2 FROM numfoo WHERE (dim1 = 'something' AND dim1 IN('something else')) GROUP BY dim2", ImmutableList.of( Druids.newScanQueryBuilder() - .dataSource(InlineDataSource.fromIterable( - ImmutableList.of(), - RowSignature.builder() - .add("EXPR$0", ColumnType.STRING) - .add("dim2", ColumnType.STRING) - .build() - )) - .intervals(querySegmentSpec(Filtration.eternity())) - .columns("EXPR$0", "dim2") - .context(QUERY_CONTEXT_DEFAULT) - .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) - .legacy(false) - .build() + .dataSource(InlineDataSource.fromIterable( + ImmutableList.of(), + RowSignature.builder() + .add("EXPR$0", ColumnType.STRING) + .add("dim2", ColumnType.STRING) + .build() + )) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("EXPR$0", "dim2") + .context(QUERY_CONTEXT_DEFAULT) + .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) + .legacy(false) + .build() ), ImmutableList.of() ); @@ -1095,19 +1090,19 @@ public void testStringLatestByGroupByWithAlwaysFalseCondition() "SELECT LATEST_BY(dim4, __time, 10), dim2 FROM numfoo WHERE (dim1 = 
'something' AND dim1 IN('something else')) GROUP BY dim2", ImmutableList.of( Druids.newScanQueryBuilder() - .dataSource(InlineDataSource.fromIterable( - ImmutableList.of(), - RowSignature.builder() - .add("EXPR$0", ColumnType.STRING) - .add("dim2", ColumnType.STRING) - .build() - )) - .intervals(querySegmentSpec(Filtration.eternity())) - .columns("EXPR$0", "dim2") - .context(QUERY_CONTEXT_DEFAULT) - .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) - .legacy(false) - .build() + .dataSource(InlineDataSource.fromIterable( + ImmutableList.of(), + RowSignature.builder() + .add("EXPR$0", ColumnType.STRING) + .add("dim2", ColumnType.STRING) + .build() + )) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("EXPR$0", "dim2") + .context(QUERY_CONTEXT_DEFAULT) + .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) + .legacy(false) + .build() ), ImmutableList.of() ); @@ -2903,18 +2898,8 @@ public void testUnionAllTablesColumnCountMismatch() ); Assert.fail("query execution should fail"); } - catch (SqlValidationError e) { - Assert.assertTrue( - e.toString().contains("Column count mismatch in UNION ALL") - ); - Assert.assertEquals( - "SQL-Validation-General", - e.errorCode() - ); - Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() - ); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs("Column count mismatch in UNION ALL (line [3], column [42])")); } } @@ -2978,7 +2963,8 @@ public void testUnionAllTablesColumnTypeMismatchStringLong() + "WHERE dim2 = 'a' OR dim2 = 'en'\n" + "GROUP BY 1, 2", "SQL requires union between inputs that are not simple table scans and involve a " + - "filter or aliasing. Or column types of tables being unioned are not of same type."); + "filter or aliasing. Or column types of tables being unioned are not of same type." 
+ ); } @Test @@ -2986,6 +2972,7 @@ public void testUnionAllTablesWhenMappingIsRequired() { // Cannot plan this UNION ALL operation, because the column swap would require generating a subquery. + msqCompatible(); assertQueryIsUnplannable( "SELECT\n" + "c, COUNT(*)\n" @@ -2993,7 +2980,7 @@ public void testUnionAllTablesWhenMappingIsRequired() + "WHERE c = 'a' OR c = 'def'\n" + "GROUP BY 1", "SQL requires union between two tables " + - "and column names queried for each table are different Left: [dim1], Right: [dim2]." + "and column names queried for each table are different Left: [dim1], Right: [dim2]." ); } @@ -3018,7 +3005,7 @@ public void testUnionAllTablesWhenCastAndMappingIsRequired() + "WHERE c = 'a' OR c = 'def'\n" + "GROUP BY 1", "SQL requires union between inputs that are not simple table scans and involve " + - "a filter or aliasing. Or column types of tables being unioned are not of same type." + "a filter or aliasing. Or column types of tables being unioned are not of same type." 
); } @@ -3181,14 +3168,8 @@ public void testUnionAllThreeTablesColumnCountMismatch1() ); Assert.fail("query execution should fail"); } - catch (SqlValidationError e) { - Assert.assertTrue( - e.getMessage().contains("Column count mismatch in UNION ALL") - ); - Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() - ); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs("Column count mismatch in UNION ALL (line [3], column [45])")); } } @@ -3207,18 +3188,8 @@ public void testUnionAllThreeTablesColumnCountMismatch2() ); Assert.fail("query execution should fail"); } - catch (SqlValidationError e) { - Assert.assertTrue( - e.toString().contains("Column count mismatch in UNION ALL") - ); - Assert.assertEquals( - "SQL-Validation-General", - e.errorCode() - ); - Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() - ); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs("Column count mismatch in UNION ALL (line [3], column [45])")); } } @@ -3237,18 +3208,8 @@ public void testUnionAllThreeTablesColumnCountMismatch3() ); Assert.fail("query execution should fail"); } - catch (SqlValidationError e) { - Assert.assertTrue( - e.toString().contains("Column count mismatch in UNION ALL") - ); - Assert.assertEquals( - "SQL-Validation-General", - e.errorCode() - ); - Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() - ); + catch (DruidException e) { + MatcherAssert.assertThat(e, invalidSqlIs("Column count mismatch in UNION ALL (line [3], column [70])")); } } @@ -5655,7 +5616,7 @@ public void testUnplannableQueries() final Map queries = ImmutableMap.of( // SELECT query with order by non-__time. 
"SELECT dim1 FROM druid.foo ORDER BY dim1", - "Possible error: SQL query requires order by non-time column [dim1 ASC], which is not supported.", + "SQL query requires order by non-time column [[dim1 ASC]], which is not supported.", // JOIN condition with not-equals (<>). "SELECT foo.dim1, foo.dim2, l.k, l.v\n" @@ -5777,7 +5738,9 @@ public void testArrayAggQueryOnComplexDatatypes() .build() ), ImmutableList.of( - new Object[]{"[\"AQAAAEAAAA==\",\"AQAAAQAAAAHNBA==\",\"AQAAAQAAAAOzAg==\",\"AQAAAQAAAAFREA==\",\"AQAAAQAAAACyEA==\",\"AQAAAQAAAAEkAQ==\"]"} + new Object[]{ + "[\"AQAAAEAAAA==\",\"AQAAAQAAAAHNBA==\",\"AQAAAQAAAAOzAg==\",\"AQAAAQAAAAFREA==\",\"AQAAAQAAAACyEA==\",\"AQAAAQAAAAEkAQ==\"]" + } ) ); } @@ -5789,17 +5752,10 @@ public void testStringAggQueryOnComplexDatatypes() testQuery("SELECT STRING_AGG(unique_dim1, ',') FROM druid.foo", ImmutableList.of(), ImmutableList.of()); Assert.fail("query execution should fail"); } - catch (SqlValidationError e) { - Assert.assertTrue( - e.toString().contains("STRING_AGG aggregation is not supported for type [COMPLEX]") - ); - Assert.assertEquals( - "SQL-Validation-General", - e.errorCode() - ); - Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs("Aggregation [STRING_AGG] does not support type [COMPLEX], column [foo.unique_dim1]") ); } } @@ -5936,10 +5892,12 @@ public void testCountStarWithTimeInIntervalFilterNonLiteral() "SELECT COUNT(*) FROM druid.foo " + "WHERE TIME_IN_INTERVAL(__time, dim1)", expected -> { - expected.expect(CoreMatchers.instanceOf(SqlValidationError.class)); - expected.expect(ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "SQL-Validation-General: line=[1], column=[38], " + - "message=[Cannot apply 'TIME_IN_INTERVAL' to arguments of type 'TIME_IN_INTERVAL(, )'. 
Supported form(s): TIME_IN_INTERVAL(, )]")) + expected.expect( + invalidSqlIs( + "Cannot apply 'TIME_IN_INTERVAL' to arguments of type " + + "'TIME_IN_INTERVAL(, )'. Supported form(s): " + + "TIME_IN_INTERVAL(, ) (line [1], column [38])" + ) ); } ); @@ -6078,15 +6036,14 @@ public void testCountStarWithTimeFilterUsingStringLiteralsInvalid_isUnplannable( // Strings are implicitly cast to timestamps. Test an invalid string. // This error message isn't ideal but it is at least better than silently ignoring the problem. String sql = "SELECT COUNT(*) FROM druid.foo\n" - + "WHERE __time >= 'z2000-01-01 00:00:00' AND __time < '2001-01-01 00:00:00'\n"; + + "WHERE __time >= 'z2000-01-01 00:00:00' AND __time < '2001-01-01 00:00:00'\n"; try { testBuilder().sql(sql).run(); } - catch (SqlValidationError e) { - Assert.assertEquals( - sql, - "Illegal TIMESTAMP constant [CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL]", - e.toString() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs("Illegal TIMESTAMP constant [CAST('z2000-01-01 00:00:00'):TIMESTAMP(3) NOT NULL]") ); } catch (Exception e) { @@ -7586,7 +7543,8 @@ public void testQueryWithMoreThanMaxNumericInFilter() { notMsqCompatible(); expectedException.expect(UOE.class); - expectedException.expectMessage("The number of values in the IN clause for [dim6] in query exceeds configured maxNumericFilter limit of [2] for INs. Cast [3] values of IN clause to String"); + expectedException.expectMessage( + "The number of values in the IN clause for [dim6] in query exceeds configured maxNumericFilter limit of [2] for INs. 
Cast [3] values of IN clause to String"); testQuery( PLANNER_CONFIG_MAX_NUMERIC_IN_FILTER, @@ -11339,17 +11297,12 @@ public void testTimeExtractWithTooFewArguments() testQuery("SELECT TIME_EXTRACT(__time) FROM druid.foo", ImmutableList.of(), ImmutableList.of()); Assert.fail("query execution should fail"); } - catch (SqlValidationError e) { - Assert.assertTrue( - e.toString().contains("Invalid number of arguments to function 'TIME_EXTRACT'. Was expecting 2 arguments") - ); - Assert.assertEquals( - "SQL-Validation-General", - e.errorCode() - ); - Assert.assertEquals( - QueryException.PLAN_VALIDATION_FAILED_ERROR_CODE, - e.getErrorCode() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs( + "Invalid number of arguments to function 'TIME_EXTRACT'. Was expecting 2 arguments (line [1], column [8])" + ) ); } } @@ -12872,8 +12825,8 @@ public void testTimeStampAddZeroDayPeriod() //Since adding a zero period does not change the timestamp, just compare the stamp with the orignal TestDataBuilder.ROWS1.stream() - .map(row -> new Object[]{row.getTimestampFromEpoch()}) - .collect(Collectors.toList()) + .map(row -> new Object[]{row.getTimestampFromEpoch()}) + .collect(Collectors.toList()) ); } @@ -12901,8 +12854,8 @@ public void testTimeStampAddZeroMonthPeriod() //Since adding a zero period does not change the timestamp, just compare the stamp with the orignal TestDataBuilder.ROWS1.stream() - .map(row -> new Object[]{row.getTimestampFromEpoch()}) - .collect(Collectors.toList()) + .map(row -> new Object[]{row.getTimestampFromEpoch()}) + .collect(Collectors.toList()) ); } @@ -12932,8 +12885,8 @@ public void testTimeStampAddZeroYearPeriod() //Since adding a zero period does not change the timestamp, just compare the stamp with the orignal TestDataBuilder.ROWS1.stream() - .map(row -> new Object[]{row.getTimestampFromEpoch()}) - .collect(Collectors.toList()) + .map(row -> new Object[]{row.getTimestampFromEpoch()}) + .collect(Collectors.toList()) ); } @@ 
-12970,8 +12923,8 @@ public void testTimeStampAddConversion() // verify if query results match the given TestDataBuilder.ROWS1.stream() - .map(r -> new Object[]{periodGranularity.increment(r.getTimestamp()).getMillis()}) - .collect(Collectors.toList()) + .map(r -> new Object[]{periodGranularity.increment(r.getTimestamp()).getMillis()}) + .collect(Collectors.toList()) ); // @@ -12999,8 +12952,8 @@ public void testTimeStampAddConversion() // verify if query results match the given // "cnt" for each row is 1 TestDataBuilder.ROWS1.stream() - .map(row -> new Object[]{periodGranularity.increment(row.getTimestamp()).getMillis()}) - .collect(Collectors.toList()) + .map(row -> new Object[]{periodGranularity.increment(row.getTimestamp()).getMillis()}) + .collect(Collectors.toList()) ); } @@ -14005,7 +13958,7 @@ public void testStringAggExpression() ); } - @Test(expected = SqlUnsupportedError.class) + @Test(expected = DruidException.class) public void testStringAggExpressionNonConstantSeparator() { testQuery( @@ -14154,7 +14107,7 @@ public void testHumanReadableFormatFunction() @Test public void testHumanReadableFormatFunctionExceptionWithWrongNumberType() { - this.expectedException.expect(SqlValidationError.class); + this.expectedException.expect(DruidException.class); this.expectedException.expectMessage("Supported form(s): HUMAN_READABLE_BINARY_BYTE_FORMAT(Number, [Precision])"); testQuery( "SELECT HUMAN_READABLE_BINARY_BYTE_FORMAT('45678')", @@ -14166,7 +14119,7 @@ public void testHumanReadableFormatFunctionExceptionWithWrongNumberType() @Test public void testHumanReadableFormatFunctionWithWrongPrecisionType() { - this.expectedException.expect(SqlValidationError.class); + this.expectedException.expect(DruidException.class); this.expectedException.expectMessage("Supported form(s): HUMAN_READABLE_BINARY_BYTE_FORMAT(Number, [Precision])"); testQuery( "SELECT HUMAN_READABLE_BINARY_BYTE_FORMAT(45678, '2')", @@ -14178,7 +14131,7 @@ public void 
testHumanReadableFormatFunctionWithWrongPrecisionType() @Test public void testHumanReadableFormatFunctionWithInvalidNumberOfArguments() { - this.expectedException.expect(SqlValidationError.class); + this.expectedException.expect(DruidException.class); /* * frankly speaking, the exception message thrown here is a little bit confusing @@ -14589,21 +14542,21 @@ public void testComplexDecode() testQuery( "SELECT COMPLEX_DECODE_BASE64('hyperUnique',PARSE_JSON(TO_JSON_STRING(unique_dim1))) from druid.foo LIMIT 10", ImmutableList.of( - Druids.newScanQueryBuilder() - .dataSource(CalciteTests.DATASOURCE1) - .intervals(querySegmentSpec(Filtration.eternity())) - .columns("v0") - .virtualColumns( - expressionVirtualColumn( - "v0", - "complex_decode_base64('hyperUnique',parse_json(to_json_string(\"unique_dim1\")))", - ColumnType.ofComplex("hyperUnique") - ) - ) - .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) - .legacy(false) - .limit(10) - .build() + Druids.newScanQueryBuilder() + .dataSource(CalciteTests.DATASOURCE1) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("v0") + .virtualColumns( + expressionVirtualColumn( + "v0", + "complex_decode_base64('hyperUnique',parse_json(to_json_string(\"unique_dim1\")))", + ColumnType.ofComplex("hyperUnique") + ) + ) + .resultFormat(ResultFormat.RESULT_FORMAT_COMPACTED_LIST) + .legacy(false) + .limit(10) + .build() ), ImmutableList.of( new Object[]{"\"AQAAAEAAAA==\""}, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java index 15c48254f2b2..5c4e061089d4 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java @@ -22,9 +22,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import 
org.apache.druid.error.SqlParseError; -import org.apache.druid.error.SqlUnsupportedError; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.jackson.JacksonUtils; @@ -43,6 +42,7 @@ import org.apache.druid.sql.calcite.parser.DruidSqlReplace; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.util.CalciteTests; +import org.hamcrest.MatcherAssert; import org.junit.Assert; import org.junit.Test; @@ -51,8 +51,6 @@ import java.util.HashMap; import java.util.Map; -import static org.junit.Assert.assertEquals; - public class CalciteReplaceDmlTest extends CalciteIngestionDmlTest { private static final Map REPLACE_ALL_TIME_CHUNKS = ImmutableMap.of( @@ -220,10 +218,9 @@ public void testReplaceForUnsupportedDeleteWhereClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time LIKE '20__-02-01' SELECT * FROM foo PARTITIONED BY MONTH") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-OverwriteWhereExpr: expr=[LIKE]" - ) + .expectValidationError(invalidSqlIs( + "Invalid OVERWRITE WHERE clause [`__time` LIKE '20__-02-01']: Unsupported operation [LIKE] in OVERWRITE WHERE clause." 
+ )) .verify(); } @@ -232,10 +229,9 @@ public void testReplaceForInvalidDeleteWhereClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE TRUE SELECT * FROM foo PARTITIONED BY MONTH") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-InvalidOverwriteWhere" - ) + .expectValidationError(invalidSqlIs( + "Invalid OVERWRITE WHERE clause [TRUE]: expected clause including AND, OR, NOT, >, <, >=, <= OR BETWEEN operators" + )) .verify(); } @@ -244,10 +240,9 @@ public void testReplaceForDeleteWhereClauseOnUnsupportedColumns() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE dim1 > TIMESTAMP '2000-01-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-OverwriteWhereIsNotTime" - ) + .expectValidationError(invalidSqlIs( + "OVERWRITE WHERE clause only supports filtering on the __time column, got [947030400000 < dim1 as numeric]" + )) .verify(); } @@ -257,7 +252,9 @@ public void testReplaceWithOrderBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo ORDER BY dim1 PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "SQL-Validation-InsertOrderBy: op=[REPLACE]") + .expectValidationError(invalidSqlIs( + "Cannot use an ORDER BY clause on a Query of type [REPLACE], use CLUSTERED BY instead" + )) .verify(); } @@ -267,8 +264,10 @@ public void testReplaceForMisalignedPartitionInterval() testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-01-06 00:00:00' SELECT * FROM foo PARTITIONED BY MONTH") .expectValidationError( - SqlValidationError.class, - "SQL-Validation-OverwriteUnalignedInterval: interval=[[2000-01-05T00:00:00.000Z/2000-01-06T00:00:00.001Z]], granularity=[{type=period, period=P1M, timeZone=UTC, origin=null}]" + invalidSqlIs( + "OVERWRITE WHERE clause identified interval 
[2000-01-05T00:00:00.000Z/2000-01-06T00:00:00.001Z] " + + "which is not aligned with PARTITIONED BY granularity [{type=period, period=P1M, timeZone=UTC, origin=null}]" + ) ) .verify(); } @@ -278,10 +277,10 @@ public void testReplaceForInvalidPartition() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-05 00:00:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-OverwriteUnalignedInterval: interval=[[2000-01-05T00:00:00.000Z/2000-02-05T00:00:00.001Z]], granularity=[AllGranularity]" - ) + .expectValidationError(invalidSqlIs( + "OVERWRITE WHERE clause identified interval [2000-01-05T00:00:00.000Z/2000-02-05T00:00:00.001Z] " + + "which is not aligned with PARTITIONED BY granularity [AllGranularity]" + )) .verify(); } @@ -292,10 +291,10 @@ public void testReplaceFromTableWithEmptyInterval() .sql("REPLACE INTO dst OVERWRITE WHERE " + "__time < TIMESTAMP '2000-01-01' AND __time > TIMESTAMP '2000-01-01' " + "SELECT * FROM foo PARTITIONED BY MONTH") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-OverwriteEmptyIntervals" - ) + .expectValidationError(invalidSqlIs( + "The OVERWRITE WHERE clause [(__time as numeric < 946684800000 && 946684800000 < __time as numeric)] " + + "produced no time intervals, are the bounds overly restrictive?" 
+ )) .verify(); } @@ -304,7 +303,7 @@ public void testReplaceForWithInvalidInterval() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE WHERE __time >= TIMESTAMP '2000-01-INVALID0:00' AND __time <= TIMESTAMP '2000-02-05 00:00:00' SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlParseError.class) + .expectValidationError(DruidException.class) .verify(); } @@ -313,7 +312,7 @@ public void testReplaceForWithoutPartitionSpec() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class) + .expectValidationError(DruidException.class) .verify(); } @@ -383,7 +382,11 @@ public void testReplaceIntoInvalidDataSourceName() { testIngestionQuery() .sql("REPLACE INTO \"in/valid\" OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "SQL-Validation-General: message=[REPLACE dataSource cannot contain the '/' character.]") + .expectValidationError( + DruidExceptionMatcher + .invalidInput() + .expectMessageIs("Invalid value for field [table]: Value [in/valid] cannot contain '/'.") + ) .verify(); } @@ -392,7 +395,9 @@ public void testReplaceUsingColumnList() { testIngestionQuery() .sql("REPLACE INTO dst (foo, bar) OVERWRITE ALL SELECT dim1, dim2 FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlUnsupportedError.class, "SQL-Unsupported-InsertList: op=[REPLACE]") + .expectValidationError( + invalidSqlIs("Operation [REPLACE] cannot be run with a target column list, given [dst (`foo`, `bar`)]") + ) .verify(); } @@ -401,7 +406,9 @@ public void testReplaceWithoutPartitionedBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo") - .expectValidationError(SqlValidationError.class, "SQL-Validation-InsertWithoutPartitionBy: op=[REPLACE]") + .expectValidationError(invalidSqlIs( + "Operation [REPLACE] requires a PARTITIONED BY to be explicitly defined, 
but none was found." + )) .verify(); } @@ -410,7 +417,9 @@ public void testReplaceWithoutPartitionedByWithClusteredBy() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE ALL SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo CLUSTERED BY dim1") - .expectValidationError(SqlParseError.class, "SQL-Parse-General: message=[CLUSTERED BY found before PARTITIONED BY. In Druid, the CLUSTERED BY clause must follow the PARTITIONED BY clause]") + .expectValidationError(invalidSqlIs( + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + )) .verify(); } @@ -419,7 +428,10 @@ public void testReplaceWithoutOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "SQL-Validation-OverwriteTimeRange") + .expectValidationError(invalidSqlIs( + "Missing time chunk information in OVERWRITE clause for REPLACE. " + + "Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." + )) .verify(); } @@ -428,7 +440,10 @@ public void testReplaceWithoutCompleteOverwriteClause() { testIngestionQuery() .sql("REPLACE INTO dst OVERWRITE SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "SQL-Validation-OverwriteTimeRange") + .expectValidationError(invalidSqlIs( + "Missing time chunk information in OVERWRITE clause for REPLACE. " + + "Use OVERWRITE WHERE <__time based condition> or OVERWRITE ALL to overwrite the entire table." 
+ )) .verify(); } @@ -437,10 +452,10 @@ public void testReplaceIntoSystemTable() { testIngestionQuery() .sql("REPLACE INTO INFORMATION_SCHEMA.COLUMNS OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-InsertNotDatasource: op=[REPLACE], table=[INFORMATION_SCHEMA.COLUMNS]" - ) + .expectValidationError(invalidSqlIs( + "Table [INFORMATION_SCHEMA.COLUMNS] does not support operation [REPLACE]" + + " because it is not a Druid datasource" + )) .verify(); } @@ -449,10 +464,9 @@ public void testReplaceIntoView() { testIngestionQuery() .sql("REPLACE INTO view.aview OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-InsertNotDatasource: op=[REPLACE], table=[view.aview]" - ) + .expectValidationError(invalidSqlIs( + "Table [view.aview] does not support operation [REPLACE] because it is not a Druid datasource" + )) .verify(); } @@ -479,10 +493,9 @@ public void testReplaceIntoNonexistentSchema() { testIngestionQuery() .sql("REPLACE INTO nonexistent.dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError( - SqlValidationError.class, - "SQL-Validation-InsertNotDatasource: op=[REPLACE], table=[nonexistent.dst]" - ) + .expectValidationError(invalidSqlIs( + "Table [nonexistent.dst] does not support operation [REPLACE] because it is not a Druid datasource" + )) .verify(); } @@ -587,10 +600,13 @@ public void testReplaceWithPartitionedByContainingInvalidGranularity() ); Assert.fail("Exception should be thrown"); } - catch (SqlParseError e) { - assertEquals( - "SQL-Parse-General: message=[SQL-Parse-InvalidPartitionBy: expr=['invalid_granularity']]", - e.getMessage() + catch (DruidException e) { + MatcherAssert.assertThat( + e, + invalidSqlIs( + "Invalid granularity ['invalid_granularity'] after PARTITIONED BY. 
" + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR() or TIME_FLOOR()" + ) ); } didTest = true; @@ -911,7 +927,9 @@ public void testReplaceWithSqlOuterLimit() testIngestionQuery() .context(context) .sql("REPLACE INTO dst OVERWRITE ALL SELECT * FROM foo PARTITIONED BY ALL TIME") - .expectValidationError(SqlValidationError.class, "SQL-Validation-InsertContext: param=[sqlOuterLimit], op=[REPLACE]") + .expectValidationError(DruidExceptionMatcher.invalidInput().expectMessageIs( + "Context parameter [sqlOuterLimit] cannot be provided on operator [REPLACE]" + )) .verify(); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java index 7cb287bd3c2c..cc60a27acdd1 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteSelectQueryTest.java @@ -21,7 +21,7 @@ import com.google.common.collect.ImmutableList; import org.apache.druid.common.config.NullHandling; -import org.apache.druid.error.SqlValidationError; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.granularity.Granularities; @@ -311,9 +311,10 @@ public void testSelectConstantExpressionFromTable() @Test public void testSelectConstantExpressionEquivalentToNaN() { - expectedException.expectMessage( - "SQL-Unsupported-UnsupportedExpr: expr=[(log10(0) - log10(0))], eval=[NaN]" - ); + expectedException.expect(invalidSqlIs( + "Expression [(log10(0) - log10(0))] evaluates to an unsupported value [NaN], " + + "expected something that can be a Double. Consider casting with 'CAST(

AS BIGINT)'" + )); testQuery( "SELECT log10(0) - log10(0), dim1 FROM foo LIMIT 1", ImmutableList.of(), @@ -324,8 +325,10 @@ public void testSelectConstantExpressionEquivalentToNaN() @Test public void testSelectConstantExpressionEquivalentToInfinity() { - expectedException.expectMessage( - "SQL-Unsupported-UnsupportedExpr: expr=[log10(0)], eval=[-Infinity]"); + expectedException.expect(invalidSqlIs( + "Expression [log10(0)] evaluates to an unsupported value [-Infinity], " + + "expected something that can be a Double. Consider casting with 'CAST(AS BIGINT)'" + )); testQuery( "SELECT log10(0), dim1 FROM foo LIMIT 1", ImmutableList.of(), @@ -965,7 +968,7 @@ public void testSelectCurrentTimePrecisionTooHigh() testQueryThrows( "SELECT CURRENT_TIMESTAMP(4)", expectedException -> { - expectedException.expect(SqlValidationError.class); + expectedException.expect(DruidException.class); expectedException.expectMessage( "Argument to function 'CURRENT_TIMESTAMP' must be a valid precision between '0' and '3'" ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/QueryTestRunner.java b/sql/src/test/java/org/apache/druid/sql/calcite/QueryTestRunner.java index 34c8e4904146..963e1e0b23bc 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/QueryTestRunner.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/QueryTestRunner.java @@ -618,7 +618,9 @@ public void verify() public QueryTestRunner(QueryTestBuilder builder) { QueryTestConfig config = builder.config; - Assume.assumeTrue(!config.isRunningMSQ() || builder.msqCompatible); + if (config.isRunningMSQ()) { + Assume.assumeTrue(builder.msqCompatible); + } if (builder.expectedResultsVerifier == null && builder.expectedResults != null) { builder.expectedResultsVerifier = config.defaultResultsVerifier( builder.expectedResults, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java index 
1f295ea3587a..0600935ae073 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java @@ -31,10 +31,12 @@ import org.apache.calcite.sql.SqlPostfixOperator; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.parser.SqlParserPos; -import org.apache.calcite.tools.ValidationException; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.sql.calcite.expression.builtin.TimeFloorOperatorConversion; +import org.hamcrest.MatcherAssert; import org.junit.Assert; import org.junit.Test; import org.junit.experimental.runners.Enclosed; @@ -199,13 +201,18 @@ public static class FloorToGranularityConversionErrorsTest public void testConvertSqlNodeToGranularityWithIncorrectNode() { SqlNode sqlNode = SqlLiteral.createCharString("day", SqlParserPos.ZERO); - ParseException e = Assert.assertThrows( - ParseException.class, + DruidException e = Assert.assertThrows( + DruidException.class, () -> DruidSqlParserUtils.convertSqlNodeToGranularityThrowingParseExceptions(sqlNode) ); - Assert.assertEquals( - "Encountered 'day' after PARTITIONED BY. Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or TIME_FLOOR function", - e.getMessage() + MatcherAssert.assertThat( + e, + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs( + "Invalid granularity ['day'] after PARTITIONED BY. 
" + + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR() or TIME_FLOOR()" + ) ); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRuleTest.java index de7a005852d2..73054f506a29 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRuleTest.java @@ -29,10 +29,10 @@ import org.apache.calcite.util.DateString; import org.apache.calcite.util.TimeString; import org.apache.calcite.util.TimestampString; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.sql.calcite.planner.DruidTypeSystem; import org.apache.druid.sql.calcite.planner.PlannerContext; -import org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.junit.Assert; @@ -177,8 +177,14 @@ public void testGetValueFromTimestampWithLocalTimeZoneLiteral() new TimestampString("2021-04-01 16:54:31"), 0 ); - expectedException.expect(UnsupportedSQLQueryException.class); - expectedException.expectMessage("TIMESTAMP_WITH_LOCAL_TIME_ZONE type is not supported"); + expectedException.expect( + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs( + "Cannot handle literal [2021-04-01 16:54:31:TIMESTAMP_WITH_LOCAL_TIME_ZONE(0)] " + + "of unsupported type [TIMESTAMP_WITH_LOCAL_TIME_ZONE]." 
+ ) + ); DruidLogicalValuesRule.getValueFromLiteral(literal, DEFAULT_CONTEXT); } @@ -186,8 +192,11 @@ public void testGetValueFromTimestampWithLocalTimeZoneLiteral() public void testGetValueFromTimeLiteral() { RexLiteral literal = REX_BUILDER.makeTimeLiteral(new TimeString("16:54:31"), 0); - expectedException.expect(UnsupportedSQLQueryException.class); - expectedException.expectMessage("TIME type is not supported"); + expectedException.expect( + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs("Cannot handle literal [16:54:31] of unsupported type [TIME].") + ); DruidLogicalValuesRule.getValueFromLiteral(literal, DEFAULT_CONTEXT); } @@ -195,8 +204,14 @@ public void testGetValueFromTimeLiteral() public void testGetValueFromTimeWithLocalTimeZoneLiteral() { RexLiteral literal = REX_BUILDER.makeTimeWithLocalTimeZoneLiteral(new TimeString("16:54:31"), 0); - expectedException.expect(UnsupportedSQLQueryException.class); - expectedException.expectMessage("TIME_WITH_LOCAL_TIME_ZONE type is not supported"); + expectedException.expect( + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs( + "Cannot handle literal [16:54:31:TIME_WITH_LOCAL_TIME_ZONE(0)] " + + "of unsupported type [TIME_WITH_LOCAL_TIME_ZONE]." 
+ ) + ); DruidLogicalValuesRule.getValueFromLiteral(literal, DEFAULT_CONTEXT); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/QueryLogHook.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/QueryLogHook.java index 967926681fb2..c95e2e609204 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/util/QueryLogHook.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/QueryLogHook.java @@ -31,6 +31,7 @@ import org.junit.runners.model.Statement; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Supplier; @@ -43,6 +44,7 @@ public class QueryLogHook implements TestRule private final Supplier objectMapperSupplier; private final List> recordedQueries = Lists.newCopyOnWriteArrayList(); + private final AtomicBoolean skipLog = new AtomicBoolean(false); public QueryLogHook(final Supplier objectMapperSupplier) { @@ -69,6 +71,17 @@ public List> getRecordedQueries() return ImmutableList.copyOf(recordedQueries); } + public void withSkippedLog(Consumer consumer) + { + try { + skipLog.set(true); + consumer.accept(null); + } + finally { + skipLog.set(false); + } + } + @Override public Statement apply(final Statement base, final Description description) { @@ -80,6 +93,10 @@ public void evaluate() throws Throwable clearRecordedQueries(); final Consumer function = query -> { + if (skipLog.get()) { + return; + } + try { recordedQueries.add((Query) query); log.info( diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index 586f9eeb916e..912c6378f762 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -34,8 +34,10 @@ import org.apache.druid.common.exception.AllowedRegexErrorResponseTransformStrategy; import org.apache.druid.common.exception.ErrorResponseTransformStrategy; 
import org.apache.druid.common.guava.SettableSupplier; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.error.ErrorResponse; -import org.apache.druid.error.StandardRestExceptionEncoder; +import org.apache.druid.error.QueryExceptionCompat; import org.apache.druid.jackson.DefaultObjectMapper; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.NonnullPair; @@ -83,7 +85,6 @@ import org.apache.druid.sql.HttpStatement; import org.apache.druid.sql.PreparedStatement; import org.apache.druid.sql.SqlLifecycleManager; -import org.apache.druid.sql.SqlPlanningException.PlanningError; import org.apache.druid.sql.SqlQueryPlus; import org.apache.druid.sql.SqlStatementFactory; import org.apache.druid.sql.SqlToolbox; @@ -106,8 +107,11 @@ import org.hamcrest.CoreMatchers; import org.hamcrest.MatcherAssert; import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -119,7 +123,6 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.StreamingOutput; - import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -136,11 +139,13 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; +@SuppressWarnings("ALL") public class SqlResourceTest extends CalciteTestBase { public static final DruidNode DUMMY_DRUID_NODE = new DruidNode("dummy", "dummy", false, 1, null, true, false); @@ -162,13 +167,17 @@ public class SqlResourceTest extends 
CalciteTestBase private static final List EXPECTED_SQL_TYPES_FOR_RESULT_FORMAT_TESTS = Arrays.asList("TIMESTAMP", "VARCHAR", "VARCHAR", "VARCHAR", "BIGINT", "FLOAT", "DOUBLE", "OTHER", "VARCHAR"); + private static Closer staticCloser = Closer.create(); private static QueryRunnerFactoryConglomerate conglomerate; - private static Closer resourceCloser; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); + @ClassRule + public static TemporaryFolder temporaryFolder = new TemporaryFolder(); + private static SpecificSegmentsQuerySegmentWalker walker; + private static QueryScheduler scheduler; + @Rule public QueryLogHook queryLogHook = QueryLogHook.create(); - private SpecificSegmentsQuerySegmentWalker walker; + + private Closer resourceCloser; private TestRequestLogger testRequestLogger; private SqlResource resource; private MockHttpServletRequest req; @@ -186,15 +195,13 @@ public class SqlResourceTest extends CalciteTestBase private final SettableSupplier responseContextSupplier = new SettableSupplier<>(); private Consumer onExecute = NULL_ACTION; - private Supplier schedulerBaggage = () -> null; + private static final AtomicReference> SCHEDULER_BAGGAGE = new AtomicReference<>(); - @Before - public void setUp() throws Exception + @BeforeClass + public static void setupClass() throws Exception { - resourceCloser = Closer.create(); - conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(resourceCloser); - - final QueryScheduler scheduler = new QueryScheduler( + conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate(staticCloser); + scheduler = new QueryScheduler( 5, ManualQueryPrioritizationStrategy.INSTANCE, new HiLoQueryLaningStrategy(40), @@ -207,15 +214,29 @@ public Sequence run(Query query, Sequence resultSequence) return super.run( query, new LazySequence<>(() -> { - schedulerBaggage.get(); + SCHEDULER_BAGGAGE.get().get(); return resultSequence; }) ); } }; + walker = CalciteTests.createMockWalker(conglomerate, 
temporaryFolder.newFolder(), scheduler); + staticCloser.register(walker); + } + + @AfterClass + public static void teardownClass() throws Exception + { + staticCloser.close(); + } + + @Before + public void setUp() throws Exception + { + SCHEDULER_BAGGAGE.set(() -> null); + resourceCloser = Closer.create(); executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s")); - walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder(), scheduler); final PlannerConfig plannerConfig = PlannerConfig.builder().serializeComplexValues(false).build(); final DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema( @@ -311,8 +332,7 @@ public PreparedStatement preparedStatement(SqlQueryPlus sqlRequest) lifecycleManager, new ServerConfig(), TEST_RESPONSE_CONTEXT_CONFIG, - DUMMY_DRUID_NODE, - StandardRestExceptionEncoder.instance() + DUMMY_DRUID_NODE ); } @@ -324,8 +344,8 @@ MockHttpServletRequest request() @After public void tearDown() throws Exception { - walker.close(); - walker = null; + SCHEDULER_BAGGAGE.set(() -> null); + executorService.shutdownNow(); executorService.awaitTermination(2, TimeUnit.SECONDS); resourceCloser.close(); @@ -654,7 +674,7 @@ public void testArrayResultFormatWithErrorAfterSecondRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.ARRAY, false, false, false, null, null), req); // Truncated response: missing final ] @@ -668,7 +688,7 @@ public void testObjectResultFormatWithErrorAfterFirstRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.OBJECT, false, false, false, null, null), req); // Truncated response: missing final ] @@ -682,7 +702,7 @@ public void 
testArrayLinesResultFormatWithErrorAfterFirstRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.ARRAYLINES, false, false, false, null, null), req); // Truncated response: missing final LFLF @@ -696,7 +716,7 @@ public void testObjectLinesResultFormatWithErrorAfterFirstRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.OBJECTLINES, false, false, false, null, null), req); // Truncated response: missing final LFLF @@ -710,7 +730,7 @@ public void testCsvResultFormatWithErrorAfterFirstRow() throws Exception sequenceMapFnSupplier.set(errorAfterSecondRowMapFn()); final String query = "SELECT cnt FROM foo"; - final Pair response = + final Pair response = doPostRaw(new SqlQuery(query, ResultFormat.CSV, false, false, false, null, null), req); // Truncated response: missing final LFLF @@ -855,7 +875,7 @@ public void testArrayResultFormatWithHeader_nullColumnType() throws Exception public void testArrayLinesResultFormat() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.ARRAYLINES, false, false, false, null, null) ); Assert.assertNull(pair.lhs); @@ -900,7 +920,7 @@ public void testArrayLinesResultFormat() throws Exception public void testArrayLinesResultFormatWithHeader() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.ARRAYLINES, true, true, true, null, null) ); Assert.assertNull(pair.lhs); @@ -948,7 +968,7 @@ public void testArrayLinesResultFormatWithHeader() throws 
Exception public void testArrayLinesResultFormatWithHeader_nullColumnType() throws Exception { final String query = "SELECT (1, 2) FROM INFORMATION_SCHEMA.COLUMNS LIMIT 1"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.ARRAYLINES, true, true, true, null, null) ); Assert.assertNull(pair.lhs); @@ -1022,7 +1042,7 @@ public void testObjectResultFormat() throws Exception public void testObjectLinesResultFormat() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.OBJECTLINES, false, false, false, null, null) ); Assert.assertNull(pair.lhs); @@ -1079,7 +1099,7 @@ public void testObjectLinesResultFormat() throws Exception public void testObjectLinesResultFormatWithMinimalHeader() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = + final Pair pair = doPostRaw(new SqlQuery(query, ResultFormat.OBJECTLINES, true, false, false, null, null)); Assert.assertNull(pair.lhs); final String response = pair.rhs; @@ -1139,7 +1159,7 @@ public void testObjectLinesResultFormatWithMinimalHeader() throws Exception public void testObjectLinesResultFormatWithFullHeader() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = + final Pair pair = doPostRaw(new SqlQuery(query, ResultFormat.OBJECTLINES, true, true, true, null, null)); Assert.assertNull(pair.lhs); final String response = pair.rhs; @@ -1205,7 +1225,7 @@ public void testObjectLinesResultFormatWithFullHeader() throws Exception public void testObjectLinesResultFormatWithFullHeader_nullColumnType() throws Exception { final String query = "SELECT (1, 2) FROM INFORMATION_SCHEMA.COLUMNS LIMIT 1"; - final Pair pair = + final Pair pair = doPostRaw(new SqlQuery(query, ResultFormat.OBJECTLINES, true, true, true, null, 
null)); Assert.assertNull(pair.lhs); final String response = pair.rhs; @@ -1234,7 +1254,7 @@ public void testObjectLinesResultFormatWithFullHeader_nullColumnType() throws Ex public void testCsvResultFormat() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.CSV, false, false, false, null, null) ); Assert.assertNull(pair.lhs); @@ -1256,7 +1276,7 @@ public void testCsvResultFormat() throws Exception public void testCsvResultFormatWithHeaders() throws Exception { final String query = "SELECT *, CASE dim2 WHEN '' THEN dim2 END FROM foo LIMIT 2"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.CSV, true, true, true, null, null) ); Assert.assertNull(pair.lhs); @@ -1281,7 +1301,7 @@ public void testCsvResultFormatWithHeaders() throws Exception public void testCsvResultFormatWithHeaders_nullColumnType() throws Exception { final String query = "SELECT (1, 2) FROM INFORMATION_SCHEMA.COLUMNS LIMIT 1"; - final Pair pair = doPostRaw( + final Pair pair = doPostRaw( new SqlQuery(query, ResultFormat.CSV, true, true, true, null, null) ); Assert.assertNull(pair.lhs); @@ -1342,12 +1362,12 @@ public void testExplainCountStar() throws Exception @Test public void testCannotParse() throws Exception { - ErrorResponse exception = postSyncForException("FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); + ErrorResponse errorResponse = postSyncForException("FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); - Assert.assertNotNull(exception); - Assert.assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorCode(), exception.getErrorCode()); - Assert.assertEquals(PlanningError.SQL_PARSE_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue(exception.getMessage().contains("Line [1], Column [1]: unexpected token [FROM]")); + validateInvalidSqlError( + errorResponse, + "Received an 
unexpected token [FROM] (line [1], column [1]), acceptable options: [\"INSERT\", \"UPSERT\", " + ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1355,12 +1375,15 @@ public void testCannotParse() throws Exception @Test public void testCannotValidate() throws Exception { - ErrorResponse exception = postSyncForException("SELECT dim4 FROM druid.foo", Status.BAD_REQUEST.getStatusCode()); + ErrorResponse errorResponse = postSyncForException( + "SELECT dim4 FROM druid.foo", + Status.BAD_REQUEST.getStatusCode() + ); - Assert.assertNotNull(exception); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), exception.getErrorCode()); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue(exception.getMessage().contains("Column 'dim4' not found in any table")); + validateInvalidSqlError( + errorResponse, + "Column 'dim4' not found in any table" + ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1373,13 +1396,14 @@ public void testCannotConvert() throws Exception ErrorResponse exception = postSyncForException(unsupportedQuery, Status.BAD_REQUEST.getStatusCode()); Assert.assertTrue((Boolean) req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)); - Assert.assertNotNull(exception); - Assert.assertEquals(QueryException.SQL_QUERY_UNSUPPORTED_ERROR_CODE, exception.getErrorCode()); - Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue( - exception.getMessage() - .contains("Query not supported. 
" + - "Possible error: SQL query requires order by non-time column [dim1 ASC], which is not supported.") + + validateErrorResponse( + exception, + "adhoc", + DruidException.Persona.ADMIN, + DruidException.Category.INVALID_INPUT, + "Query planning failed for unknown reason, our best guess is this " + + "[SQL query requires order by non-time column [[dim1 ASC]], which is not supported.]" ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); @@ -1394,16 +1418,14 @@ public void testCannotConvert() throws Exception public void testCannotConvert_UnsupportedSQLQueryException() throws Exception { // max(string) unsupported - ErrorResponse exception = postSyncForException( + ErrorResponse errorResponse = postSyncForException( "SELECT max(dim1) FROM druid.foo", Status.BAD_REQUEST.getStatusCode() ); - Assert.assertNotNull(exception); - Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorCode(), exception.getErrorCode()); - Assert.assertEquals(PlanningError.UNSUPPORTED_SQL_ERROR.getErrorClass(), exception.getErrorClass()); - Assert.assertTrue( - exception.getMessage().contains("MAX does not support type [STRING]") + validateInvalidSqlError( + errorResponse, + "Aggregation [MAX] does not support type [STRING], column [v0]" ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); @@ -1412,7 +1434,7 @@ public void testCannotConvert_UnsupportedSQLQueryException() throws Exception @Test public void testResourceLimitExceeded() throws Exception { - final QueryException exception = doPost( + final ErrorResponse errorResponse = doPost( new SqlQuery( "SELECT DISTINCT dim1 FROM foo", ResultFormat.OBJECT, @@ -1424,10 +1446,12 @@ public void testResourceLimitExceeded() throws Exception ) ).lhs; - Assert.assertNotNull(exception); - Assert.assertEquals(exception.getErrorCode(), QueryException.RESOURCE_LIMIT_EXCEEDED_ERROR_CODE); - Assert.assertEquals(exception.getErrorClass(), 
ResourceLimitExceededException.class.getName()); - checkSqlRequestLog(false); + validateLegacyQueryExceptionErrorResponse( + errorResponse, + QueryException.RESOURCE_LIMIT_EXCEEDED_ERROR_CODE, + ResourceLimitExceededException.class.getName(), + "" + ); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1456,9 +1480,12 @@ public void testUnsupportedQueryThrowsException() throws Exception 501 ); - Assert.assertNotNull(exception); - Assert.assertEquals(QueryException.QUERY_UNSUPPORTED_ERROR_CODE, exception.getErrorCode()); - Assert.assertEquals(QueryUnsupportedException.class.getName(), exception.getErrorClass()); + validateLegacyQueryExceptionErrorResponse( + exception, + QueryException.QUERY_UNSUPPORTED_ERROR_CODE, + QueryUnsupportedException.class.getName(), + "" + ); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1534,8 +1561,7 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() } }, TEST_RESPONSE_CONTEXT_CONFIG, - DUMMY_DRUID_NODE, - StandardRestExceptionEncoder.instance() + DUMMY_DRUID_NODE ); String errorMessage = "This will be supported in Druid 9999"; @@ -1553,49 +1579,38 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() 501 ); - Assert.assertNotNull(exception); - Assert.assertNull(exception.getMessage()); - Assert.assertNull(exception.getHost()); - Assert.assertEquals(exception.getErrorCode(), QueryException.QUERY_UNSUPPORTED_ERROR_CODE); - Assert.assertNull(exception.getErrorClass()); + validateLegacyQueryExceptionErrorResponse( + exception, + QueryException.QUERY_UNSUPPORTED_ERROR_CODE, + "org.apache.druid.query.QueryUnsupportedException", + "This will be supported in Druid 9999" + ); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } + /** + * There are various points where Calcite feels it is acceptable to throw an AssertionError when it receives bad + * input. 
This is unfortunate as a java.lang.Error is very clearly documented to be something that nobody should + * try to catch. But, we can editorialize all we want, we still have to deal with it. So, this test reproduces + * the AssertionError behavior by using the substr() command. At the time that this test was written, the + * SQL substr assumes a literal for the second argument. The code ends up calling `RexLiteral.intValue` on the + * argument, which ends up calling a method that fails with an AssertionError, so this should generate the + * bad behavior for us. This test is validating that our exception handling deals with this meaningfully. + * If this test starts failing, it could be indicative of us not handling the AssertionErrors well anymore, + * OR it could be indicative of this specific code path not throwing an AssertionError anymore. If we run + * into the latter case, we should seek out a new code path that generates the error from Calcite. In the best + * world, this test starts failing because Calcite has moved all of its execptions away from AssertionErrors + * and we can no longer reproduce the behavior through Calcite, in that world, we should remove our own handling + * and this test at the same time. 
+ * + * @throws Exception + */ @Test public void testAssertionErrorThrowsErrorWithFilterResponse() throws Exception { - resource = new SqlResource( - JSON_MAPPER, - CalciteTests.TEST_AUTHORIZER_MAPPER, - sqlStatementFactory, - lifecycleManager, - new ServerConfig() - { - @Override - public boolean isShowDetailedJettyErrors() - { - return true; - } - - @Override - public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() - { - return new AllowedRegexErrorResponseTransformStrategy(ImmutableList.of()); - } - }, - TEST_RESPONSE_CONTEXT_CONFIG, - DUMMY_DRUID_NODE, - StandardRestExceptionEncoder.instance() - ); - - String errorMessage = "could not assert"; - failOnExecute(errorMessage); - onExecute = s -> { - throw new AssertionError(errorMessage); - }; ErrorResponse exception = postSyncForException( new SqlQuery( - "SELECT ANSWER TO LIFE", + "SELECT *, substr(dim2, strpos(dim2, 'hi')+2, 2) FROM foo LIMIT 2", ResultFormat.OBJECT, false, false, @@ -1603,14 +1618,15 @@ public ErrorResponseTransformStrategy getErrorResponseTransformStrategy() ImmutableMap.of("sqlQueryId", "id"), null ), - Status.INTERNAL_SERVER_ERROR.getStatusCode() + Status.BAD_REQUEST.getStatusCode() ); - Assert.assertNotNull(exception); - Assert.assertNull(exception.getMessage()); - Assert.assertNull(exception.getHost()); - Assert.assertEquals("Unknown exception", exception.getErrorCode()); - Assert.assertNull(exception.getErrorClass()); + MatcherAssert.assertThat( + exception.getUnderlyingException(), + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs("Calcite assertion violated: [not a literal: +(STRPOS($2, 'hi'), 2)]") + ); Assert.assertTrue(lifecycleManager.getAll("id").isEmpty()); } @@ -1621,7 +1637,7 @@ public void testTooManyRequests() throws Exception CountDownLatch queriesScheduledLatch = new CountDownLatch(numQueries - 1); CountDownLatch runQueryLatch = new CountDownLatch(1); - schedulerBaggage = () -> { + SCHEDULER_BAGGAGE.set(() -> { 
queriesScheduledLatch.countDown(); try { runQueryLatch.await(); @@ -1630,7 +1646,7 @@ public void testTooManyRequests() throws Exception throw new RE(e); } return null; - }; + }); final String sqlQueryId = "tooManyRequestsTest"; @@ -1658,7 +1674,7 @@ public void testTooManyRequests() throws Exception } queriesScheduledLatch.await(); - schedulerBaggage = () -> null; + SCHEDULER_BAGGAGE.set(() -> null); futures.add(executorService.submit(() -> { try { final Response retVal = postForSyncResponse( @@ -1734,9 +1750,12 @@ public void testQueryTimeoutException() throws Exception 504 ); - Assert.assertNotNull(exception); - Assert.assertEquals(exception.getErrorCode(), QueryException.QUERY_TIMEOUT_ERROR_CODE); - Assert.assertEquals(exception.getErrorClass(), QueryTimeoutException.class.getName()); + validateLegacyQueryExceptionErrorResponse( + exception, + QueryException.QUERY_TIMEOUT_ERROR_CODE, + QueryTimeoutException.class.getName(), + "" + ); Assert.assertTrue(lifecycleManager.getAll(sqlQueryId).isEmpty()); } @@ -1766,8 +1785,13 @@ public void testCancelBetweenValidateAndPlan() throws Exception Response queryResponse = future.get(); assertStatusAndCommonHeaders(queryResponse, Status.INTERNAL_SERVER_ERROR.getStatusCode()); - QueryException exception = deserializeResponse(queryResponse, QueryException.class); - Assert.assertEquals("Query cancelled", exception.getErrorCode()); + ErrorResponse exception = deserializeResponse(queryResponse, ErrorResponse.class); + validateLegacyQueryExceptionErrorResponse( + exception, + "Query cancelled", + null, + "" + ); } @Test @@ -1794,8 +1818,8 @@ public void testCancelBetweenPlanAndExecute() throws Exception Response queryResponse = future.get(); assertStatusAndCommonHeaders(queryResponse, Status.INTERNAL_SERVER_ERROR.getStatusCode()); - QueryException exception = deserializeResponse(queryResponse, QueryException.class); - Assert.assertEquals("Query cancelled", exception.getErrorCode()); + ErrorResponse exception = 
deserializeResponse(queryResponse, ErrorResponse.class); + validateLegacyQueryExceptionErrorResponse(exception, "Query cancelled", null, ""); } @Test @@ -1858,7 +1882,7 @@ public void testQueryContextException() throws Exception BaseQuery.SQL_QUERY_ID, sqlQueryId ); - final QueryException queryContextException = doPost( + final ErrorResponse errorResponse = doPost( new SqlQuery( "SELECT 1337", ResultFormat.OBJECT, @@ -1869,10 +1893,13 @@ public void testQueryContextException() throws Exception null ) ).lhs; - Assert.assertNotNull(queryContextException); - Assert.assertEquals(QueryException.BAD_QUERY_CONTEXT_ERROR_CODE, queryContextException.getErrorCode()); - Assert.assertEquals(BadQueryContextException.ERROR_CLASS, queryContextException.getErrorClass()); - Assert.assertTrue(queryContextException.getMessage().contains("2000'")); + + validateLegacyQueryExceptionErrorResponse( + errorResponse, + QueryException.BAD_QUERY_CONTEXT_ERROR_CODE, + BadQueryContextException.ERROR_CLASS, + "2000'" + ); checkSqlRequestLog(false); Assert.assertTrue(lifecycleManager.getAll(sqlQueryId).isEmpty()); } @@ -1886,11 +1913,9 @@ public void testQueryContextKeyNotAllowed() throws Exception Status.BAD_REQUEST.getStatusCode() ); - Assert.assertNotNull(exception); - Assert.assertEquals(PlanningError.VALIDATION_ERROR.getErrorCode(), exception.getErrorCode()); - MatcherAssert.assertThat( - exception.getMessage(), - CoreMatchers.containsString("Query context parameter [sqlInsertSegmentGranularity] is not allowed") + validateInvalidInputError( + exception, + "Query context parameter [sqlInsertSegmentGranularity] is not allowed" ); checkSqlRequestLog(false); } @@ -1919,7 +1944,7 @@ private static SqlQuery createSimpleQueryWithId(String sqlQueryId, String sql) return new SqlQuery(sql, null, false, false, false, ImmutableMap.of(BaseQuery.SQL_QUERY_ID, sqlQueryId), null); } - private Pair>> doPost(final SqlQuery query) throws Exception + private Pair>> doPost(final SqlQuery query) throws Exception 
{ return doPost(query, new TypeReference>>() { @@ -1927,7 +1952,7 @@ private Pair>> doPost(final SqlQuery qu } // Returns either an error or a result, assuming the result is a JSON object. - private Pair doPost( + private Pair doPost( final SqlQuery query, final TypeReference typeReference ) throws Exception @@ -1935,30 +1960,30 @@ private Pair doPost( return doPost(query, req, typeReference); } - private Pair doPostRaw(final SqlQuery query) throws Exception + private Pair doPostRaw(final SqlQuery query) throws Exception { return doPostRaw(query, req); } // Returns either an error or a result, assuming the result is a JSON object. @SuppressWarnings("unchecked") - private Pair doPost( + private Pair doPost( final SqlQuery query, final MockHttpServletRequest req, final TypeReference typeReference ) throws Exception { - final Pair pair = doPostRaw(query, req); + final Pair pair = doPostRaw(query, req); if (pair.rhs == null) { //noinspection unchecked - return (Pair) pair; + return (Pair) pair; } else { return Pair.of(pair.lhs, JSON_MAPPER.readValue(pair.rhs, typeReference)); } } // Returns either an error or a result. 
- private Pair doPostRaw(final SqlQuery query, final MockHttpServletRequest req) + private Pair doPostRaw(final SqlQuery query, final MockHttpServletRequest req) throws Exception { MockHttpServletResponse response = postForAsyncResponse(query, req); @@ -1966,7 +1991,7 @@ private Pair doPostRaw(final SqlQuery query, final MockH if (response.getStatus() == 200) { return Pair.of(null, new String(response.baos.toByteArray(), StandardCharsets.UTF_8)); } else { - return Pair.of(JSON_MAPPER.readValue(response.baos.toByteArray(), QueryException.class), null); + return Pair.of(JSON_MAPPER.readValue(response.baos.toByteArray(), ErrorResponse.class), null); } } @@ -2276,4 +2301,100 @@ public QueryResponse run() }; } } + + private DruidException validateErrorResponse( + ErrorResponse errorResponse, + String errorCode, + DruidException.Persona targetPersona, + DruidException.Category category, + String messageContainsString + ) + { + Assert.assertNotNull(errorResponse); + + DruidException exception = errorResponse.getUnderlyingException(); + + Assert.assertEquals(errorCode, exception.getErrorCode()); + Assert.assertEquals(targetPersona, exception.getTargetPersona()); + Assert.assertEquals(category, exception.getCategory()); + if (messageContainsString == null) { + Assert.assertNull(exception.getMessage()); + } else { + MatcherAssert.assertThat(exception.getMessage(), CoreMatchers.containsString(messageContainsString)); + } + + return exception; + } + + private DruidException validateInvalidSqlError( + ErrorResponse response, + String containsString + ) + { + final DruidException exception = validateInvalidInputError(response, containsString); + Assert.assertEquals("sql", exception.getContextValue("sourceType")); + + return exception; + } + + @Nonnull + private DruidException validateInvalidInputError(ErrorResponse response, String containsString) + { + return validateErrorResponse( + response, + "invalidInput", + DruidException.Persona.USER, + 
DruidException.Category.INVALID_INPUT, + containsString + ); + } + + private DruidException validateLegacyQueryExceptionErrorResponse( + ErrorResponse errorResponse, + String legacyCode, + String errorClass, + String messageContainsString + ) + { + DruidException exception = validateErrorResponse( + errorResponse, + QueryExceptionCompat.ERROR_CODE, + DruidException.Persona.OPERATOR, + convertToCategory(legacyCode), + messageContainsString + ); + + Assert.assertEquals(legacyCode, exception.getContextValue("legacyErrorCode")); + Assert.assertEquals(errorClass, exception.getContextValue("errorClass")); + + return exception; + } + + private static DruidException.Category convertToCategory(String legacyErrorCode) + { + // This code is copied from QueryExceptionCompat at the time of writing. This is because these mappings + // are fundamentally part of the API, so reusing the code from there runs the risk that changes in the mapping + // would change the API but not break the unit tests. So, the unit test uses its own mapping to ensure + // that we are validating and aware of API-affecting changes. 
+ switch (QueryException.fromErrorCode(legacyErrorCode)) { + case USER_ERROR: + return DruidException.Category.INVALID_INPUT; + case UNAUTHORIZED: + return DruidException.Category.UNAUTHORIZED; + case CAPACITY_EXCEEDED: + return DruidException.Category.CAPACITY_EXCEEDED; + case QUERY_RUNTIME_FAILURE: + return DruidException.Category.RUNTIME_FAILURE; + case CANCELED: + return DruidException.Category.CANCELED; + case UNKNOWN: + return DruidException.Category.UNCATEGORIZED; + case UNSUPPORTED: + return DruidException.Category.UNSUPPORTED; + case TIMEOUT: + return DruidException.Category.TIMEOUT; + default: + return DruidException.Category.UNCATEGORIZED; + } + } } From d37d42a0d63b6118bd9fae4f2cdaebb62cc64074 Mon Sep 17 00:00:00 2001 From: imply-cheddar Date: Wed, 14 Jun 2023 11:57:37 +0900 Subject: [PATCH 11/17] Review comments and test fixes after rebasing to master --- .../druid/msq/sql/MSQTaskSqlEngine.java | 3 +- .../apache/druid/msq/exec/MSQInsertTest.java | 9 ++-- ...erifyMSQSupportedNativeQueriesFactory.java | 0 .../apache/druid/error/DruidException.java | 46 ++++++++++++++++++- .../druid/error/DruidExceptionMatcher.java | 14 +++--- .../{DMatchers.java => DruidMatchers.java} | 2 +- .../druid/server/QueryResultPusher.java | 1 + .../planner/SqlParameterizerShuttle.java | 10 ++-- .../sql/calcite/CalciteIngestionDmlTest.java | 11 ++++- .../sql/calcite/CalciteInsertDmlTest.java | 6 +-- .../sql/calcite/CalciteJoinQueryTest.java | 2 +- .../druid/sql/calcite/CalciteQueryTest.java | 3 +- 12 files changed, 77 insertions(+), 30 deletions(-) delete mode 100644 extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/VerifyMSQSupportedNativeQueriesFactory.java rename processing/src/test/java/org/apache/druid/matchers/{DMatchers.java => DruidMatchers.java} (97%) diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java 
b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java index f76c2c392f3e..7d27103e0b92 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java +++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java @@ -251,13 +251,12 @@ private static void validateInsert( * queries, because we use these output names to generate columns in segments. They must be unique. */ private static void validateNoDuplicateAliases(final List> fieldMappings) - throws ValidationException { final Set aliasesSeen = new HashSet<>(); for (final Pair field : fieldMappings) { if (!aliasesSeen.add(field.right)) { - throw new ValidationException("Duplicate field in SELECT: [" + field.right + "]"); + throw InvalidSqlInput.exception("Duplicate field in SELECT: [%s]", field.right); } } } diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java index 794c944adf26..b4ba676b5890 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java @@ -24,6 +24,7 @@ import com.google.common.collect.ImmutableSet; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; +import org.apache.druid.common.config.NullHandling; import org.apache.druid.error.DruidException; import org.apache.druid.hll.HyperLogLogCollector; import org.apache.druid.java.util.common.ISE; @@ -1028,11 +1029,9 @@ public void testInsertDuplicateColumnNames() + " )\n" + ") PARTITIONED by day") .setQueryContext(context) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "Duplicate 
field in SELECT: [namespace]")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlIs("Duplicate field in SELECT: [namespace]") + ) .verifyPlanningErrors(); } diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/VerifyMSQSupportedNativeQueriesFactory.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/VerifyMSQSupportedNativeQueriesFactory.java deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index 2237402f54a6..097909415917 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -39,6 +39,48 @@ * will make its way back to the user. DruidException is always the answer to "how do I generate an error message and * deliver it to the user"? *

+ * At the time that DruidException was introduced, this type of "show this to the user please" exception was largely + * handled by creating {@link org.apache.druid.java.util.common.RE}, {@link org.apache.druid.java.util.common.IAE}, or + * {@link org.apache.druid.java.util.common.ISE} objects. It is intended that DruidException replaces all usage of + * these exceptions where the intention is to deliver a message to the user, which we believe to be the vast majority + * of usages. In cases where those exceptions are thrown with the intention of being caught and acted upon, + * no change should occur. + * + * Notes about exception messages + * + * Firstly, exception messages should always be written with the notions from the style conventions covered in + * {@code dev/style-conventions.md}. Whenever possible, we should also try to provide an action to take to resolve + * the issue. + * + * Secondly, given that the DruidException requires defining a target persona, exception messages should always be + * written with that target persona in mind. Reviewers should use the targetPersona as added input to help validate + * that an exception message is meaningful. + * + * For example, at the time that this exception was introduced, there is an exception that the router throws which is + * an {@link org.apache.druid.java.util.common.ISE} with the message {@code "No default server found!"}. This + * exception is thrown when the router is unable to find a broker to forward a request to. It is completely + * meaningless to an end-user trying to run a query (what's a default server? why does it need to be found?). If we + * were to convert the exception to a DruidException and keep the same message, we should mark it as targetting the + * DEVELOPER persona as that is the only persona who should actually be able to figure out what a default server is + * and why it is important.
That said, does it make sense for an exception that means "router cannot find a broker + * to forward the query to" to only be targetting the DEVELOPER? The answer to that is no, it's something that should + * really be made meaningful to a wider group. Some options could be + * + * USER persona: Cannot find a queryable server, contact your cluster administrator to validate that all services are + * operational + * + * OPERATOR persona: Router unable to find a broker, check that brokers are up and active + * + * The user-facing message doesn't talk about any Druid-specific concepts and just tries to relay a high-level + * understanding of what happened. The admin-facing message includes Druid notions in it as it expects that an Admin + * will understand the various node types of Druid. + * + * If we think about this error more, we will realize that it's fundamentally something wrong with the cluster setup, + * which is something that we would expect an operator to be in charge of. So, we would pick the OPERATOR persona + * message, which also allows us to include more specific information about what server was not found and provide a + * more meaningful action to take (check the health of your brokers). + * + * Description of fields of DruidException * Every error consists of: *

    *
  • A target persona
  • @@ -52,7 +94,7 @@ * The target persona indicates who the message is written for. This is important for 2 reasons *
      *
    1. It identifies why the developer is creating the exception and who they believe can take action on it. - * This context allows for code reviewers and other developers to evaluate the message with this context in mind
    2. + * This context allows for code reviewers and other developers to evaluate the message with the persona in mind *
    3. It can be used as a way to control which error messages should be routed where. For example, a user-targetted * error message should be able to be exposed directly to the user, while an operator-targetted error message should * perhaps be routed to the operators of the system instead of the end user firing a query.
    4. @@ -289,7 +331,7 @@ public enum Category */ TIMEOUT(504), /** - * Indicates some unsupported behavior was requested. TODO + * Indicates some unsupported behavior was requested. */ UNSUPPORTED(501), /** diff --git a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java index d25f6e7f068f..5741942f149c 100644 --- a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java +++ b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java @@ -19,7 +19,7 @@ package org.apache.druid.error; -import org.apache.druid.matchers.DMatchers; +import org.apache.druid.matchers.DruidMatchers; import org.hamcrest.Description; import org.hamcrest.DiagnosingMatcher; import org.hamcrest.Matcher; @@ -54,16 +54,16 @@ public DruidExceptionMatcher( ) { matcherList = new ArrayList<>(); - matcherList.add(DMatchers.fn("targetPersona", DruidException::getTargetPersona, Matchers.is(targetPersona))); - matcherList.add(DMatchers.fn("category", DruidException::getCategory, Matchers.is(category))); - matcherList.add(DMatchers.fn("errorCode", DruidException::getErrorCode, Matchers.is(errorCode))); + matcherList.add(DruidMatchers.fn("targetPersona", DruidException::getTargetPersona, Matchers.is(targetPersona))); + matcherList.add(DruidMatchers.fn("category", DruidException::getCategory, Matchers.is(category))); + matcherList.add(DruidMatchers.fn("errorCode", DruidException::getErrorCode, Matchers.is(errorCode))); delegate = new AllOf<>(matcherList); } public DruidExceptionMatcher expectContext(String key, String value) { - matcherList.add(DMatchers.fn("context", DruidException::getContext, Matchers.hasEntry(key, value))); + matcherList.add(DruidMatchers.fn("context", DruidException::getContext, Matchers.hasEntry(key, value))); return this; } @@ -79,13 +79,13 @@ public DruidExceptionMatcher expectMessageContains(String contains) public DruidExceptionMatcher 
expectMessage(Matcher messageMatcher) { - matcherList.add(DMatchers.fn("message", DruidException::getMessage, messageMatcher)); + matcherList.add(DruidMatchers.fn("message", DruidException::getMessage, messageMatcher)); return this; } public DruidExceptionMatcher expectException(Matcher causeMatcher) { - matcherList.add(DMatchers.fn("cause", DruidException::getCause, causeMatcher)); + matcherList.add(DruidMatchers.fn("cause", DruidException::getCause, causeMatcher)); return this; } diff --git a/processing/src/test/java/org/apache/druid/matchers/DMatchers.java b/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java similarity index 97% rename from processing/src/test/java/org/apache/druid/matchers/DMatchers.java rename to processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java index a6bc598bbafd..a3b94d613579 100644 --- a/processing/src/test/java/org/apache/druid/matchers/DMatchers.java +++ b/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java @@ -23,7 +23,7 @@ import java.util.function.Function; -public class DMatchers +public class DruidMatchers { public static LambdaMatcher fn(String name, Function fn, Matcher matcher) { diff --git a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java index a7c8705c1180..074beb545b43 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResultPusher.java +++ b/server/src/main/java/org/apache/druid/server/QueryResultPusher.java @@ -237,6 +237,7 @@ private Response handleDruidException(ResultsWriter resultsWriter, DruidExceptio case CAPACITY_EXCEEDED: case UNSUPPORTED: case UNCATEGORIZED: + case DEFENSIVE: counter.incrementFailed(); break; case TIMEOUT: diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java index 8420912742df..6619f48704e4 100644 --- 
a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlParameterizerShuttle.java @@ -127,9 +127,10 @@ private SqlNode createArrayLiteral(Object value, int posn) list = Arrays.asList((Object[]) value); } List args = new ArrayList<>(list.size()); - for (Object element : list) { + for (int i = 0, listSize = list.size(); i < listSize; i++) { + Object element = list.get(i); if (element == null) { - throw InvalidSqlInput.exception("An array parameter [%s] cannot contain null values", posn + 1); + throw InvalidSqlInput.exception("parameter [%d] is an array, with an illegal null at index [%d]", posn + 1, i); } SqlNode node; if (element instanceof String) { @@ -142,9 +143,10 @@ private SqlNode createArrayLiteral(Object value, int posn) node = SqlLiteral.createBoolean((Boolean) value, SqlParserPos.ZERO); } else { throw InvalidSqlInput.exception( - "An array parameter [%s] cannot contain values of type [%s]", + "parameter [%d] is an array, with an illegal value of type [%s] at index [%d]", posn + 1, - value.getClass() + value.getClass(), + i ); } args.add(node); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java index 22855033990b..071c8ae04d31 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java @@ -41,7 +41,6 @@ import org.apache.druid.initialization.DruidModule; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.java.util.common.UOE; import org.apache.druid.java.util.common.granularity.Granularity; import org.apache.druid.metadata.input.InputSourceModule; import org.apache.druid.query.Query; @@ -457,7 +456,7 @@ static class TestFileInputSource extends AbstractInputSource 
implements Splittab @Nonnull public Set getTypes() { - throw new UOE("This inputSource does not support input source based security"); + throw new CalciteIngestDmlTestException("getTypes()"); } @JsonProperty @@ -509,4 +508,12 @@ public int hashCode() return Objects.hash(files); } } + + static class CalciteIngestDmlTestException extends RuntimeException + { + public CalciteIngestDmlTestException(String message) + { + super(message); + } + } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index 5ae0fa004a1a..93f2c7951ce5 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -48,7 +48,6 @@ import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.parser.DruidSqlInsert; import org.apache.druid.sql.calcite.planner.Calcites; -import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.util.CalciteTests; import org.hamcrest.CoreMatchers; @@ -563,9 +562,8 @@ public void testInsertFromExternalWithoutSecuritySupportWithInputsourceSecurityE .expectLogicalPlanFrom("insertFromExternal") .expectValidationError( CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.equalTo( - "org.apache.druid.java.util.common.UOE: This inputSource does not support input source based security")) + CoreMatchers.instanceOf(CalciteIngestDmlTestException.class), + ThrowableMessageMatcher.hasMessage(CoreMatchers.equalTo("getTypes()")) ) ) .verify(); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java index 5714cb538387..31c0f5da35b4 100644 --- 
a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java @@ -3514,7 +3514,7 @@ public void testLeftJoinSubqueryWithNullKeyFilter(Map queryConte .context(queryContext) .build(); - boolean isJoinFilterRewriteEnabled = queryContext.getOrDefault(JOIN_FILTER_REWRITE_ENABLE_KEY, true) + boolean isJoinFilterRewriteEnabled = queryContext.getOrDefault(QueryContexts.JOIN_FILTER_REWRITE_ENABLE_KEY, true) .toString() .equals("true"); testQuery( diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 065355a2c284..792c25a5e1d7 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -2972,7 +2972,6 @@ public void testUnionAllTablesWhenMappingIsRequired() { // Cannot plan this UNION ALL operation, because the column swap would require generating a subquery. - msqCompatible(); assertQueryIsUnplannable( "SELECT\n" + "c, COUNT(*)\n" @@ -5896,7 +5895,7 @@ public void testCountStarWithTimeInIntervalFilterNonLiteral() invalidSqlIs( "Cannot apply 'TIME_IN_INTERVAL' to arguments of type " + "'TIME_IN_INTERVAL(, )'. 
Supported form(s): " - + "TIME_IN_INTERVAL(, ) (line [1], column [38])" + + "'TIME_IN_INTERVAL(, )' (line [1], column [38])" ) ); } From f2551bf1914e55a08fa49500db29bab00933258c Mon Sep 17 00:00:00 2001 From: imply-cheddar Date: Wed, 14 Jun 2023 21:14:13 +0900 Subject: [PATCH 12/17] Test fixes --- extensions-core/datasketches/pom.xml | 5 + .../sql/ThetaSketchSqlAggregatorTest.java | 2 +- ...ArrayOfDoublesSketchSqlAggregatorTest.java | 316 +++++++++--------- ...iaryDataManagerManualAddAndDeleteTest.java | 54 +-- .../common/exception/DruidException.java | 1 + .../org/apache/druid/error/ErrorResponse.java | 2 - .../druid/common/utils/IdUtilsTest.java | 72 ++-- .../druid/error/DruidExceptionMatcher.java | 23 ++ .../segment/nested/NestedPathFinderTest.java | 73 ++-- .../segment/indexing/DataSchemaTest.java | 100 +++--- .../druid/server/QueryResourceTest.java | 163 ++++++--- .../server/security/AuthValidatorTest.java | 37 +- 12 files changed, 488 insertions(+), 360 deletions(-) diff --git a/extensions-core/datasketches/pom.xml b/extensions-core/datasketches/pom.xml index 2657926a8a30..e5d408256aa0 100644 --- a/extensions-core/datasketches/pom.xml +++ b/extensions-core/datasketches/pom.xml @@ -165,6 +165,11 @@ hamcrest-core test + + org.hamcrest + hamcrest-all + test + nl.jqno.equalsverifier equalsverifier diff --git a/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/theta/sql/ThetaSketchSqlAggregatorTest.java b/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/theta/sql/ThetaSketchSqlAggregatorTest.java index 0887cc4e69a5..c1ddfa279d21 100644 --- a/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/theta/sql/ThetaSketchSqlAggregatorTest.java +++ b/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/theta/sql/ThetaSketchSqlAggregatorTest.java @@ -1035,7 +1035,7 @@ public void 
testThetaSketchIntersectOnScalarExpression() { assertQueryIsUnplannable( "SELECT THETA_SKETCH_INTERSECT(NULL, NULL) FROM foo", - "Possible error: THETA_SKETCH_INTERSECT can only be used on aggregates. " + + "THETA_SKETCH_INTERSECT can only be used on aggregates. " + "It cannot be used directly on a column or on a scalar expression." ); } diff --git a/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/tuple/sql/ArrayOfDoublesSketchSqlAggregatorTest.java b/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/tuple/sql/ArrayOfDoublesSketchSqlAggregatorTest.java index 3be8e8b18854..a240f89bdcc8 100644 --- a/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/tuple/sql/ArrayOfDoublesSketchSqlAggregatorTest.java +++ b/extensions-core/datasketches/src/test/java/org/apache/druid/query/aggregation/datasketches/tuple/sql/ArrayOfDoublesSketchSqlAggregatorTest.java @@ -158,13 +158,13 @@ public void testMetricsSumEstimate() cannotVectorize(); final String sql = "SELECT\n" - + " dim1,\n" - + " SUM(cnt),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(tuplesketch_dim2)),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(dim2, m1)),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(dim2, m1, 256))\n" - + "FROM druid.foo\n" - + "GROUP BY dim1"; + + " dim1,\n" + + " SUM(cnt),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(tuplesketch_dim2)),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(dim2, m1)),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(dim2, m1, 256))\n" + + "FROM druid.foo\n" + + "GROUP BY dim1"; final List expectedResults; @@ -189,54 +189,54 @@ public void testMetricsSumEstimate() sql, ImmutableList.of( GroupByQuery.builder() - .setDataSource(CalciteTests.DATASOURCE1) - .setInterval(querySegmentSpec(Filtration.eternity())) - .setGranularity(Granularities.ALL) - 
.setDimensions(new DefaultDimensionSpec("dim1", "d0", ColumnType.STRING)) - .setAggregatorSpecs( - aggregators( - new LongSumAggregatorFactory("a0", "cnt"), - new ArrayOfDoublesSketchAggregatorFactory( - "a1", - "tuplesketch_dim2", - null, - null, - null - ), - new ArrayOfDoublesSketchAggregatorFactory( - "a2", - "dim2", - null, - ImmutableList.of("m1"), - null - ), - new ArrayOfDoublesSketchAggregatorFactory( - "a3", - "dim2", - 256, - ImmutableList.of("m1"), - null - ) - ) - ) - .setPostAggregatorSpecs( - ImmutableList.of( - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p1", - new FieldAccessPostAggregator("p0", "a1") - ), - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p3", - new FieldAccessPostAggregator("p2", "a2") - ), - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p5", - new FieldAccessPostAggregator("p4", "a3") - ) - ) - ) - .setContext(QUERY_CONTEXT_DEFAULT) - .build() + .setDataSource(CalciteTests.DATASOURCE1) + .setInterval(querySegmentSpec(Filtration.eternity())) + .setGranularity(Granularities.ALL) + .setDimensions(new DefaultDimensionSpec("dim1", "d0", ColumnType.STRING)) + .setAggregatorSpecs( + aggregators( + new LongSumAggregatorFactory("a0", "cnt"), + new ArrayOfDoublesSketchAggregatorFactory( + "a1", + "tuplesketch_dim2", + null, + null, + null + ), + new ArrayOfDoublesSketchAggregatorFactory( + "a2", + "dim2", + null, + ImmutableList.of("m1"), + null + ), + new ArrayOfDoublesSketchAggregatorFactory( + "a3", + "dim2", + 256, + ImmutableList.of("m1"), + null + ) + ) + ) + .setPostAggregatorSpecs( + ImmutableList.of( + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p1", + new FieldAccessPostAggregator("p0", "a1") + ), + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p3", + new FieldAccessPostAggregator("p2", "a2") + ), + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p5", + new FieldAccessPostAggregator("p4", "a3") + ) + ) + ) + 
.setContext(QUERY_CONTEXT_DEFAULT) + .build() ), expectedResults ); @@ -248,14 +248,14 @@ public void testMetricsSumEstimateIntersect() cannotVectorize(); final String sql = "SELECT\n" - + " SUM(cnt),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(tuplesketch_dim2)) AS all_sum_estimates,\n" - + StringUtils.replace( - "DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES_INTERSECT(COMPLEX_DECODE_BASE64('arrayOfDoublesSketch', '%s'), DS_TUPLE_DOUBLES(tuplesketch_dim2), 128)) AS intersect_sum_estimates\n", - "%s", - COMPACT_BASE_64_ENCODED_SKETCH_FOR_INTERSECTION - ) - + "FROM druid.foo"; + + " SUM(cnt),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES(tuplesketch_dim2)) AS all_sum_estimates,\n" + + StringUtils.replace( + "DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(DS_TUPLE_DOUBLES_INTERSECT(COMPLEX_DECODE_BASE64('arrayOfDoublesSketch', '%s'), DS_TUPLE_DOUBLES(tuplesketch_dim2), 128)) AS intersect_sum_estimates\n", + "%s", + COMPACT_BASE_64_ENCODED_SKETCH_FOR_INTERSECTION + ) + + "FROM druid.foo"; final List expectedResults; @@ -268,8 +268,12 @@ public void testMetricsSumEstimateIntersect() ); final String expectedBase64Constant = "'" - + StringUtils.replace(COMPACT_BASE_64_ENCODED_SKETCH_FOR_INTERSECTION, "=", "\\u003D") - + "'"; + + StringUtils.replace( + COMPACT_BASE_64_ENCODED_SKETCH_FOR_INTERSECTION, + "=", + "\\u003D" + ) + + "'"; testQuery( sql, @@ -282,38 +286,40 @@ public void testMetricsSumEstimateIntersect() ImmutableList.of( new LongSumAggregatorFactory("a0", "cnt"), new ArrayOfDoublesSketchAggregatorFactory( - "a1", - "tuplesketch_dim2", - null, - null, - null + "a1", + "tuplesketch_dim2", + null, + null, + null ) ) ) .postAggregators( ImmutableList.of( - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p1", - new FieldAccessPostAggregator("p0", "a1") - ), - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p5", - new ArrayOfDoublesSketchSetOpPostAggregator( - "p4", - "INTERSECT", - 128, - null, - 
ImmutableList.of( - new ExpressionPostAggregator( - "p2", - "complex_decode_base64('arrayOfDoublesSketch'," + expectedBase64Constant + ")", - null, - queryFramework().macroTable() - ), - new FieldAccessPostAggregator("p3", "a1") - ) - ) - ) + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p1", + new FieldAccessPostAggregator("p0", "a1") + ), + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p5", + new ArrayOfDoublesSketchSetOpPostAggregator( + "p4", + "INTERSECT", + 128, + null, + ImmutableList.of( + new ExpressionPostAggregator( + "p2", + "complex_decode_base64('arrayOfDoublesSketch'," + + expectedBase64Constant + + ")", + null, + queryFramework().macroTable() + ), + new FieldAccessPostAggregator("p3", "a1") + ) + ) + ) ) ) .context(QUERY_CONTEXT_DEFAULT) @@ -329,12 +335,12 @@ public void testNullInputs() cannotVectorize(); final String sql = "SELECT\n" - + " DS_TUPLE_DOUBLES(NULL),\n" - + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(NULL),\n" - + " DS_TUPLE_DOUBLES_UNION(NULL, NULL),\n" - + " DS_TUPLE_DOUBLES_UNION(NULL, DS_TUPLE_DOUBLES(tuplesketch_dim2)),\n" - + " DS_TUPLE_DOUBLES_UNION(DS_TUPLE_DOUBLES(tuplesketch_dim2), NULL)\n" - + "FROM druid.foo"; + + " DS_TUPLE_DOUBLES(NULL),\n" + + " DS_TUPLE_DOUBLES_METRICS_SUM_ESTIMATE(NULL),\n" + + " DS_TUPLE_DOUBLES_UNION(NULL, NULL),\n" + + " DS_TUPLE_DOUBLES_UNION(NULL, DS_TUPLE_DOUBLES(tuplesketch_dim2)),\n" + + " DS_TUPLE_DOUBLES_UNION(DS_TUPLE_DOUBLES(tuplesketch_dim2), NULL)\n" + + "FROM druid.foo"; final List expectedResults; @@ -345,7 +351,7 @@ public void testNullInputs() "\"AQEJAwQBzJP/////////fw==\"", "\"AQEJAwgBzJP/////////fwIAAAAAAAAAjFnadZuMrkg6WYAWZ8t1NgAAAAAAACBAAAAAAAAANkA=\"", "\"AQEJAwgBzJP/////////fwIAAAAAAAAAjFnadZuMrkg6WYAWZ8t1NgAAAAAAACBAAAAAAAAANkA=\"", - } + } ); testQuery( @@ -366,57 +372,57 @@ public void testNullInputs() .aggregators( ImmutableList.of( new ArrayOfDoublesSketchAggregatorFactory( - "a0", - "v0", - null, - null, - null + "a0", + "v0", + null, + null, 
+ null ), new ArrayOfDoublesSketchAggregatorFactory( - "a1", - "tuplesketch_dim2", - null, - null, - null + "a1", + "tuplesketch_dim2", + null, + null, + null ) ) ) .postAggregators( ImmutableList.of( - new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( - "p1", - new ExpressionPostAggregator("p0", "null", null, queryFramework().macroTable()) - ), - new ArrayOfDoublesSketchSetOpPostAggregator( - "p4", - ArrayOfDoublesSketchOperations.Operation.UNION.name(), - null, - null, - ImmutableList.of( - new ExpressionPostAggregator("p2", "null", null, queryFramework().macroTable()), - new ExpressionPostAggregator("p3", "null", null, queryFramework().macroTable()) - ) - ), - new ArrayOfDoublesSketchSetOpPostAggregator( - "p7", - ArrayOfDoublesSketchOperations.Operation.UNION.name(), - null, - null, - ImmutableList.of( - new ExpressionPostAggregator("p5", "null", null, queryFramework().macroTable()), - new FieldAccessPostAggregator("p6", "a1") - ) - ), - new ArrayOfDoublesSketchSetOpPostAggregator( - "p10", - ArrayOfDoublesSketchOperations.Operation.UNION.name(), - null, - null, - ImmutableList.of( - new FieldAccessPostAggregator("p8", "a1"), - new ExpressionPostAggregator("p9", "null", null, queryFramework().macroTable()) - ) - ) + new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator( + "p1", + new ExpressionPostAggregator("p0", "null", null, queryFramework().macroTable()) + ), + new ArrayOfDoublesSketchSetOpPostAggregator( + "p4", + ArrayOfDoublesSketchOperations.Operation.UNION.name(), + null, + null, + ImmutableList.of( + new ExpressionPostAggregator("p2", "null", null, queryFramework().macroTable()), + new ExpressionPostAggregator("p3", "null", null, queryFramework().macroTable()) + ) + ), + new ArrayOfDoublesSketchSetOpPostAggregator( + "p7", + ArrayOfDoublesSketchOperations.Operation.UNION.name(), + null, + null, + ImmutableList.of( + new ExpressionPostAggregator("p5", "null", null, queryFramework().macroTable()), + new FieldAccessPostAggregator("p6", 
"a1") + ) + ), + new ArrayOfDoublesSketchSetOpPostAggregator( + "p10", + ArrayOfDoublesSketchOperations.Operation.UNION.name(), + null, + null, + ImmutableList.of( + new FieldAccessPostAggregator("p8", "a1"), + new ExpressionPostAggregator("p9", "null", null, queryFramework().macroTable()) + ) + ) ) ) .context(QUERY_CONTEXT_DEFAULT) @@ -429,24 +435,30 @@ public void testNullInputs() @Test public void testArrayOfDoublesSketchIntersectOnScalarExpression() { - assertQueryIsUnplannable("SELECT DS_TUPLE_DOUBLES_INTERSECT(NULL, NULL) FROM foo", - "Possible error: DS_TUPLE_DOUBLES_INTERSECT can only be used on aggregates. " + - "It cannot be used directly on a column or on a scalar expression."); + assertQueryIsUnplannable( + "SELECT DS_TUPLE_DOUBLES_INTERSECT(NULL, NULL) FROM foo", + "DS_TUPLE_DOUBLES_INTERSECT can only be used on aggregates. " + + "It cannot be used directly on a column or on a scalar expression." + ); } @Test public void testArrayOfDoublesSketchNotOnScalarExpression() { - assertQueryIsUnplannable("SELECT DS_TUPLE_DOUBLES_NOT(NULL, NULL) FROM foo", - "Possible error: DS_TUPLE_DOUBLES_NOT can only be used on aggregates. " + - "It cannot be used directly on a column or on a scalar expression."); + assertQueryIsUnplannable( + "SELECT DS_TUPLE_DOUBLES_NOT(NULL, NULL) FROM foo", + "DS_TUPLE_DOUBLES_NOT can only be used on aggregates. " + + "It cannot be used directly on a column or on a scalar expression." + ); } @Test public void testArrayOfDoublesSketchUnionOnScalarExpression() { - assertQueryIsUnplannable("SELECT DS_TUPLE_DOUBLES_UNION(NULL, NULL) FROM foo", - "Possible error: DS_TUPLE_DOUBLES_UNION can only be used on aggregates. " + - "It cannot be used directly on a column or on a scalar expression."); + assertQueryIsUnplannable( + "SELECT DS_TUPLE_DOUBLES_UNION(NULL, NULL) FROM foo", + "DS_TUPLE_DOUBLES_UNION can only be used on aggregates. " + + "It cannot be used directly on a column or on a scalar expression." 
+ ); } } diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/worker/shuffle/LocalIntermediaryDataManagerManualAddAndDeleteTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/worker/shuffle/LocalIntermediaryDataManagerManualAddAndDeleteTest.java index 75fed3e81dde..fabb1cfb1961 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/worker/shuffle/LocalIntermediaryDataManagerManualAddAndDeleteTest.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/worker/shuffle/LocalIntermediaryDataManagerManualAddAndDeleteTest.java @@ -24,6 +24,8 @@ import com.google.common.primitives.Ints; import org.apache.commons.io.FileUtils; import org.apache.druid.client.indexing.NoopOverlordClient; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.indexing.common.config.TaskConfig; import org.apache.druid.indexing.common.config.TaskConfigBuilder; import org.apache.druid.indexing.worker.config.WorkerConfig; @@ -36,13 +38,13 @@ import org.apache.druid.timeline.partition.BuildingShardSpec; import org.apache.druid.timeline.partition.ShardSpec; import org.apache.druid.timeline.partition.ShardSpecLookup; +import org.hamcrest.MatcherAssert; import org.joda.time.Interval; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.rules.TemporaryFolder; import java.io.File; @@ -56,9 +58,6 @@ public class LocalIntermediaryDataManagerManualAddAndDeleteTest @Rule public TemporaryFolder tempDir = new TemporaryFolder(); - @Rule - public ExpectedException expectedException = ExpectedException.none(); - private LocalIntermediaryDataManager intermediaryDataManager; private File intermediarySegmentsLocation; private File siblingLocation; @@ -93,11 +92,14 @@ public void testAddSegmentFailure() throws IOException DataSegment segment = 
newSegment(Intervals.of("2018/2019"), i); intermediaryDataManager.addSegment("supervisorTaskId", "subTaskId", segment, segmentFile); } - expectedException.expect(IllegalStateException.class); - expectedException.expectMessage("Can't find location to handle segment"); File segmentFile = generateSegmentDir("file_" + i); DataSegment segment = newSegment(Intervals.of("2018/2019"), 4); - intermediaryDataManager.addSegment("supervisorTaskId", "subTaskId", segment, segmentFile); + + IllegalStateException e = Assert.assertThrows( + IllegalStateException.class, + () -> intermediaryDataManager.addSegment("supervisorTaskId", "subTaskId", segment, segmentFile) + ); + Assert.assertEquals(StringUtils.format("Can't find location to handle segment[%s]", segment), e.getMessage()); } @Test @@ -140,7 +142,8 @@ public void deletePartitions() throws IOException for (int partitionId = 0; partitionId < 2; partitionId++) { for (int subTaskId = 0; subTaskId < 2; subTaskId++) { Assert.assertFalse( - intermediaryDataManager.findPartitionFile(supervisorTaskId, "subTaskId_" + subTaskId, interval, partitionId).isPresent() + intermediaryDataManager.findPartitionFile(supervisorTaskId, "subTaskId_" + subTaskId, interval, partitionId) + .isPresent() ); } } @@ -166,8 +169,6 @@ public void testAddRemoveAdd() throws IOException @Test public void testFailsWithCraftyFabricatedNamesForDelete() throws IOException { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("supervisorTaskId cannot start with the '.' 
character."); final String supervisorTaskId = "../" + siblingLocation.getName(); final String someFile = "sneaky-snake.txt"; File dataFile = new File(siblingLocation, someFile); @@ -178,7 +179,15 @@ public void testFailsWithCraftyFabricatedNamesForDelete() throws IOException ); Assert.assertTrue(new File(intermediarySegmentsLocation, supervisorTaskId).exists()); Assert.assertTrue(dataFile.exists()); - intermediaryDataManager.deletePartitions(supervisorTaskId); + MatcherAssert.assertThat( + Assert.assertThrows(DruidException.class, () -> intermediaryDataManager.deletePartitions(supervisorTaskId)), + DruidExceptionMatcher.invalidInput().expectMessageIs( + StringUtils.format( + "Invalid value for field [supervisorTaskId]: Value [%s] cannot start with '.'.", + supervisorTaskId + ) + ) + ); Assert.assertTrue(new File(intermediarySegmentsLocation, supervisorTaskId).exists()); Assert.assertTrue(dataFile.exists()); } @@ -186,8 +195,6 @@ public void testFailsWithCraftyFabricatedNamesForDelete() throws IOException @Test public void testFailsWithCraftyFabricatedNamesForFind() throws IOException { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("supervisorTaskId cannot start with the '.' 
character."); final String supervisorTaskId = "../" + siblingLocation.getName(); final Interval interval = Intervals.of("2018/2019"); final int partitionId = 0; @@ -211,13 +218,22 @@ public void testFailsWithCraftyFabricatedNamesForFind() throws IOException Assert.assertTrue( new File(intermediarySegmentsLocation, supervisorTaskId + "/" + someFilePath).exists()); - final Optional foundFile1 = intermediaryDataManager.findPartitionFile( - supervisorTaskId, - someFile, - interval, - partitionId + + MatcherAssert.assertThat( + Assert.assertThrows(DruidException.class, () -> + intermediaryDataManager.findPartitionFile( + supervisorTaskId, + someFile, + interval, + partitionId + )), + DruidExceptionMatcher.invalidInput().expectMessageIs( + StringUtils.format( + "Invalid value for field [supervisorTaskId]: Value [%s] cannot start with '.'.", + supervisorTaskId + ) + ) ); - Assert.assertFalse(foundFile1.isPresent()); } private File generateSegmentDir(String fileName) throws IOException diff --git a/processing/src/main/java/org/apache/druid/common/exception/DruidException.java b/processing/src/main/java/org/apache/druid/common/exception/DruidException.java index 638653fc5cee..55373baf143f 100644 --- a/processing/src/main/java/org/apache/druid/common/exception/DruidException.java +++ b/processing/src/main/java/org/apache/druid/common/exception/DruidException.java @@ -22,6 +22,7 @@ /** * A generic exception thrown by Druid. 
*/ +@Deprecated public class DruidException extends RuntimeException { public static final int HTTP_CODE_SERVER_ERROR = 500; diff --git a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java index f65615a94eb3..d1b415feec6d 100644 --- a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java +++ b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java @@ -66,8 +66,6 @@ public class ErrorResponse @JsonCreator public static ErrorResponse fromMap(Map map) { - // TODO: perhaps need to have normal DruidExceptions set error too just so that they can masquerade as - // QueryExceptions on initial release. final DruidException.Failure failure; final Object legacyErrorType = map.get("error"); diff --git a/processing/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java b/processing/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java index 806c74e61617..b61ef5df4cd8 100644 --- a/processing/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java +++ b/processing/src/test/java/org/apache/druid/common/utils/IdUtilsTest.java @@ -19,21 +19,17 @@ package org.apache.druid.common.utils; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; import org.junit.Assert; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; public class IdUtilsTest { private static final String THINGO = "thingToValidate"; public static final String VALID_ID_CHARS = "alpha123..*~!@#&%^&*()-+ Россия\\ 한국 中国!"; - @Rule - public ExpectedException expectedException = ExpectedException.none(); - @Test public void testValidIdName() { @@ -43,89 +39,89 @@ public void testValidIdName() @Test public void testInvalidNull() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot be null or 
empty. Please provide a thingToValidate."); - IdUtils.validateId(THINGO, null); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: must not be null" + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, null)); } @Test public void testInvalidEmpty() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot be null or empty. Please provide a thingToValidate."); - IdUtils.validateId(THINGO, ""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: must not be null" + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "")); } @Test public void testInvalidSlashes() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain the '/' character."); - IdUtils.validateId(THINGO, "/paths/are/bad/since/we/make/files/from/stuff"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [/paths/are/bad/since/we/make/files/from/stuff] cannot contain '/'." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "/paths/are/bad/since/we/make/files/from/stuff")); } @Test public void testInvalidLeadingDot() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot start with the '.' character."); - IdUtils.validateId(THINGO, "./nice/try"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [./nice/try] cannot start with '.'." 
+ ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "./nice/try")); } @Test public void testInvalidSpacesRegexTabs() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "spaces\tare\tbetter\tthan\ttabs\twhich\tare\tillegal"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [spaces\tare\tbetter\tthan\ttabs\twhich\tare\tillegal] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "spaces\tare\tbetter\tthan\ttabs\twhich\tare\tillegal")); } @Test public void testInvalidSpacesRegexNewline() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "new\nline"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [new\nline] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "new\nline")); } @Test public void testInvalidSpacesRegexCarriageReturn() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "does\rexist\rby\ritself"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [does\rexist\rby\ritself] contains illegal whitespace characters. Only space is allowed." 
+ ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "does\rexist\rby\ritself")); } @Test public void testInvalidSpacesRegexLineTabulation() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "what\u000Bis line tabulation"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [what\u000Bis line tabulation] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "what\u000Bis line tabulation")); } @Test public void testInvalidSpacesRegexFormFeed() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain whitespace character except space."); - IdUtils.validateId(THINGO, "form\u000cfeed?"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [form\ffeed?] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "form\u000cfeed?")); } @Test public void testInvalidUnprintableChars() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain character #129 (at position 4)."); - IdUtils.validateId(THINGO, "form\u0081feed?"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [form\u0081feed?] 
contains illegal UTF8 character [#129] at position [4]" + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "form\u0081feed?")); } @Test public void testInvalidEmojis() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("thingToValidate cannot contain character #55357 (at position 4)."); - IdUtils.validateId(THINGO, "form💯feed?"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [thingToValidate]: Value [form\uD83D\uDCAFfeed?] contains illegal UTF8 character [#55357] at position [4]" + ).assertThrowsAndMatches(() -> IdUtils.validateId(THINGO, "form💯feed?")); } @Test diff --git a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java index 5741942f149c..c27658ca91d2 100644 --- a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java +++ b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java @@ -23,6 +23,7 @@ import org.hamcrest.Description; import org.hamcrest.DiagnosingMatcher; import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; import org.hamcrest.core.AllOf; @@ -100,4 +101,26 @@ public void describeTo(Description description) { delegate.describeTo(description); } + + public void assertThrowsAndMatches(ThrowingSupplier fn) + { + boolean thrown = false; + try { + fn.get(); + } + catch (Throwable e) { + if (e instanceof DruidException) { + MatcherAssert.assertThat(e, this); + thrown = true; + } else { + throw new RuntimeException(e); + } + } + MatcherAssert.assertThat(thrown, Matchers.is(true)); + } + + public interface ThrowingSupplier + { + void get() throws Throwable; + } } diff --git a/processing/src/test/java/org/apache/druid/segment/nested/NestedPathFinderTest.java b/processing/src/test/java/org/apache/druid/segment/nested/NestedPathFinderTest.java index 161a752ce1e0..b22131833c64 100644 --- 
a/processing/src/test/java/org/apache/druid/segment/nested/NestedPathFinderTest.java +++ b/processing/src/test/java/org/apache/druid/segment/nested/NestedPathFinderTest.java @@ -21,11 +21,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.apache.druid.java.util.common.IAE; +import org.apache.druid.error.DruidExceptionMatcher; import org.junit.Assert; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import java.util.List; import java.util.Map; @@ -41,9 +39,6 @@ public class NestedPathFinderTest "[also_sneaky]", ImmutableList.of(ImmutableMap.of("a", "x"), ImmutableMap.of("b", "y", "c", "z")) ); - @Rule - public ExpectedException expectedException = ExpectedException.none(); - @Test public void testParseJqPath() { @@ -188,7 +183,10 @@ public void testParseJqPath() Assert.assertEquals("f?o.o", pathParts.get(2).getPartIdentifier()); Assert.assertTrue(pathParts.get(3) instanceof NestedPathField); Assert.assertEquals(".b?.a.r.", pathParts.get(3).getPartIdentifier()); - Assert.assertEquals(".\"x.y.z]?[\\\"]][]\".\"13234.12[]][23\".\"f?o.o\".\".b?.a.r.\"", NestedPathFinder.toNormalizedJqPath(pathParts)); + Assert.assertEquals( + ".\"x.y.z]?[\\\"]][]\".\"13234.12[]][23\".\"f?o.o\".\".b?.a.r.\"", + NestedPathFinder.toNormalizedJqPath(pathParts) + ); } @Test @@ -334,85 +332,84 @@ public void testParseJsonPath() @Test public void testBadFormatMustStartWithDot() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, 'x.y' is not a valid 'jq' path: must start with '.'"); - NestedPathFinder.parseJqPath("x.y"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [x.y] is invalid, it must start with '.'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath("x.y")); } @Test public void testBadFormatNoDot() { - expectedException.expect(IAE.class); - expectedException.expectMessage(".\"x\"\"y\"' is not a valid 'jq' path: path 
parts must be separated with '.'"); - NestedPathFinder.parseJqPath(".\"x\"\"y\""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.\"x\"\"y\"] is invalid, path parts must be separated with '.'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".\"x\"\"y\"")); } @Test public void testBadFormatWithDot2() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '..\"x\"' is not a valid 'jq' path: path parts separated by '.' must not be empty"); - NestedPathFinder.parseJqPath("..\"x\""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [..\"x\"] is invalid, path parts separated by '.' must not be empty" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath("..\"x\"")); } @Test public void testBadFormatWithDot3() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x.[1]' is not a valid 'jq' path: invalid position 3 for '[', must not follow '.' or must be contained with '\"'"); - NestedPathFinder.parseJqPath(".x.[1]"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x.[1]] is invalid, found '[' at invalid position [3], must not follow '.' or must be contained with '\"'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x.[1]")); } @Test public void testBadFormatWithDot4() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[1].[2]' is not a valid 'jq' path: invalid position 6 for '[', must not follow '.' or must be contained with '\"'"); - NestedPathFinder.parseJqPath(".x[1].[2]"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[1].[2]] is invalid, found '[' at invalid position [6], must not follow '.' 
or must be contained with '\"'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[1].[2]")); } @Test public void testBadFormatNotANumber() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[.1]' is not a valid 'jq' path: expected number for array specifier got .1 instead. Use \"\" if this value was meant to be a field name"); - NestedPathFinder.parseJqPath(".x[.1]"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[.1]] is invalid, array specifier [.1] should be a number, it was not. Use \"\" if this value was meant to be a field name" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[.1]")); } @Test public void testBadFormatUnclosedArray() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[1' is not a valid 'jq' path: unterminated '['"); - NestedPathFinder.parseJqPath(".x[1"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[1] is invalid, unterminated '['" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[1")); } @Test public void testBadFormatUnclosedArray2() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[\"1\"' is not a valid 'jq' path: unterminated '['"); - NestedPathFinder.parseJqPath(".x[\"1\""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[\"1\"] is invalid, unterminated '['" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[\"1\"")); } @Test public void testBadFormatUnclosedQuote() { - expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x.\"1' is not a valid 'jq' path: unterminated '\"'"); - NestedPathFinder.parseJqPath(".x.\"1"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x.\"1] is invalid, unterminated '\"'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x.\"1")); } @Test public void testBadFormatUnclosedQuote2() { - 
expectedException.expect(IAE.class); - expectedException.expectMessage("Bad format, '.x[\"1]' is not a valid 'jq' path: unterminated '\"'"); - NestedPathFinder.parseJqPath(".x[\"1]"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "jq path [.x[\"1]] is invalid, unterminated '\"'" + ).assertThrowsAndMatches(() -> NestedPathFinder.parseJqPath(".x[\"1]")); } - @Test public void testPathSplitter() { diff --git a/server/src/test/java/org/apache/druid/segment/indexing/DataSchemaTest.java b/server/src/test/java/org/apache/druid/segment/indexing/DataSchemaTest.java index ffc7934fafe0..78294fca0c4b 100644 --- a/server/src/test/java/org/apache/druid/segment/indexing/DataSchemaTest.java +++ b/server/src/test/java/org/apache/druid/segment/indexing/DataSchemaTest.java @@ -32,8 +32,10 @@ import org.apache.druid.data.input.impl.JSONParseSpec; import org.apache.druid.data.input.impl.StringInputRowParser; import org.apache.druid.data.input.impl.TimestampSpec; +import org.apache.druid.error.DruidExceptionMatcher; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.Intervals; +import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.DurationGranularity; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.jackson.JacksonUtils; @@ -48,6 +50,7 @@ import org.apache.druid.segment.transform.TransformSpec; import org.apache.druid.testing.InitializedNullHandlingTest; import org.hamcrest.CoreMatchers; +import org.hamcrest.MatcherAssert; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; @@ -60,7 +63,6 @@ import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.Set; @@ -413,22 +415,24 @@ public void testEmptyDatasource() ), JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT ); - 
expectedException.expect(CoreMatchers.instanceOf(IllegalArgumentException.class)); - expectedException.expectMessage( - "dataSource cannot be null or empty. Please provide a dataSource." - ); - - DataSchema schema = new DataSchema( - "", - parser, - new AggregatorFactory[]{ - new DoubleSumAggregatorFactory("metric1", "col1"), - new DoubleSumAggregatorFactory("metric2", "col2"), - }, - new ArbitraryGranularitySpec(Granularities.DAY, ImmutableList.of(Intervals.of("2014/2015"))), - null, - jsonMapper - ); + DruidExceptionMatcher + .invalidInput() + .expectMessageIs("Invalid value for field [dataSource]: must not be null") + .assertThrowsAndMatches( + () -> new DataSchema( + "", + parser, + new AggregatorFactory[]{ + new DoubleSumAggregatorFactory("metric1", "col1"), + new DoubleSumAggregatorFactory("metric2", "col2"), + }, + new ArbitraryGranularitySpec( + Granularities.DAY, + ImmutableList.of(Intervals.of("2014/2015")) + ), + null, + jsonMapper + )); } @@ -442,27 +446,21 @@ public void testInvalidWhitespaceDatasource() ); for (Map.Entry entry : invalidCharToDataSourceName.entrySet()) { - testInvalidWhitespaceDatasourceHelper(entry.getValue(), entry.getKey()); - } - } - - private void testInvalidWhitespaceDatasourceHelper(String dataSource, String invalidChar) - { - String testFailMsg = "dataSource contain invalid whitespace character: " + invalidChar; - try { - DataSchema schema = new DataSchema( - dataSource, - Collections.emptyMap(), - null, - null, - null, - jsonMapper + String dataSource = entry.getValue(); + final String msg = StringUtils.format( + "Invalid value for field [dataSource]: Value [%s] contains illegal whitespace characters. 
Only space is allowed.", + dataSource + ); + DruidExceptionMatcher.invalidInput().expectMessageIs(msg).assertThrowsAndMatches( + () -> new DataSchema( + dataSource, + Collections.emptyMap(), + null, + null, + null, + jsonMapper + ) ); - Assert.fail(testFailMsg); - } - catch (IllegalArgumentException errorMsg) { - String expectedMsg = "dataSource cannot contain whitespace character except space."; - Assert.assertEquals(testFailMsg, expectedMsg, errorMsg.getMessage()); } } @@ -524,10 +522,22 @@ public void testSerde() throws Exception public void testSerializeWithInvalidDataSourceName() throws Exception { // Escape backslashes to insert a tab character in the datasource name. - List datasources = ImmutableList.of("", "../invalid", "\tname", "name\t invalid"); - for (String datasource : datasources) { + Map datasourceToErrorMsg = ImmutableMap.of( + "", + "Invalid value for field [dataSource]: must not be null", + + "../invalid", + "Invalid value for field [dataSource]: Value [../invalid] cannot start with '.'.", + + "\tname", + "Invalid value for field [dataSource]: Value [\tname] contains illegal whitespace characters. Only space is allowed.", + + "name\t invalid", + "Invalid value for field [dataSource]: Value [name\t invalid] contains illegal whitespace characters. Only space is allowed." 
+ ); + for (Map.Entry entry : datasourceToErrorMsg.entrySet()) { String jsonStr = "{" - + "\"dataSource\":\"" + StringEscapeUtils.escapeJson(datasource) + "\"," + + "\"dataSource\":\"" + StringEscapeUtils.escapeJson(entry.getKey()) + "\"," + "\"parser\":{" + "\"type\":\"string\"," + "\"parseSpec\":{" @@ -552,10 +562,16 @@ public void testSerializeWithInvalidDataSourceName() throws Exception ); } catch (ValueInstantiationException e) { - Assert.assertEquals(IllegalArgumentException.class, e.getCause().getClass()); + MatcherAssert.assertThat( + entry.getKey(), + e.getCause(), + DruidExceptionMatcher.invalidInput().expectMessageIs( + entry.getValue() + ) + ); continue; } - Assert.fail("Serialization of datasource " + datasource + " should have failed."); + Assert.fail("Serialization of datasource " + entry.getKey() + " should have failed."); } } diff --git a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java index 34c0a44a5cf9..adb9b0e8f805 100644 --- a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java +++ b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java @@ -30,6 +30,9 @@ import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Injector; import com.google.inject.Key; +import org.apache.druid.error.DruidException; +import org.apache.druid.error.DruidExceptionMatcher; +import org.apache.druid.error.ErrorResponse; import org.apache.druid.guice.GuiceInjectors; import org.apache.druid.guice.annotations.Smile; import org.apache.druid.jackson.DefaultObjectMapper; @@ -76,6 +79,7 @@ import org.apache.druid.server.security.ForbiddenException; import org.apache.druid.server.security.Resource; import org.apache.http.HttpStatus; +import org.hamcrest.MatcherAssert; import org.joda.time.Interval; import org.junit.Assert; import org.junit.Before; @@ -87,16 +91,18 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; 
import javax.ws.rs.core.Response.Status; -import javax.ws.rs.core.StreamingOutput; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -642,11 +648,23 @@ public QueryRunner getQueryRunnerForSegments(Query query, Iterable> back2 = new ArrayList<>(); + createScheduledQueryResource(laningScheduler, Collections.emptyList(), ImmutableList.of(waitTwoScheduled)); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); - assertAsyncResponseAndCountdownOrBlockForever( + )); + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> Assert.assertEquals(Status.OK.getStatusCode(), response.getStatus()) - ); + )); waitTwoScheduled.await(); - assertSynchronousResponseAndCountdownOrBlockForever( + back2.add(eventuallyaAssertSynchronousResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> { Assert.assertEquals(QueryCapacityExceededException.STATUS_CODE, response.getStatus()); QueryCapacityExceededException ex; + final ErrorResponse entity = (ErrorResponse) response.getEntity(); + MatcherAssert.assertThat( + entity.getUnderlyingException(), + new DruidExceptionMatcher( + DruidException.Persona.OPERATOR, + DruidException.Category.CAPACITY_EXCEEDED, + "legacyQueryException" + ) + .expectMessageIs( + "Too many concurrent queries, total query capacity of 2 exceeded. 
Please try your query again later.") + ); + try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ((StreamingOutput) response.getEntity()).write(baos); + jsonMapper.writeValue(baos, entity); + + // Here we are converting to a QueryCapacityExceededException. This is just to validate legacy stuff. + // When we delete the QueryException class, we can just rely on validating the DruidException instead ex = jsonMapper.readValue(baos.toByteArray(), QueryCapacityExceededException.class); } catch (IOException e) { @@ -917,17 +948,19 @@ public void testTooManyQuery() throws InterruptedException Assert.assertEquals(QueryCapacityExceededException.makeTotalErrorMessage(2), ex.getMessage()); Assert.assertEquals(QueryException.QUERY_CAPACITY_EXCEEDED_ERROR_CODE, ex.getErrorCode()); } - ); - waitAllFinished.await(); + )); + + for (Future theFuture : back2) { + Assert.assertTrue(theFuture.get()); + } } @Test(timeout = 10_000L) - public void testTooManyQueryInLane() throws InterruptedException + public void testTooManyQueryInLane() throws InterruptedException, ExecutionException { expectPermissiveHappyPathAuth(); final CountDownLatch waitTwoStarted = new CountDownLatch(2); final CountDownLatch waitOneScheduled = new CountDownLatch(1); - final CountDownLatch waitAllFinished = new CountDownLatch(3); final QueryScheduler scheduler = new QueryScheduler( 40, ManualQueryPrioritizationStrategy.INSTANCE, @@ -935,23 +968,39 @@ public void testTooManyQueryInLane() throws InterruptedException new ServerConfig() ); + ArrayList> back2 = new ArrayList<>(); + createScheduledQueryResource(scheduler, ImmutableList.of(waitTwoStarted), ImmutableList.of(waitOneScheduled)); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY_LOW_PRIORITY, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); + )); waitOneScheduled.await(); - 
assertSynchronousResponseAndCountdownOrBlockForever( + back2.add(eventuallyaAssertSynchronousResponse( SIMPLE_TIMESERIES_QUERY_LOW_PRIORITY, - waitAllFinished, response -> { Assert.assertEquals(QueryCapacityExceededException.STATUS_CODE, response.getStatus()); QueryCapacityExceededException ex; + + final ErrorResponse entity = (ErrorResponse) response.getEntity(); + MatcherAssert.assertThat( + entity.getUnderlyingException(), + new DruidExceptionMatcher( + DruidException.Persona.OPERATOR, + DruidException.Category.CAPACITY_EXCEEDED, + "legacyQueryException" + ) + .expectMessageIs( + "Too many concurrent queries for lane 'low', query capacity of 1 exceeded. Please try your query again later.") + ); + try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ((StreamingOutput) response.getEntity()).write(baos); + jsonMapper.writeValue(baos, entity); + + // Here we are converting to a QueryCapacityExceededException. This is just to validate legacy stuff. + // When we delete the QueryException class, we can just rely on validating the DruidException instead ex = jsonMapper.readValue(baos.toByteArray(), QueryCapacityExceededException.class); } catch (IOException e) { @@ -964,24 +1013,24 @@ public void testTooManyQueryInLane() throws InterruptedException Assert.assertEquals(QueryException.QUERY_CAPACITY_EXCEEDED_ERROR_CODE, ex.getErrorCode()); } - ); + )); waitTwoStarted.await(); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); + )); - waitAllFinished.await(); + for (Future theFuture : back2) { + Assert.assertTrue(theFuture.get()); + } } @Test(timeout = 10_000L) - public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws InterruptedException + public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws InterruptedException, ExecutionException { 
expectPermissiveHappyPathAuth(); final CountDownLatch waitTwoStarted = new CountDownLatch(2); final CountDownLatch waitOneScheduled = new CountDownLatch(1); - final CountDownLatch waitAllFinished = new CountDownLatch(3); final QueryScheduler scheduler = new QueryScheduler( 40, new ThresholdBasedQueryPrioritizationStrategy(null, "P90D", null, null), @@ -989,23 +1038,38 @@ public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws Interru new ServerConfig() ); + ArrayList> back2 = new ArrayList<>(); createScheduledQueryResource(scheduler, ImmutableList.of(waitTwoStarted), ImmutableList.of(waitOneScheduled)); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); + )); waitOneScheduled.await(); - assertSynchronousResponseAndCountdownOrBlockForever( + back2.add(eventuallyaAssertSynchronousResponse( SIMPLE_TIMESERIES_QUERY, - waitAllFinished, response -> { Assert.assertEquals(QueryCapacityExceededException.STATUS_CODE, response.getStatus()); QueryCapacityExceededException ex; + + final ErrorResponse entity = (ErrorResponse) response.getEntity(); + MatcherAssert.assertThat( + entity.getUnderlyingException(), + new DruidExceptionMatcher( + DruidException.Persona.OPERATOR, + DruidException.Category.CAPACITY_EXCEEDED, + "legacyQueryException" + ) + .expectMessageIs( + "Too many concurrent queries for lane 'low', query capacity of 1 exceeded. Please try your query again later.") + ); + try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ((StreamingOutput) response.getEntity()).write(baos); + jsonMapper.writeValue(baos, entity); + + // Here we are converting to a QueryCapacityExceededException. This is just to validate legacy stuff. 
+ // When we delete the QueryException class, we can just rely on validating the DruidException instead ex = jsonMapper.readValue(baos.toByteArray(), QueryCapacityExceededException.class); } catch (IOException e) { @@ -1017,15 +1081,16 @@ public void testTooManyQueryInLaneImplicitFromDurationThreshold() throws Interru ); Assert.assertEquals(QueryException.QUERY_CAPACITY_EXCEEDED_ERROR_CODE, ex.getErrorCode()); } - ); + )); waitTwoStarted.await(); - assertAsyncResponseAndCountdownOrBlockForever( + back2.add(eventuallyAssertAsyncResponse( SIMPLE_TIMESERIES_QUERY_SMALLISH_INTERVAL, - waitAllFinished, response -> Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()) - ); + )); - waitAllFinished.await(); + for (Future theFuture : back2) { + Assert.assertTrue(theFuture.get()); + } } private void createScheduledQueryResource( @@ -1090,20 +1155,19 @@ public QueryRunner getQueryRunnerForSegments(Query query, Iterable eventuallyAssertAsyncResponse( String query, - CountDownLatch done, Consumer asserts ) { - Executors.newSingleThreadExecutor().submit(() -> { + return Executors.newSingleThreadExecutor().submit(() -> { try { asserts.accept(expectAsyncRequestFlow(query, testServletRequest.mimic())); } catch (IOException e) { throw new RuntimeException(e); } - done.countDown(); + return true; }); } @@ -1152,13 +1216,12 @@ private MockHttpServletResponse expectAsyncRequestFlow( return response; } - private void assertSynchronousResponseAndCountdownOrBlockForever( + private Future eventuallyaAssertSynchronousResponse( String query, - CountDownLatch done, Consumer asserts ) { - Executors.newSingleThreadExecutor().submit(() -> { + return Executors.newSingleThreadExecutor().submit(() -> { try { asserts.accept( expectSynchronousRequestFlow( @@ -1171,7 +1234,7 @@ private void assertSynchronousResponseAndCountdownOrBlockForever( catch (IOException e) { throw new RuntimeException(e); } - done.countDown(); + return true; }); } diff --git 
a/server/src/test/java/org/apache/druid/server/security/AuthValidatorTest.java b/server/src/test/java/org/apache/druid/server/security/AuthValidatorTest.java index 3edfec1b5d0f..c8f78fb15a52 100644 --- a/server/src/test/java/org/apache/druid/server/security/AuthValidatorTest.java +++ b/server/src/test/java/org/apache/druid/server/security/AuthValidatorTest.java @@ -19,6 +19,7 @@ package org.apache.druid.server.security; +import org.apache.druid.error.DruidExceptionMatcher; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -40,41 +41,41 @@ public void setUp() @Test public void testAuthorizerNameWithEmptyIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot be null or empty."); - target.validateAuthorizerName(""); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: must not be null" + ).assertThrowsAndMatches(() -> target.validateAuthorizerName("")); } @Test public void testAuthorizerNameWithNullIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot be null or empty."); - target.validateAuthorizerName(null); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: must not be null" + ).assertThrowsAndMatches(() -> target.validateAuthorizerName(null)); } @Test public void testAuthorizerNameStartsWithDotIsInValid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot start with the '.' character."); - target.validateAuthorizerName(".test"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: Value [.test] cannot start with '.'." 
+ ).assertThrowsAndMatches(() -> target.validateAuthorizerName(".test")); } @Test public void testAuthorizerNameWithSlashIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot contain the '/' character."); - target.validateAuthorizerName("tes/t"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: Value [tes/t] cannot contain '/'." + ).assertThrowsAndMatches(() -> target.validateAuthorizerName("tes/t")); } @Test public void testAuthorizerNameWithWhitespaceIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authorizerName cannot contain whitespace character except space."); - target.validateAuthorizerName("tes\tt"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authorizerName]: Value [tes\tt] contains illegal whitespace characters. Only space is allowed." + ).assertThrowsAndMatches(() -> target.validateAuthorizerName("tes\tt")); } @Test @@ -92,8 +93,8 @@ public void testAuthenticatorNameWithAllowedCharactersIsValid() @Test public void testAuthenticatorNameWithWhitespaceIsInvalid() { - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("authenticatorName cannot contain whitespace character except space."); - target.validateAuthenticatorName("tes\tt"); + DruidExceptionMatcher.invalidInput().expectMessageIs( + "Invalid value for field [authenticatorName]: Value [tes\tt] contains illegal whitespace characters. Only space is allowed." 
+ ).assertThrowsAndMatches(() -> target.validateAuthenticatorName("tes\tt")); } } From ae8224881d781a12692c0670ce0edb3c1c158248 Mon Sep 17 00:00:00 2001 From: imply-cheddar Date: Thu, 15 Jun 2023 11:19:46 +0900 Subject: [PATCH 13/17] More test fixes --- .../druid/tests/query/ITJdbcQueryTest.java | 4 +- .../apache/druid/error/DruidException.java | 22 +++- .../org/apache/druid/error/ErrorResponse.java | 4 +- .../apache/druid/error/ErrorResponseTest.java | 110 ++++++++++++++++++ .../apache/druid/matchers/DruidMatchers.java | 13 +++ .../apache/druid/server/QueryResource.java | 45 ------- .../druid/server/QueryResourceTest.java | 85 +++++++++++++- .../sql/calcite/CalciteJoinQueryTest.java | 2 +- 8 files changed, 233 insertions(+), 52 deletions(-) create mode 100644 processing/src/test/java/org/apache/druid/error/ErrorResponseTest.java diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java index 72e682c9fbf2..8d07f1b8d607 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITJdbcQueryTest.java @@ -213,13 +213,15 @@ public void testJdbcPrepareStatementQuery() } } - @Test(expectedExceptions = AvaticaSqlException.class, expectedExceptionsMessageRegExp = ".* Parameter at position \\[0] is not bound") + @Test(expectedExceptions = AvaticaSqlException.class, expectedExceptionsMessageRegExp = ".* No value bound for parameter \\(position \\[1]\\)") public void testJdbcPrepareStatementQueryMissingParameters() throws SQLException { for (String url : connections) { try (Connection connection = DriverManager.getConnection(url, connectionProperties); PreparedStatement statement = connection.prepareStatement(QUERY_PARAMETERIZED); ResultSet resultSet = statement.executeQuery()) { + // This won't actually run as we expect the exception to be thrown before it 
gets here + throw new IllegalStateException(resultSet.toString()); } } } diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index 097909415917..05dddca4c413 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -137,9 +137,9 @@ public class DruidException extends RuntimeException * @param persona the target persona of the exception message * @return a builder that can be used to complete the creation of the DruidException */ - public static DruidExceptionBuilder forPersona(Persona persona) + public static PartialDruidExceptionBuilder forPersona(Persona persona) { - return new DruidExceptionBuilder("adhoc").forPersona(persona); + return new PartialDruidExceptionBuilder("adhoc", persona); } /** @@ -239,6 +239,7 @@ public int getStatusCode() * * @return an ErrorResponse */ + @SuppressWarnings("unused") @JsonValue public ErrorResponse toErrorResponse() { @@ -353,6 +354,23 @@ public int getExpectedStatus() } } + public static class PartialDruidExceptionBuilder + { + private String errorCode; + private Persona targetPersona; + + private PartialDruidExceptionBuilder(String errorCode, Persona targetPersona) + { + this.errorCode = errorCode; + this.targetPersona = targetPersona; + } + + public DruidExceptionBuilder ofCategory(Category category) + { + return new DruidExceptionBuilder(errorCode).forPersona(targetPersona).ofCategory(category); + } + } + public static class DruidExceptionBuilder { private String errorCode; diff --git a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java index d1b415feec6d..7b571cca2719 100644 --- a/processing/src/main/java/org/apache/druid/error/ErrorResponse.java +++ b/processing/src/main/java/org/apache/druid/error/ErrorResponse.java @@ -132,8 +132,8 @@ public 
Map getAsMap() } retVal.put("errorCode", underlyingException.getErrorCode()); - retVal.put("persona", underlyingException.getTargetPersona()); - retVal.put("category", underlyingException.getCategory()); + retVal.put("persona", underlyingException.getTargetPersona().toString()); + retVal.put("category", underlyingException.getCategory().toString()); retVal.put("errorMessage", underlyingException.getMessage()); retVal.put("context", underlyingException.getContext()); diff --git a/processing/src/test/java/org/apache/druid/error/ErrorResponseTest.java b/processing/src/test/java/org/apache/druid/error/ErrorResponseTest.java new file mode 100644 index 000000000000..2ddd39aa7dae --- /dev/null +++ b/processing/src/test/java/org/apache/druid/error/ErrorResponseTest.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.error; + +import com.google.common.collect.ImmutableMap; +import org.apache.druid.matchers.DruidMatchers; +import org.apache.druid.query.QueryTimeoutException; +import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.Test; + +import java.util.Map; + +public class ErrorResponseTest +{ + @Test + public void testSanity() + { + ErrorResponse response = new ErrorResponse(InvalidSqlInput.exception("bad sql!")); + + final Map asMap = response.getAsMap(); + MatcherAssert.assertThat( + asMap, + DruidMatchers.mapMatcher( + "error", "druidException", + "errorCode", "invalidInput", + "persona", "USER", + "category", "INVALID_INPUT", + "errorMessage", "bad sql!", + "context", ImmutableMap.of("sourceType", "sql") + ) + ); + + ErrorResponse recomposed = ErrorResponse.fromMap(asMap); + + MatcherAssert.assertThat( + recomposed.getUnderlyingException(), + DruidExceptionMatcher.invalidSqlInput().expectMessageIs("bad sql!") + ); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + @Test + public void testQueryExceptionCompat() + { + ErrorResponse response = new ErrorResponse( + DruidException.fromFailure(new QueryExceptionCompat(new QueryTimeoutException())) + ); + + final Map asMap = response.getAsMap(); + MatcherAssert.assertThat( + asMap, + DruidMatchers.mapMatcher( + "error", + "Query timeout", + + "errorCode", + "legacyQueryException", + + "persona", + "OPERATOR", + + "category", + "TIMEOUT", + + "errorMessage", + "Query did not complete within configured timeout period. You can increase query timeout or tune the performance of query." 
+ ) + ); + MatcherAssert.assertThat( + asMap, + (Matcher) Matchers.hasEntry( + Matchers.is("context"), + Matchers.allOf( + DruidMatchers.mapMatcher( + "errorClass", "org.apache.druid.query.QueryTimeoutException", + "legacyErrorCode", "Query timeout" + ), + Matchers.hasKey("host") + ) + ) + ); + + ErrorResponse recomposed = ErrorResponse.fromMap(asMap); + + MatcherAssert.assertThat( + recomposed.getUnderlyingException(), + new DruidExceptionMatcher(DruidException.Persona.OPERATOR, DruidException.Category.TIMEOUT, "legacyQueryException") + .expectMessageIs("Query did not complete within configured timeout period. You can increase query timeout or tune the performance of query.") + ); + } +} diff --git a/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java b/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java index a3b94d613579..dbd03edf9e61 100644 --- a/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java +++ b/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java @@ -20,7 +20,10 @@ package org.apache.druid.matchers; import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import java.util.ArrayList; +import java.util.Map; import java.util.function.Function; public class DruidMatchers @@ -29,4 +32,14 @@ public static LambdaMatcher fn(String name, Function fn, Matc { return new LambdaMatcher<>(name + ": ", fn, matcher); } + + @SuppressWarnings({"unchecked", "rawtypes"}) + public static Matcher> mapMatcher(Object... 
keysAndValues) + { + ArrayList>> entryMatchers = new ArrayList<>(); + for (int i = 0; i < keysAndValues.length; i += 2) { + entryMatchers.add(Matchers.hasEntry((K) keysAndValues[i], (V) keysAndValues[i + 1])); + } + return Matchers.allOf((Iterable) entryMatchers); + } } diff --git a/server/src/main/java/org/apache/druid/server/QueryResource.java b/server/src/main/java/org/apache/druid/server/QueryResource.java index f4a7ab3edb75..2db205ca0bed 100644 --- a/server/src/main/java/org/apache/druid/server/QueryResource.java +++ b/server/src/main/java/org/apache/druid/server/QueryResource.java @@ -33,12 +33,10 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.inject.Inject; -import org.apache.druid.client.DirectDruidClient; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.annotations.Json; import org.apache.druid.guice.annotations.Self; import org.apache.druid.guice.annotations.Smile; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.query.BadJsonQueryException; import org.apache.druid.query.Query; @@ -46,7 +44,6 @@ import org.apache.druid.query.QueryException; import org.apache.druid.query.QueryInterruptedException; import org.apache.druid.query.QueryToolChest; -import org.apache.druid.query.TruncatedResponseContextException; import org.apache.druid.query.context.ResponseContext; import org.apache.druid.query.context.ResponseContext.Keys; import org.apache.druid.server.metrics.QueryCountStatsProvider; @@ -268,48 +265,6 @@ public interface QueryMetricCounter void incrementTimedOut(); } - public static void attachResponseContextToHttpResponse( - String queryId, - ResponseContext responseContext, - Response.ResponseBuilder responseBuilder, - ObjectMapper jsonMapper, ResponseContextConfig responseContextConfig, DruidNode selfNode - ) throws JsonProcessingException - { - 
transferEntityTag(responseContext, responseBuilder); - - DirectDruidClient.removeMagicResponseContextFields(responseContext); - - // Limit the response-context header, see https://github.com/apache/druid/issues/2331 - // Note that Response.ResponseBuilder.header(String key,Object value).build() calls value.toString() - // and encodes the string using ASCII, so 1 char is = 1 byte - final ResponseContext.SerializationResult serializationResult = responseContext.serializeWith( - jsonMapper, - responseContextConfig.getMaxResponseContextHeaderSize() - ); - - if (serializationResult.isTruncated()) { - final String logToPrint = StringUtils.format( - "Response Context truncated for id [%s]. Full context is [%s].", - queryId, - serializationResult.getFullResult() - ); - if (responseContextConfig.shouldFailOnTruncatedResponseContext()) { - log.error(logToPrint); - throw new QueryInterruptedException( - new TruncatedResponseContextException( - "Serialized response context exceeds the max size[%s]", - responseContextConfig.getMaxResponseContextHeaderSize() - ), - selfNode.getHostAndPortToUse() - ); - } else { - log.warn(logToPrint); - } - } - - responseBuilder.header(HEADER_RESPONSE_CONTEXT, serializationResult.getResult()); - } - private Query readQuery( final HttpServletRequest req, final InputStream in, diff --git a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java index adb9b0e8f805..8a599d2ae2da 100644 --- a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java +++ b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java @@ -302,6 +302,79 @@ public void testGoodQueryWithQueryConfigOverrideDefault() throws IOException ); } + @Test + public void testGoodQueryThrowsDruidExceptionFromLifecycleExecute() throws IOException + { + String overrideConfigKey = "priority"; + String overrideConfigValue = "678"; + DefaultQueryConfig overrideConfig = new 
DefaultQueryConfig(ImmutableMap.of(overrideConfigKey, overrideConfigValue)); + queryResource = new QueryResource( + new QueryLifecycleFactory( + WAREHOUSE, + new QuerySegmentWalker() + { + @Override + public QueryRunner getQueryRunnerForIntervals( + Query query, + Iterable intervals + ) + { + throw DruidException.forPersona(DruidException.Persona.OPERATOR) + .ofCategory(DruidException.Category.RUNTIME_FAILURE) + .build("failing for coverage!"); + } + + @Override + public QueryRunner getQueryRunnerForSegments( + Query query, + Iterable specs + ) + { + throw new UnsupportedOperationException(); + } + }, + new DefaultGenericQueryMetricsFactory(), + new NoopServiceEmitter(), + testRequestLogger, + new AuthConfig(), + AuthTestUtils.TEST_AUTHORIZER_MAPPER, + Suppliers.ofInstance(overrideConfig) + ), + jsonMapper, + smileMapper, + queryScheduler, + new AuthConfig(), + null, + ResponseContextConfig.newConfig(true), + DRUID_NODE + ); + + expectPermissiveHappyPathAuth(); + + final Response response = expectSynchronousRequestFlow(SIMPLE_TIMESERIES_QUERY); + Assert.assertEquals(Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus()); + + final ErrorResponse entity = (ErrorResponse) response.getEntity(); + MatcherAssert.assertThat( + entity.getUnderlyingException(), + new DruidExceptionMatcher(DruidException.Persona.OPERATOR, DruidException.Category.RUNTIME_FAILURE, "adhoc") + .expectMessageIs("failing for coverage!") + ); + + Assert.assertEquals(1, testRequestLogger.getNativeQuerylogs().size()); + Assert.assertNotNull(testRequestLogger.getNativeQuerylogs().get(0).getQuery()); + Assert.assertNotNull(testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext()); + Assert.assertTrue(testRequestLogger.getNativeQuerylogs() + .get(0) + .getQuery() + .getContext() + .containsKey(overrideConfigKey)); + Assert.assertEquals( + overrideConfigValue, + testRequestLogger.getNativeQuerylogs().get(0).getQuery().getContext().get(overrideConfigKey) + ); + } + @Test public 
void testGoodQueryWithQueryConfigDoesNotOverrideQueryContext() throws IOException { @@ -1203,7 +1276,8 @@ private MockHttpServletResponse expectAsyncRequestFlow( @Nonnull private MockHttpServletResponse expectAsyncRequestFlow( MockHttpServletRequest req, - byte[] queryBytes, QueryResource queryResource + byte[] queryBytes, + QueryResource queryResource ) throws IOException { final MockHttpServletResponse response = MockHttpServletResponse.forRequest(req); @@ -1238,6 +1312,15 @@ private Future eventuallyaAssertSynchronousResponse( }); } + private Response expectSynchronousRequestFlow(String simpleTimeseriesQuery) throws IOException + { + return expectSynchronousRequestFlow( + testServletRequest, + simpleTimeseriesQuery.getBytes(StandardCharsets.UTF_8), + queryResource + ); + } + private Response expectSynchronousRequestFlow( MockHttpServletRequest req, byte[] bytes, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java index 31c0f5da35b4..92e585d770ef 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java @@ -5434,7 +5434,7 @@ public void testRegressionFilteredAggregatorsSubqueryJoins(Map q .dimension(new DefaultDimensionSpec("v0", "d0", ColumnType.LONG)) .metric(new InvertedTopNMetricSpec(new DimensionTopNMetricSpec( null, - StringComparators.LEXICOGRAPHIC + StringComparators.NUMERIC ))) .aggregators(new CountAggregatorFactory("a0")) .threshold(1) From 166cbc44e3f142464b73aef41e14c220d5c480dd Mon Sep 17 00:00:00 2001 From: imply-cheddar Date: Thu, 15 Jun 2023 15:49:47 +0900 Subject: [PATCH 14/17] Static checks --- .../java/org/apache/druid/benchmark/query/SqlBenchmark.java | 4 ++-- .../apache/druid/benchmark/query/SqlExpressionBenchmark.java | 3 +-- .../apache/druid/benchmark/query/SqlNestedDataBenchmark.java | 2 +- 
.../apache/druid/benchmark/query/SqlVsNativeBenchmark.java | 2 +- .../java/org/apache/druid/error/DruidExceptionMatcher.java | 2 +- .../test/java/org/apache/druid/matchers/DruidMatchers.java | 4 ++++ sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java | 2 +- .../java/org/apache/druid/sql/avatica/DruidStatementTest.java | 4 ++-- .../druid/sql/calcite/SqlVectorizedExpressionSanityTest.java | 2 -- 9 files changed, 13 insertions(+), 12 deletions(-) diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java index 9459b234303b..ba9debb28815 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java @@ -543,7 +543,7 @@ public void tearDown() throws Exception @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void querySql(Blackhole blackhole) throws Exception + public void querySql(Blackhole blackhole) { final Map context = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, vectorize, @@ -561,7 +561,7 @@ public void querySql(Blackhole blackhole) throws Exception @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void planSql(Blackhole blackhole) throws Exception + public void planSql(Blackhole blackhole) { final Map context = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, vectorize, diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java index 1c64d7a749df..7733281908f0 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java @@ -67,7 +67,6 @@ import org.openjdk.jmh.infra.Blackhole; import 
javax.annotation.Nullable; - import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -349,7 +348,7 @@ public void tearDown() throws Exception @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void querySql(Blackhole blackhole) throws Exception + public void querySql(Blackhole blackhole) { final Map context = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, vectorize, diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlNestedDataBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlNestedDataBenchmark.java index aeda68b25b05..98514512e9ab 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlNestedDataBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlNestedDataBenchmark.java @@ -375,7 +375,7 @@ public void tearDown() throws Exception @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void querySql(Blackhole blackhole) throws Exception + public void querySql(Blackhole blackhole) { final Map context = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, vectorize, diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java index 1cb747048dc7..de3db00accf5 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java @@ -170,7 +170,7 @@ public void queryNative(Blackhole blackhole) @Benchmark @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) - public void queryPlanner(Blackhole blackhole) throws Exception + public void queryPlanner(Blackhole blackhole) { try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(engine, sqlQuery, Collections.emptyMap())) { final PlannerResult plannerResult = 
planner.plan(); diff --git a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java index c27658ca91d2..f929d832c47f 100644 --- a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java +++ b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java @@ -121,6 +121,6 @@ public void assertThrowsAndMatches(ThrowingSupplier fn) public interface ThrowingSupplier { - void get() throws Throwable; + void get() throws Exception; } } diff --git a/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java b/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java index dbd03edf9e61..de9b10bbefc5 100644 --- a/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java +++ b/processing/src/test/java/org/apache/druid/matchers/DruidMatchers.java @@ -19,6 +19,7 @@ package org.apache.druid.matchers; +import org.apache.druid.java.util.common.IAE; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -36,6 +37,9 @@ public static LambdaMatcher fn(String name, Function fn, Matc @SuppressWarnings({"unchecked", "rawtypes"}) public static Matcher> mapMatcher(Object... 
keysAndValues) { + if (keysAndValues.length % 2 == 1) { + throw new IAE("keysAndValues should be pairs, but had an odd length [%s]", keysAndValues.length); + } ArrayList>> entryMatchers = new ArrayList<>(); for (int i = 0; i < keysAndValues.length; i += 2) { entryMatchers.add(Matchers.hasEntry((K) keysAndValues[i], (V) keysAndValues[i + 1])); diff --git a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java index ac1364848aa5..c9a100e6a051 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/SqlStatementTest.java @@ -134,7 +134,7 @@ public static void tearDownClass() throws IOException } @Before - public void setUp() throws Exception + public void setUp() { executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s")); diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java index 96b21619c99e..505bfd98e877 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java @@ -101,7 +101,7 @@ public static void tearDownClass() throws IOException private SqlStatementFactory sqlStatementFactory; @Before - public void setUp() throws Exception + public void setUp() { final PlannerConfig plannerConfig = new PlannerConfig(); final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable(); @@ -129,7 +129,7 @@ public void setUp() throws Exception } @After - public void tearDown() throws Exception + public void tearDown() { } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java index d75796792752..b2609f96404e 100644 --- 
a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java @@ -62,7 +62,6 @@ import org.junit.runners.Parameterized; import javax.annotation.Nullable; - import java.io.IOException; import java.util.List; import java.util.Map; @@ -187,7 +186,6 @@ public void testQuery() throws ValidationException } public static void sanityTestVectorizedSqlQueries(PlannerFactory plannerFactory, String query) - throws ValidationException { final Map vector = ImmutableMap.of( QueryContexts.VECTORIZE_KEY, "force", From f4c3e77ec961a35526dd96cb78a6c1dbcdbb0b94 Mon Sep 17 00:00:00 2001 From: imply-cheddar Date: Fri, 16 Jun 2023 07:43:54 +0900 Subject: [PATCH 15/17] Static checks take 2 --- .../java/org/apache/druid/error/DruidExceptionMatcher.java | 2 +- .../druid/sql/calcite/SqlVectorizedExpressionSanityTest.java | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java index f929d832c47f..d3d4e057c5e0 100644 --- a/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java +++ b/processing/src/test/java/org/apache/druid/error/DruidExceptionMatcher.java @@ -121,6 +121,6 @@ public void assertThrowsAndMatches(ThrowingSupplier fn) public interface ThrowingSupplier { - void get() throws Exception; + void get(); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java index b2609f96404e..157a9e271753 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java @@ -22,7 +22,6 @@ import com.google.common.collect.ImmutableList; import 
com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; -import org.apache.calcite.tools.ValidationException; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.guava.Sequence; @@ -180,7 +179,7 @@ public SqlVectorizedExpressionSanityTest(String query) } @Test - public void testQuery() throws ValidationException + public void testQuery() { sanityTestVectorizedSqlQueries(PLANNER_FACTORY, query); } From d9a07eec2d4a5735c6ef63eadec8575c5a546b31 Mon Sep 17 00:00:00 2001 From: imply-cheddar Date: Fri, 16 Jun 2023 17:21:12 +0900 Subject: [PATCH 16/17] Remove readme deep in the code packages --- .../java/org/apache/druid/error/README.md | 404 ------------------ 1 file changed, 404 deletions(-) delete mode 100644 processing/src/main/java/org/apache/druid/error/README.md diff --git a/processing/src/main/java/org/apache/druid/error/README.md b/processing/src/main/java/org/apache/druid/error/README.md deleted file mode 100644 index f9b4e0e14dfc..000000000000 --- a/processing/src/main/java/org/apache/druid/error/README.md +++ /dev/null @@ -1,404 +0,0 @@ - - -WARNING WARNING -TODO: this README has not been adjusted to align with the current code - -# Guide to Druid Error Messages - -Errors in Druid are complex. Errors come from both Druid code and from libraries. -The audience for errors varies depending on the type of error. Managed systems -often want to redact sensitive information, while developers need full details. -Each subsystem within Druid has evolve to use its own error handling methodology. -The goal of this note is to explain how we wish to handle errors moving forward. - -## Requirements - -Druid has a robust set of error handling requirements that, taken together drive -the error handling implementation explained below. - -### Audiences - -Errors must address a number of audiences. 
While most errors are returned to the -end user, it may sometimes be the case that the end user can’t fix the issues. -For example, the user can fix errors in a SQL statement, but cannot fix -configuration issues. Still, the user is always the first point of contact -for errors. - -When the error is something that the user cannot fix, we must strike a balance: -provide the user enough information to request the proper help, but not to -reveal internal details that the user does not need, or that the user should -not see. At the same time, if someone other than the user has to fix the error -(a system administrator, a developer, etc.), then that second person does need -the details. - -This split audience drives a large part of the error handling system design. - -### Managed Deployments - -Druid runs in all sizes of deployments. Developers run Druid on a single machine, -and must fix issues arising from code changes. Developers, presumably, have a -strong understanding of Druid internals. New users will download Druid to run on -a laptop, but such users have little knowledge of Druid — they are just getting -started. As Druid moves into production, a TechOps team may run Druid, while a -different set of users load data and issue queries. In a fully managed environment, -users are in different organizations than the people who manage Druid. - -This complexity makes clear that the audiences above may not have any contact -with one another. Further, a fully managed environment wants to restrict the -information that “leaks” to the end user. While a developer wants the details, -a user in another company should see a “sanitized” message stripped of internal -details. - -This requirement means that Druid errors must be flexible. It should be possible -to both log the details (for the people running Druid), while meaningfully redact -sensitive information for end users in remote organizations. 
- -### Error Categorization - -To help with the above requirements, we need to categorize errors. Druid is -complex: there are hundreds (if not thousands) of things that could go wrong, -ranging from bad user input to incorrect configuration, a badly configured network, -or buggy code. We’ve noted that we want to identify the audience for an error. -Sometimes that is easy: the user is responsible for the text of a SQL message. -Other times, it is hard: who is responsible for a network timeout? - -We’ve also noted that managed environments want to redact information. Doing so -error-by-error is an impossible task. Doing so by category is more practical. - -Druid has the concept of an “error code” which is one level of abstraction above -the detailed error message. We wish to generalize that concept to categorize all -errors. Creating the categories is a non-trivial task: it requires balancing the -audience (when known) with the functional area. For example both timeouts and bad -configuration might be of interest to an “admin”. In larger shops, the Druid admin -is distinct from the network admin. Thus, it might make sense to have a “network” -category distinct from a “config” category. And so on. - -Each category may need to include unique information. SQL errors should include -the line number of the error. I/O errors the identify of the resource that failed. -Config errors the name of the config variable. And so on. - -Error sanitization systems may use the category to aid in redacting information. -“User” errors might be provided as-is, while “system” errors might be redacted to -a generic “internal error: please contact support for assistance.” In such cases, -the log file would provide “support” with the details. - -### Error Destinations - -The above sections hint that errors flow to multiple destinations. The end user -(via an API response) is the most obvious destination. Errors also flow to logs, -and from there to many forms of log aggregation systems. 
As noted, each destination -may receive a different “view” of the error. End users get a simplified, user-focused -view. Developers get the full details, including stack trace. Administrators may -get just “important” errors, and just the description, shorn of stack trace. And -so on. - -Druid primarily uses a REST API for its messages. However, each Druid subsystem -has evolved its own way to return errors. The query endpoints use the -`QueryException` format, other endpoints use a variety of ad-hoc formats: -some use plain text, others use ad-hoc JSON, etc. - -As Druid evolves, we have added multiple query API protocols: JDBC, gRPC, -etc. Each protocol has its own way to format errors, often not as a REST response. - -This means that Druid exceptions don’t have just one format: they must allow -each destination to apply a format appropriate for that destination. - -### Forwarding Remote Errors - -Druid is a distributed system. Queries run across tiers. The “live” query -system uses scatter/gather in which queries run on data nodes. MSQ runs -across multiple stages. In these cases, a remote node may raise an error -which must be returned to a different node, and then forwarded to the user. -Care must be taken to preserve the error as created on the remote node. In -particular, stack traces should be from the remote node, not the receiving node. - -Errors are sent across a REST API. As such, Druid exceptions must be -serializable, in some form which allows recovering the exception on the -receiver (Broker, MSQ controller) side. - -By contrast, when errors are returned to the end user, we do not expect that -the user will deserialize the errors using Druid’s error classes. Most clients -don’t have visibility to Druid’s code. Thus, errors returned to the user -should have a standard format so that a single client-side class can -deserialize any Druid exception. 
- -## Implementation - -With the requirements out of the way, we can now discuss the implementation -that meets these requirements. - -### `DruidException` and its Subclasses - -Most exceptions raised within Druid code should use a subclass of the -`DruidException` class. Use this class when the error is to be returned to the -user (and, perhaps, logged.) Use other exceptions when the goal is to throw an -exception caught and handled by some other bit of code, and which is not -returned to the user. - -Create a subclass of `DruidException`: - -* For each error category. -* Within a category when the error must contain specific additional fields. -* When the class name, when in logs, provides useful information to developers. - -These rules provide a three-level hierarchy: - -```text -DruidException - Exception - Exception -``` - -### Special Fields - -Errors include a number of specialized fields that assist with the requirements -above. - -* `host`: When an error occurs on a data node, this field indicates the - identity of that node. When the error occurs on the node that received the - user response (e.g. the Broker), the field is `null`. -* `suggestion`: Provides suggested corrective action, which may only be valid - in the case of a simple Druid deployment. For example, “Increase the value - of the druid.something.memory config variable.” Managed systems may omit - this text. -* `code`: An error code that identifies the category of error. Categories are - grouped by target audience: some are for the user (SQL syntax, SQL validation, - etc.) Some are for the admin (OOM, resource issues.) Some are ambiguous - (network timeouts.) The code allows managed systems to do wholesale redactions. - -### Context - -The context is the “get out of jail free” card. The context allows us to add as -much detail to an error as wanted, without running the risk of exposing -sensitive information. In a managed system, the context may be hidden from the -user, but still logged. 
In a development system, the context gives the developer -the information needed to identify a problem. - -Context should include secondary information the can safely be hidden. Primary -information (such as the name of the column that can’t be found) should be in -the message itself. - -### Query Endpoint Errors - -Errors returned from `/sql` or `/sql/task` have a format defined by `QueryException`: - -```json -{ - "error": "", - "errorClass": "", - "errorMessage": "", - "host": "" -} -``` - -The `host` is set only for errors that occured on data nodes, but not when the error -occurred on the Broker (in, say, SQL validation.) - -The `error` is an ad-hoc set of codes defined in `QueryException`, but is neither -exaustive or unique: some errors could fall into multiple error codes. - -Per Druid's compatibility rules, we can add new fields to the above format, but we -cannot remove existing fields or change their meaning. This is particularly unfortunate -for the `errorClass` field since it exposes the specific class name used to throw the -exception: something that will change with the `DruidException` system. - -### Data Node Errors - -Data nodes (Historical, Peon, Indexer) use the same format as the query endpoint. Such -errors are deserialized into the `QueryException` class. Thus, `QueryException` has a -JSON serialization coupled tightly to both our internal Broker-to-data-node API, and the -external `/sql` API. `DruidException` must fit into this internal API without causing -compatibility issues during rolling upgrades. That is, the wire format must not change. -Short-term, this may mean that we discard information when returning errors from the -data node so that the Broker does not fail due to unexpected fields. This restriction -limits our freedom in crafting good error messages on data nodes. - -### MSQ Errors - -MSQ introduced a well-organized system to report errors from MSQ tasks to the user by -way of an Overlord task report. 
The system is unique to the MSQ environment and is not -general enough to handle non-MSQ cases. We do not want to modify the MSQ system. Instead, -we want to ensure that the `DruidException` plays well with the MSQ system. - -#### Quick Overview of the MSQ Fault System - -A quick review of the MSQ code suggests that `DruidException` tries to solve -the same problems as the MSQ error system, though in perhaps a more general way. - -MSQ apparently has a fault system separate from exceptions. MSQ splits errors -into two parts: `MSQFault`, which is JSON-serializable, and `MSQException`, which -is not. There are many subclasses of `MSQFault` which are not also subclasses of -exceptions. - -`MSQFault` is part of the `MSQErrorReport`. It seems that `MSQFault` is designed -to capture the fields that are JSON serialized into reports, while `MSQException` -is something that can unwind the stack. This split means we don't have to add -JSON serialization to our exception classes. This is wise since, as we'll see -later, the REST JSON format differs a bit from the MSQ format. A single class -cannot have to distinct JSON serializations. By creating the fault class, MSQ -can control its own specialized JSON format. - -Now, let's compare the MSQ system with the proposed Druid exception system. - -The `MSQFault` interface has many subclasses: apparently one for each kind of -error. Each class includes fields for any error-specific context. JSON -serialization places those fields as top-level context in the serialized -error. Example: - -```json -{ - "errorCode": "", - "errorMessage": "" -} -``` - -#### Integration with `DruidException` - -In an MSQ task, the MSQ system is reponsible for returnining errors to the -user by way of the Overlord task report. Unlike the "classic" query system -(and unlike other REST service), MSQ does not directly return an error to -the user. 
Instead, MSQ adds the error to the report, then does substantial -work to shut down workers, wrap up the Overlord task, etc. - -As a result, we never expect that an `MSQFault` will need to map to a -`DruidException`. We do, however, expect the need to go the other way. MSQ -reuses substantial portions of the native query codebase. That code doesn't -know if it is running in a historical (where it would just throw a -`DruidException` and exit) or in MSQ (where it has to play well in the MSQ -system.) So, we allow that code to use the `DruidException` system. It is up -to the MSQ worker to translate the `DruidException` into a suitable `MSQFault`, -which is then handled via the MSQ error system. - -For example, a `DruidException` may provide fields such as the S3 bucket on -which an I/O error occurred. MSQ can map that to a matching `MSQFault` so that -the information appears as a field in the JSON message. - -### REST Endpoint Errors - -A section above discussed the form of errors from the `/sql` endpoint. Druid has -hundreds of other REST endpoints, with many ad-hoc error solutions. We propose to -unify error reporting to use the (enhanced) `/sql` format. That is, we use the -`DruidException` everywhere in our code, and we map those exceptions to REST -responses the same way for every REST API (unless there is some special reason -not to.) - -### Third-Party Exceptions - -Druid uses libraries (including the Java library) that throws its own exceptions. -The only workable approach is: - -* Catch such exceptions as close to the cause as possible, then translate the error - to a `DruidException`, providing Druid-specific context. The original exception is - attached as a cause, so developers can track down the underlying issue. -* Provide a Servlet filter that catches "stray" exceptions and returns a generic error - message. These cases indicate gaps in the code where we failed to properly catch and - handle and exception. 
Managed systems can't know if the non-Druid exception contains - sensitive information. So, report the error as something like "An internal error - occurred. See the logs for details", associated with a unique error category so that - manage services can replace the wording. - -### Other APIs - -Druid occasionally offers non-REST APIs: JDBC, gRPC, etc. For these cases, an API-specific -mapping from the `DruidException` to the specialized API can handle the needs of that API. - -In an ideal world, `DruidException` would be independent of all APIs, and the REST API -would do its own mapping. Howeer, since REST is standard in Druid, we allow `DruidException` -to serialize to and from Druid's REST API JSON. - -### JSON Deserialization - -The implementation envisions a large number of `DruidException` subclasses: one per -category, with finer grain subclasses. The JSON format given above was designed based -on a single class: `QueryException`. There is no `type` field that Jackson could use to -recover the subclass. - -When running a query, the data node with raise a specific error class, then serialize it -in the generic format. The Broker does not have sufficient information to recover the -original class. Instead, the Broker deserializes exceptions as a `GenericException` class. -The result can be thrown, and will re-serialize to the same format as that sent by the -data node, but it loses the ability to parse exceptions based on the exception class. - -### Mutable Exception Fields - -Druid prefers that class fields be immutable because such an approach reduces risk in -a multi-threaded system. In an ideal world, `DruidException` fields would also be -immutable, with a "builder" class to gather values. Such a solution is workable only -if we have one (or a very few) exception classes. The design here, however, follows -MSQ practice and envisions many such classes. Creating a builder per class would be -tedious. 
- -Instead, we allow certain `DruidException` fields to be mutable so that they can be -set after the exception is created. Mutable fields include: - -* `host` (set at the top level of the data node) -* `context` (to allow callers to add information to an exception as it bubbles up - the call stack). -* `suggestion` (to allow a higher-level of code to offer a suggestion when the - code that throws the exception doesn't have sufficient context.) - -## Guidelines - -With the above requirements and design in mind, we can identify some guidelines -when developers write error messages. - -### Wording - -Word error messages so that they speak to the end user who will receive the -error as a response to a request. When the error is a “user error” this is -simple. Explain the problem in user terms. That is, rather than “key not found”, -say something like “If you set the X context parameter, you must also set the Y -parameter.” - -The task is harder when the error is one only an admin or developer can solve. -We want to provide the information that audience needs to solve the problem. -But, we must provide that information only in logs. For example, if we hit an -assertion error, there is not much the user can do. But, a developer wants to -know where the error triggered and why. For this, provide a generic message to -the user “Druid internal error.” Druid might add “See logs for details.” A -managed service would say, “Contact support.” However, the log should provide -the full details, including the stack trace, and the value that caused the issue. - -Thus, some errors must be constructed with two sets of information: the bland -user message, and the details for developers. Use the context to help. - -### Interpolated Values - -Errors will often include interpolated values. Example: “Table
not -found”. Druid has a long-standing convention of always enclosing interpolated -values in square brackets: “Table [foo] not found.” While this format is not -standard English, it is standard Druid, and all error messages must follow -this form. - -### Sensitive Values - -Error messages want to be as helpful as possible by providing all the details -that would be needed to resolve the issue. This is ideal during development, -or in a single-machine deployment. But, in doing so, a message may leak -sensitive information when run in a managed service. - -Errors should thus divide information into two “pools.” The error message itself -should contain only that information which is suitable for a managed service -user. Information which might be considered sensitive should reside in the -context. A managed service can strip context values that a user cannot see. - -Another alternative is for a manages service to redact an entire category of -errors, as noted above. Thus, errors should be assigned categories that enable -efficient redaction policies. 
From 9d50c1e5ec25323d2b6b97321940b82de31cee5c Mon Sep 17 00:00:00 2001 From: imply-cheddar Date: Mon, 19 Jun 2023 13:07:53 +0900 Subject: [PATCH 17/17] Review comments and conflict resolution --- .../apache/druid/msq/exec/MSQInsertTest.java | 28 ++++++++----------- .../apache/druid/msq/exec/MSQReplaceTest.java | 8 ++---- .../apache/druid/msq/exec/MSQSelectTest.java | 2 +- .../common/exception/DruidException.java | 3 ++ .../apache/druid/error/DruidException.java | 8 +++--- .../druid/error/QueryExceptionCompat.java | 7 +++++ .../druid/server/QueryResourceTest.java | 2 +- .../calcite/parser/DruidSqlParserUtils.java | 13 ++++----- .../sql/calcite/BaseCalciteQueryTest.java | 2 +- .../sql/calcite/CalciteInsertDmlTest.java | 3 +- .../sql/calcite/CalciteJoinQueryTest.java | 2 +- .../sql/calcite/CalciteReplaceDmlTest.java | 3 +- .../parser/DruidSqlParserUtilsTest.java | 27 +++++------------- .../druid/sql/http/SqlResourceTest.java | 2 +- 14 files changed, 48 insertions(+), 62 deletions(-) diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java index b4ba676b5890..1769c3028a5b 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQInsertTest.java @@ -744,11 +744,9 @@ public void testInsertWithClusteredByDescendingThrowsException() + "PARTITIONED BY DAY " + "CLUSTERED BY dim1 DESC" ) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "[`dim1` DESC] is invalid. 
CLUSTERED BY columns cannot be sorted in descending order.")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlIs("Invalid CLUSTERED BY clause [`dim1` DESC]: cannot sort in descending order.") + ) .verifyPlanningErrors(); } @@ -979,7 +977,7 @@ public void testIncorrectInsertQuery() "insert into foo1 select __time, dim1 , count(*) as cnt from foo where dim1 is not null group by 1, 2 clustered by dim1" ) .setExpectedValidationErrorMatcher(invalidSqlContains( - "LUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" + "CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause" )) .verifyPlanningErrors(); } @@ -1092,11 +1090,11 @@ public void testInsertLimitWithPeriodGranularityThrowsException() + "FROM foo " + "LIMIT 50 " + "PARTITIONED BY MONTH") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlContains( + "INSERT and REPLACE queries cannot have a LIMIT unless PARTITIONED BY is \"ALL\"" + ) + ) .setQueryContext(context) .verifyPlanningErrors(); } @@ -1110,11 +1108,9 @@ public void testInsertOffsetThrowsException() + "LIMIT 50 " + "OFFSET 10" + "PARTITIONED BY ALL TIME") - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(DruidException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString( - "INSERT and REPLACE queries cannot have an OFFSET")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlContains("INSERT and REPLACE queries cannot have an OFFSET") + ) .setQueryContext(context) .verifyPlanningErrors(); } diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java 
b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java index 42716297ab09..500d2a68bee8 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQReplaceTest.java @@ -733,11 +733,9 @@ public void testReplaceWithClusteredByDescendingThrowsException() + "PARTITIONED BY ALL TIME " + "CLUSTERED BY m2, m1 DESC" ) - .setExpectedValidationErrorMatcher(CoreMatchers.allOf( - CoreMatchers.instanceOf(SqlPlanningException.class), - ThrowableMessageMatcher.hasMessage(CoreMatchers.startsWith( - "[`m1` DESC] is invalid. CLUSTERED BY columns cannot be sorted in descending order.")) - )) + .setExpectedValidationErrorMatcher( + invalidSqlIs("Invalid CLUSTERED BY clause [`m1` DESC]: cannot sort in descending order.") + ) .verifyPlanningErrors(); } diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java index ff5480611944..0ad6e4d90bea 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/exec/MSQSelectTest.java @@ -1625,7 +1625,7 @@ public void testTimeColumnAggregationFromExtern() throws IOException + "FROM kttm_data " + "GROUP BY 1") .setExpectedValidationErrorMatcher( - new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "adhoc") + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "general") .expectMessageIs( "Query planning failed for unknown reason, our best guess is this " + "[LATEST and EARLIEST aggregators implicitly depend on the __time column, " diff --git a/processing/src/main/java/org/apache/druid/common/exception/DruidException.java 
b/processing/src/main/java/org/apache/druid/common/exception/DruidException.java index 55373baf143f..42a679b6bd2f 100644 --- a/processing/src/main/java/org/apache/druid/common/exception/DruidException.java +++ b/processing/src/main/java/org/apache/druid/common/exception/DruidException.java @@ -21,6 +21,9 @@ /** * A generic exception thrown by Druid. + * + * This class is deprecated and should not be used. {@link org.apache.druid.error.DruidException} should be used for + * any error that is intended to be delivered to the end user. */ @Deprecated public class DruidException extends RuntimeException diff --git a/processing/src/main/java/org/apache/druid/error/DruidException.java b/processing/src/main/java/org/apache/druid/error/DruidException.java index 05dddca4c413..6acedf55fdb0 100644 --- a/processing/src/main/java/org/apache/druid/error/DruidException.java +++ b/processing/src/main/java/org/apache/druid/error/DruidException.java @@ -104,7 +104,7 @@ * codes) for similar exception messages. *

* The error code is a code that indicates a grouping of error messages. There is no forced structure around whether - * a specific error code can be reused for different problems or not. That is, an error code like "adhoc" will get + * a specific error code can be reused for different problems or not. That is, an error code like "general" will get * reused in many different places as it's the basic error code used whenever a DruidException is created in-line. But, * we might decide that a specific type of error should be identified explicitly by its error code and should only mean * one thing, in which case that error code might only exist on a single error. @@ -120,7 +120,7 @@ *

* A DruidException can be built from one of 2 static methods: {@link #forPersona} or {@link #fromFailure(Failure)}. The only way to set a specific error code is to build a DruidException from a Failure, when built in-line using - forPersona, it will always be an "adhoc" error. + forPersona, it will always be a "general" error. *

* Additionally, DruidException is not intended to be directly serialized. The intention is that something converts it into an {@link ErrorResponse} first using {@link ErrorResponse#ErrorResponse(DruidException)} and then that @@ -132,14 +132,14 @@ public class DruidException extends RuntimeException { /** - * Starts building an "adhoc" DruidException targetting the specific persona. + * Starts building a "general" DruidException targeting the specific persona. * * @param persona the target persona of the exception message * @return a builder that can be used to complete the creation of the DruidException */ public static PartialDruidExceptionBuilder forPersona(Persona persona) { - return new PartialDruidExceptionBuilder("adhoc", persona); + return new PartialDruidExceptionBuilder("general", persona); } /** diff --git a/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java b/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java index 1829a41046e9..12e4905efae9 100644 --- a/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java +++ b/processing/src/main/java/org/apache/druid/error/QueryExceptionCompat.java @@ -21,6 +21,13 @@ import org.apache.druid.query.QueryException; +/** + * A {@link DruidException.Failure} that serves to cover conversions from {@link QueryException}. + * + * When/if QueryException is completely eliminated from the code base, this compat layer should also be able to + * be removed. Additionally, it is the hope that nobody should actually be interacting with this class as it should + * be an implementation detail of {@link DruidException} and not really seen outside of that. 
+ */ public class QueryExceptionCompat extends DruidException.Failure { public static final String ERROR_CODE = "legacyQueryException"; diff --git a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java index 8a599d2ae2da..c7c96ecd3b12 100644 --- a/server/src/test/java/org/apache/druid/server/QueryResourceTest.java +++ b/server/src/test/java/org/apache/druid/server/QueryResourceTest.java @@ -357,7 +357,7 @@ public QueryRunner getQueryRunnerForSegments( final ErrorResponse entity = (ErrorResponse) response.getEntity(); MatcherAssert.assertThat( entity.getUnderlyingException(), - new DruidExceptionMatcher(DruidException.Persona.OPERATOR, DruidException.Category.RUNTIME_FAILURE, "adhoc") + new DruidExceptionMatcher(DruidException.Persona.OPERATOR, DruidException.Category.RUNTIME_FAILURE, "general") .expectMessageIs("failing for coverage!") ); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java index 60fe551c6799..5f11c6f836a6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtils.java @@ -276,7 +276,6 @@ public static List validateQueryAndConvertToIntervals( * @throws ValidationException if any of the clustered by columns contain DESCENDING order. */ public static SqlOrderBy convertClusterByToOrderBy(SqlNode query, SqlNodeList clusteredByList) - throws ValidationException { validateClusteredByColumns(clusteredByList); // If we have a CLUSTERED BY clause, extract the information in that CLUSTERED BY and create a new @@ -306,10 +305,9 @@ public static SqlOrderBy convertClusterByToOrderBy(SqlNode query, SqlNodeList cl /** * Validates the clustered by columns to ensure that it does not contain DESCENDING order columns. 
* - * @param clusteredByNodes List of SqlNodes representing columns to be clustered by. - * @throws ValidationException if any of the clustered by columns contain DESCENDING order. + * @param clusteredByNodes List of SqlNodes representing columns to be clustered by. */ - public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes) throws ValidationException + public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes) { if (clusteredByNodes == null) { return; @@ -317,10 +315,9 @@ public static void validateClusteredByColumns(final SqlNodeList clusteredByNodes for (final SqlNode clusteredByNode : clusteredByNodes.getList()) { if (clusteredByNode.isA(ImmutableSet.of(SqlKind.DESCENDING))) { - throw new ValidationException( - StringUtils.format("[%s] is invalid." - + " CLUSTERED BY columns cannot be sorted in descending order.", clusteredByNode.toString() - ) + throw InvalidSqlInput.exception( + "Invalid CLUSTERED BY clause [%s]: cannot sort in descending order.", + clusteredByNode ); } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java index 6c570661bb71..428e1d820045 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java @@ -651,7 +651,7 @@ public void assertQueryIsUnplannable(final PlannerConfig plannerConfig, final St catch (DruidException e) { MatcherAssert.assertThat( e, - new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "adhoc") + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "general") .expectMessageIs( StringUtils.format( "Query planning failed for unknown reason, our best guess is this [%s]", diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index 93f2c7951ce5..7fb52843b4e4 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -767,8 +767,7 @@ public void testExplainPlanInsertWithClusteredByDescThrowsException() testIngestionQuery() .sql(sql) .expectValidationError( - SqlPlanningException.class, - "[`dim1` DESC] is invalid. CLUSTERED BY columns cannot be sorted in descending order." + invalidSqlIs("Invalid CLUSTERED BY clause [`dim1` DESC]: cannot sort in descending order.") ) .verify(); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java index 92e585d770ef..337926d462ae 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java @@ -1500,7 +1500,7 @@ public void testTimeColumnAggregationsOnLookups(Map queryContext catch (DruidException e) { MatcherAssert.assertThat( e, - new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "adhoc") + new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "general") .expectMessageIs( "Query planning failed for unknown reason, our best guess is this " + "[LATEST and EARLIEST aggregators implicitly depend on the __time column, " diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java index 5c4e061089d4..d7ba655c1efa 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteReplaceDmlTest.java @@ -797,8 +797,7 @@ public void testExplainPlanReplaceWithClusteredByDescThrowsException() 
testIngestionQuery() .sql(sql) .expectValidationError( - SqlPlanningException.class, - "[`dim1` DESC] is invalid. CLUSTERED BY columns cannot be sorted in descending order." + invalidSqlIs("Invalid CLUSTERED BY clause [`dim1` DESC]: cannot sort in descending order.") ) .verify(); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java index 0600935ae073..01f0544e1567 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/parser/DruidSqlParserUtilsTest.java @@ -134,12 +134,8 @@ public static class ClusteredByColumnsValidationTest public void testEmptyClusteredByColumnsValid() { final SqlNodeList clusteredByArgs = new SqlNodeList(SqlParserPos.ZERO); - try { - DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs); - } - catch (ValidationException e) { - Assert.fail("Did not expect an exception" + e.getMessage()); - } + + DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs); } /** @@ -153,12 +149,7 @@ public void testClusteredByColumnsValid() clusteredByArgs.add(new SqlIdentifier("DIM2 ASC", SqlParserPos.ZERO)); clusteredByArgs.add(SqlLiteral.createExactNumeric("3", SqlParserPos.ZERO)); - try { - DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs); - } - catch (ValidationException e) { - Assert.fail("Did not expect an exception" + e.getMessage()); - } + DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs); } /** @@ -181,14 +172,10 @@ public void testClusteredByColumnsWithDescThrowsException() ); clusteredByArgs.add(sqlBasicCall); - ValidationException e = Assert.assertThrows( - ValidationException.class, - () -> DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs) - ); - Assert.assertEquals( - "[`DIM4` DESC] is invalid. 
CLUSTERED BY columns cannot be sorted in descending order.", - e.getMessage() - ); + DruidExceptionMatcher + .invalidSqlInput() + .expectMessageIs("Invalid CLUSTERED BY clause [`DIM4` DESC]: cannot sort in descending order.") + .assertThrowsAndMatches(() -> DruidSqlParserUtils.validateClusteredByColumns(clusteredByArgs)); } } diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index 912c6378f762..7dbc5ce69317 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -1399,7 +1399,7 @@ public void testCannotConvert() throws Exception validateErrorResponse( exception, - "adhoc", + "general", DruidException.Persona.ADMIN, DruidException.Category.INVALID_INPUT, "Query planning failed for unknown reason, our best guess is this "