From 52e6781c90340b0984adecfad1bcb2c84f443d87 Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Sun, 14 Nov 2021 14:06:59 -0800 Subject: [PATCH 1/5] SQL INSERT planner support. The main changes are: 1) DruidPlanner is able to validate and authorize INSERT queries. They require WRITE permission on the target datasource. 2) QueryMaker is now an interface, and there is a QueryMakerFactory that creates instances of it. There is only one production implementation of each (NativeQueryMaker and NativeQueryMakerFactory), which together behave the same way as the former QueryMaker class. But this opens the door to executing queries in ways other than the Druid query stack, and is used by unit tests (CalciteInsertDmlTest) to test the INSERT planning functionality. 3) Adds an EXTERN table macro that allows referencing external data using InputSource and InputFormat from Druid's batch ingestion API. This is not exposed in production yet, but is used by unit tests. 4) Adds a QueryFeature concept that enables the planner to change its behavior slightly depending on the capabilities of the execution system. 5) Adds an "AuthorizableOperator" concept that enables SqlOperators to require additional permissions. This is used by the EXTERN table macro. Related odds and ends: - Add equals, hashCode, toString methods to InlineInputSource. Aids in the "from external" tests in CalciteInsertDmlTest. - Add JSON-serializability to RowSignature. - Move the SQL string inside PlannerContext so it is "baked into" the planner when the planner is created. Cleans up the code a bit, since in practice, the same query is passed in every time to the same planner anyway. 
--- .../data/input/impl/InlineInputSource.java | 28 + .../druid/segment/column/ColumnSignature.java | 99 +++ .../druid/segment/column/RowSignature.java | 62 +- .../segment/column/RowSignatureTest.java | 62 ++ .../org/apache/druid/sql/SqlLifecycle.java | 27 +- .../druid/sql/avatica/DruidStatement.java | 37 +- .../expression/AuthorizableOperator.java | 35 + .../calcite/external/ExternalDataSource.java | 152 ++++ .../external/ExternalOperatorConversion.java | 115 ++++ .../calcite/external/ExternalTableMacro.java | 159 +++++ .../calcite/external/ExternalTableScan.java | 78 +++ .../external/ExternalTableScanRule.java | 59 ++ .../calcite/planner/CalcitePlannerModule.java | 24 +- .../sql/calcite/planner/DruidPlanner.java | 350 +++++++--- .../sql/calcite/planner/PlannerContext.java | 72 +- .../sql/calcite/planner/PlannerFactory.java | 46 +- .../druid/sql/calcite/planner/Rules.java | 19 +- .../planner/SqlResourceCollectorShuttle.java | 28 +- .../sql/calcite/planner/ValidationResult.java | 11 +- .../sql/calcite/rel/DruidJoinQueryRel.java | 30 +- .../sql/calcite/rel/DruidOuterQueryRel.java | 32 +- .../druid/sql/calcite/rel/DruidQuery.java | 85 ++- .../druid/sql/calcite/rel/DruidQueryRel.java | 58 +- .../druid/sql/calcite/rel/DruidRel.java | 22 +- .../calcite/rel/DruidUnionDataSourceRel.java | 28 +- .../druid/sql/calcite/rel/DruidUnionRel.java | 13 +- .../apache/druid/sql/calcite/rel/Sorting.java | 2 +- .../druid/sql/calcite/rule/DruidJoinRule.java | 2 +- .../calcite/rule/DruidLogicalValuesRule.java | 12 +- .../sql/calcite/rule/DruidSortUnionRule.java | 2 +- .../sql/calcite/rule/DruidTableScanRule.java | 10 +- .../rule/DruidUnionDataSourceRule.java | 4 +- .../sql/calcite/rule/DruidUnionRule.java | 2 +- .../NativeQueryMaker.java} | 97 +-- .../calcite/run/NativeQueryMakerFactory.java | 69 ++ .../druid/sql/calcite/run/QueryFeature.java | 49 ++ .../calcite/run/QueryFeatureInspector.java | 32 + .../druid/sql/calcite/run/QueryMaker.java | 42 ++ 
.../sql/calcite/run/QueryMakerFactory.java | 59 ++ .../druid/sql/calcite/schema/DruidSchema.java | 2 +- .../sql/calcite/schema/LookupSchema.java | 5 +- .../druid/sql/calcite/table/DruidTable.java | 22 +- .../sql/calcite/view/DruidViewMacro.java | 5 +- .../apache/druid/sql/http/SqlResource.java | 9 +- .../apache/druid/sql/SqlLifecycleTest.java | 16 +- .../sql/avatica/DruidAvaticaHandlerTest.java | 13 +- .../druid/sql/avatica/DruidStatementTest.java | 6 +- .../sql/calcite/BaseCalciteQueryTest.java | 21 +- .../sql/calcite/CalciteInsertDmlTest.java | 647 ++++++++++++++++++ .../DruidPlannerResourceAnalyzeTest.java | 56 +- .../SqlVectorizedExpressionSanityTest.java | 10 +- .../sql/calcite/TestInsertQueryMaker.java | 100 +++ .../sql/calcite/TestQueryMakerFactory.java | 62 ++ .../expression/ExpressionTestHelper.java | 2 + .../planner/CalcitePlannerModuleTest.java | 8 +- .../calcite/planner/DruidRexExecutorTest.java | 2 + .../rule/DruidUnionDataSourceRuleTest.java | 1 + .../schema/DruidCalciteSchemaModuleTest.java | 3 +- .../druid/sql/calcite/util/CalciteTests.java | 82 +-- .../druid/sql/http/SqlResourceTest.java | 6 +- 60 files changed, 2667 insertions(+), 524 deletions(-) create mode 100644 processing/src/main/java/org/apache/druid/segment/column/ColumnSignature.java create mode 100644 processing/src/test/java/org/apache/druid/segment/column/RowSignatureTest.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/expression/AuthorizableOperator.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalDataSource.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalOperatorConversion.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableMacro.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScan.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScanRule.java rename 
sql/src/main/java/org/apache/druid/sql/calcite/{rel/QueryMaker.java => run/NativeQueryMaker.java} (87%) create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMakerFactory.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/run/QueryFeature.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/run/QueryFeatureInspector.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMaker.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMakerFactory.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/TestInsertQueryMaker.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/TestQueryMakerFactory.java diff --git a/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java b/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java index 1e7b59fcf4a9..4cc6d0f9cf4c 100644 --- a/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java +++ b/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java @@ -30,6 +30,7 @@ import javax.annotation.Nullable; import java.io.File; +import java.util.Objects; import java.util.stream.Stream; public class InlineInputSource extends AbstractInputSource @@ -75,4 +76,31 @@ protected InputSourceReader formattableReader( temporaryDirectory ); } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InlineInputSource that = (InlineInputSource) o; + return Objects.equals(data, that.data); + } + + @Override + public int hashCode() + { + return Objects.hash(data); + } + + @Override + public String toString() + { + return "InlineInputSource{" + + "data='" + data + '\'' + + '}'; + } } diff --git 
a/processing/src/main/java/org/apache/druid/segment/column/ColumnSignature.java b/processing/src/main/java/org/apache/druid/segment/column/ColumnSignature.java new file mode 100644 index 000000000000..07ad2728f758 --- /dev/null +++ b/processing/src/main/java/org/apache/druid/segment/column/ColumnSignature.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.segment.column; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.java.util.common.IAE; + +import javax.annotation.Nullable; +import java.util.Objects; + +/** + * Class used by {@link RowSignature} for serialization. + * + * Package-private since it is not intended to be used outside that narrow use case. In other cases where passing + * around information about column types is important, use {@link ColumnType} instead. 
+ */ +class ColumnSignature +{ + private final String name; + + @Nullable + private final ColumnType type; + + @JsonCreator + ColumnSignature( + @JsonProperty("name") String name, + @JsonProperty("type") @Nullable ColumnType type + ) + { + this.name = name; + this.type = type; + + // Name must be nonnull, but type can be null (if the type is unknown) + if (name == null || name.isEmpty()) { + throw new IAE("Column name must be non-empty"); + } + } + + @JsonProperty("name") + String name() + { + return name; + } + + @Nullable + @JsonProperty("type") + @JsonInclude(JsonInclude.Include.NON_NULL) + ColumnType type() + { + return type; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ColumnSignature that = (ColumnSignature) o; + return name.equals(that.name) && Objects.equals(type, that.type); + } + + @Override + public int hashCode() + { + return Objects.hash(name, type); + } + + @Override + public String toString() + { + return "ColumnSignature{" + + "name='" + name + '\'' + + ", type=" + type + + '}'; + } +} diff --git a/processing/src/main/java/org/apache/druid/segment/column/RowSignature.java b/processing/src/main/java/org/apache/druid/segment/column/RowSignature.java index 77794bb8c797..b7b6a68a93e5 100644 --- a/processing/src/main/java/org/apache/druid/segment/column/RowSignature.java +++ b/processing/src/main/java/org/apache/druid/segment/column/RowSignature.java @@ -19,12 +19,13 @@ package org.apache.druid.segment.column; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import it.unimi.dsi.fastutil.objects.Object2IntMap; import it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap; import org.apache.druid.java.util.common.IAE; -import org.apache.druid.java.util.common.Pair; import 
org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.PostAggregator; import org.apache.druid.query.dimension.DimensionSpec; @@ -40,9 +41,7 @@ import java.util.Optional; /** - * Type signature for a row in a Druid datasource or query result. Rows have an ordering and every - * column has a defined type. This is a little bit of a fiction in the Druid world (where rows do not _actually_ have - * well defined types) but we do impose types for the SQL layer. + * Type signature for a row in a Druid datasource or query result. * * @see org.apache.druid.query.QueryToolChest#resultArraySignature which returns signatures for query results * @see org.apache.druid.query.InlineDataSource#getRowSignature which returns signatures for inline datasources @@ -55,30 +54,42 @@ public class RowSignature implements ColumnInspector private final Object2IntMap columnPositions = new Object2IntOpenHashMap<>(); private final List columnNames; - private RowSignature(final List> columnTypeList) + private RowSignature(final List columnTypeList) { this.columnPositions.defaultReturnValue(-1); final ImmutableList.Builder columnNamesBuilder = ImmutableList.builder(); for (int i = 0; i < columnTypeList.size(); i++) { - final Pair pair = columnTypeList.get(i); - final ColumnType existingType = columnTypes.get(pair.lhs); + final ColumnSignature sig = columnTypeList.get(i); + final ColumnType existingType = columnTypes.get(sig.name()); - if (columnTypes.containsKey(pair.lhs) && existingType != pair.rhs) { + if (columnTypes.containsKey(sig.name()) && !Objects.equals(existingType, sig.type())) { // It's ok to add the same column twice as long as the type is consistent. // Note: we need the containsKey because the existingType might be present, but null. 
- throw new IAE("Column[%s] has conflicting types [%s] and [%s]", pair.lhs, existingType, pair.rhs); + throw new IAE("Column[%s] has conflicting types [%s] and [%s]", sig.name(), existingType, sig.type()); } - columnTypes.put(pair.lhs, pair.rhs); - columnPositions.put(pair.lhs, i); - columnNamesBuilder.add(pair.lhs); + columnTypes.put(sig.name(), sig.type()); + columnPositions.put(sig.name(), i); + columnNamesBuilder.add(sig.name()); } this.columnNames = columnNamesBuilder.build(); } + @JsonCreator + static RowSignature fromColumnSignatures(final List columnSignatures) + { + final Builder builder = builder(); + + for (final ColumnSignature columnSignature : columnSignatures) { + builder.add(columnSignature.name(), columnSignature.type()); + } + + return builder.build(); + } + public static Builder builder() { return new Builder(); @@ -158,6 +169,19 @@ public int indexOf(final String columnName) return columnPositions.applyAsInt(columnName); } + @JsonValue + private List asColumnSignatures() + { + final List retVal = new ArrayList<>(); + + for (String columnName : columnNames) { + final ColumnType type = columnTypes.get(columnName); + retVal.add(new ColumnSignature(columnName, type)); + } + + return retVal; + } + @Override public boolean equals(Object o) { @@ -207,7 +231,7 @@ public ColumnCapabilities getColumnCapabilities(String column) public static class Builder { - private final List> columnTypeList; + private final List columnTypeList; private Builder() { @@ -216,21 +240,21 @@ private Builder() /** * Add a column to this signature. 
- * @param columnName name, must be nonnull + * + * @param columnName name, must be nonnull * @param columnType type, may be null if unknown */ public Builder add(final String columnName, @Nullable final ColumnType columnType) { - // Name must be nonnull, but type can be null (if the type is unknown) - Preconditions.checkNotNull(columnName, "'columnName' must be non-null"); - columnTypeList.add(Pair.of(columnName, columnType)); + columnTypeList.add(new ColumnSignature(columnName, columnType)); return this; } public Builder addAll(final RowSignature other) { - for (String columnName : other.getColumnNames()) { - add(columnName, other.getColumnType(columnName).orElse(null)); + final List names = other.getColumnNames(); + for (int i = 0; i < names.size(); i++) { + add(names.get(i), other.getColumnType(i).orElse(null)); } return this; diff --git a/processing/src/test/java/org/apache/druid/segment/column/RowSignatureTest.java b/processing/src/test/java/org/apache/druid/segment/column/RowSignatureTest.java new file mode 100644 index 000000000000..3c77b0499c13 --- /dev/null +++ b/processing/src/test/java/org/apache/druid/segment/column/RowSignatureTest.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.segment.column; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.segment.TestHelper; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; + +public class RowSignatureTest +{ + @Test + public void testJson() throws IOException + { + final String signatureString = + "[{\"name\":\"s\",\"type\":\"STRING\"}," + + "{\"name\":\"d\",\"type\":\"DOUBLE\"}," + + "{\"name\":\"f\",\"type\":\"FLOAT\"}," + + "{\"name\":\"l\",\"type\":\"LONG\"}," + + "{\"name\":\"u\"}," + + "{\"name\":\"c\",\"type\":\"COMPLEX\"}," + + "{\"name\":\"cf\",\"type\":\"COMPLEX\"}," + + "{\"name\":\"as\",\"type\":\"ARRAY\"}" + + "]"; + + final ObjectMapper mapper = TestHelper.makeJsonMapper(); + final RowSignature signature = mapper.readValue(signatureString, RowSignature.class); + Assert.assertEquals(signatureString, mapper.writeValueAsString(signature)); + Assert.assertEquals( + RowSignature.builder() + .add("s", ColumnType.STRING) + .add("d", ColumnType.DOUBLE) + .add("f", ColumnType.FLOAT) + .add("l", ColumnType.LONG) + .add("u", null) + .add("c", ColumnType.UNKNOWN_COMPLEX) + .add("cf", ColumnType.ofComplex("foo")) + .add("as", ColumnType.ofArray(ColumnType.STRING)) + .build(), + signature + ); + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/SqlLifecycle.java b/sql/src/main/java/org/apache/druid/sql/SqlLifecycle.java index 98a7c22abb76..15ffa204cc19 100644 --- a/sql/src/main/java/org/apache/druid/sql/SqlLifecycle.java +++ b/sql/src/main/java/org/apache/druid/sql/SqlLifecycle.java @@ -21,7 +21,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import com.google.common.collect.Iterables; import com.google.errorprone.annotations.concurrent.GuardedBy; import org.apache.calcite.avatica.remote.TypedValue; import 
org.apache.calcite.sql.parser.SqlParseException; @@ -47,7 +46,7 @@ import org.apache.druid.server.security.AuthenticationResult; import org.apache.druid.server.security.AuthorizationUtils; import org.apache.druid.server.security.ForbiddenException; -import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; import org.apache.druid.sql.calcite.planner.DruidPlanner; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.planner.PlannerFactory; @@ -194,7 +193,7 @@ public void validateAndAuthorize(AuthenticationResult authenticationResult) Access access = doAuthorize( AuthorizationUtils.authorizeAllResourceActions( authenticationResult, - Iterables.transform(validationResult.getResources(), AuthorizationUtils.RESOURCE_READ_RA_GENERATOR), + validationResult.getResourceActions(), plannerFactory.getAuthorizerMapper() ) ); @@ -216,7 +215,7 @@ public void validateAndAuthorize(HttpServletRequest req) Access access = doAuthorize( AuthorizationUtils.authorizeAllResourceActions( req, - Iterables.transform(validationResult.getResources(), AuthorizationUtils.RESOURCE_READ_RA_GENERATOR), + validationResult.getResourceActions(), plannerFactory.getAuthorizerMapper() ) ); @@ -225,13 +224,13 @@ public void validateAndAuthorize(HttpServletRequest req) private ValidationResult validate(AuthenticationResult authenticationResult) { - try (DruidPlanner planner = plannerFactory.createPlanner(queryContext)) { + try (DruidPlanner planner = plannerFactory.createPlanner(sql, queryContext)) { // set planner context for logs/metrics in case something explodes early this.plannerContext = planner.getPlannerContext(); this.plannerContext.setAuthenticationResult(authenticationResult); // set parameters on planner context, if parameters have already been set this.plannerContext.setParameters(parameters); - this.validationResult = planner.validate(sql); + this.validationResult = planner.validate(); return 
validationResult; } // we can't collapse catch clauses since SqlPlanningException has type-sensitive constructors. @@ -266,7 +265,6 @@ private void checkAccess(Access access) * Prepare the query lifecycle for execution, without completely planning into something that is executable, but * including some initial parsing and validation and any dyanmic parameter type resolution, to support prepared * statements via JDBC. - * */ public PrepareResult prepare() throws RelConversionException { @@ -277,7 +275,7 @@ public PrepareResult prepare() throws RelConversionException } Preconditions.checkNotNull(plannerContext, "Cannot prepare, plannerContext is null"); try (DruidPlanner planner = plannerFactory.createPlannerWithContext(plannerContext)) { - this.prepareResult = planner.prepare(sql); + this.prepareResult = planner.prepare(); return prepareResult; } // we can't collapse catch clauses since SqlPlanningException has type-sensitive constructors. @@ -299,7 +297,7 @@ public void plan() throws RelConversionException transition(State.AUTHORIZED, State.PLANNED); Preconditions.checkNotNull(plannerContext, "Cannot plan, plannerContext is null"); try (DruidPlanner planner = plannerFactory.createPlannerWithContext(plannerContext)) { - this.plannerResult = planner.plan(sql); + this.plannerResult = planner.plan(); } // we can't collapse catch clauses since SqlPlanningException has type-sensitive constructors. 
catch (SqlParseException e) { @@ -379,10 +377,9 @@ public ValidationResult runAnalyzeResources(AuthenticationResult authenticationR return validate(authenticationResult); } - public Set getAuthorizedResources() + public Set getRequiredResourceActions() { - assert validationResult != null; - return validationResult.getResources(); + return Preconditions.checkNotNull(validationResult, "validationResult").getResourceActions(); } /** @@ -449,7 +446,11 @@ public void finalizeStateAndEmitLogsAndMetrics( if (validationResult != null) { metricBuilder.setDimension( "dataSource", - validationResult.getResources().stream().map(Resource::getName).collect(Collectors.toList()).toString() + validationResult.getResourceActions() + .stream() + .map(action -> action.getResource().getName()) + .collect(Collectors.toList()) + .toString() ); } metricBuilder.setDimension("remoteAddress", StringUtils.nullToEmptyNonDruidDataString(remoteAddress)); diff --git a/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java b/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java index 7ad8a2ef0f07..12609e894c36 100644 --- a/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java @@ -40,9 +40,9 @@ import org.apache.druid.sql.SqlLifecycle; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PrepareResult; -import org.apache.druid.sql.calcite.rel.QueryMaker; import java.io.Closeable; +import java.sql.Array; import java.sql.DatabaseMetaData; import java.util.ArrayList; import java.util.List; @@ -119,20 +119,20 @@ public static List createColumnMetaData(final RelDataType rowTyp final ColumnMetaData.AvaticaType columnType; if (field.getType().getSqlTypeName() == SqlTypeName.ARRAY) { - final ColumnMetaData.Rep elementRep = QueryMaker.rep(field.getType().getComponentType().getSqlTypeName()); + final ColumnMetaData.Rep elementRep = 
rep(field.getType().getComponentType().getSqlTypeName()); final ColumnMetaData.ScalarType elementType = ColumnMetaData.scalar( field.getType().getComponentType().getSqlTypeName().getJdbcOrdinal(), field.getType().getComponentType().getSqlTypeName().getName(), elementRep ); - final ColumnMetaData.Rep arrayRep = QueryMaker.rep(field.getType().getSqlTypeName()); + final ColumnMetaData.Rep arrayRep = rep(field.getType().getSqlTypeName()); columnType = ColumnMetaData.array( elementType, field.getType().getSqlTypeName().getName(), arrayRep ); } else { - final ColumnMetaData.Rep rep = QueryMaker.rep(field.getType().getSqlTypeName()); + final ColumnMetaData.Rep rep = rep(field.getType().getSqlTypeName()); columnType = ColumnMetaData.scalar( field.getType().getSqlTypeName().getJdbcOrdinal(), field.getType().getSqlTypeName().getName(), @@ -411,6 +411,35 @@ private void ensure(final State... desiredStates) throw new ISE("Invalid action for state[%s]", state); } + private static ColumnMetaData.Rep rep(final SqlTypeName sqlType) + { + if (SqlTypeName.CHAR_TYPES.contains(sqlType)) { + return ColumnMetaData.Rep.of(String.class); + } else if (sqlType == SqlTypeName.TIMESTAMP) { + return ColumnMetaData.Rep.of(Long.class); + } else if (sqlType == SqlTypeName.DATE) { + return ColumnMetaData.Rep.of(Integer.class); + } else if (sqlType == SqlTypeName.INTEGER) { + // use Number.class for exact numeric types since JSON transport might switch longs to integers + return ColumnMetaData.Rep.of(Number.class); + } else if (sqlType == SqlTypeName.BIGINT) { + // use Number.class for exact numeric types since JSON transport might switch longs to integers + return ColumnMetaData.Rep.of(Number.class); + } else if (sqlType == SqlTypeName.FLOAT) { + return ColumnMetaData.Rep.of(Float.class); + } else if (sqlType == SqlTypeName.DOUBLE || sqlType == SqlTypeName.DECIMAL) { + return ColumnMetaData.Rep.of(Double.class); + } else if (sqlType == SqlTypeName.BOOLEAN) { + return 
ColumnMetaData.Rep.of(Boolean.class); + } else if (sqlType == SqlTypeName.OTHER) { + return ColumnMetaData.Rep.of(Object.class); + } else if (sqlType == SqlTypeName.ARRAY) { + return ColumnMetaData.Rep.of(Array.class); + } else { + throw new ISE("No rep for SQL type[%s]", sqlType); + } + } + enum State { NEW, diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/expression/AuthorizableOperator.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/AuthorizableOperator.java new file mode 100644 index 000000000000..1c6beb31b41d --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/AuthorizableOperator.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.expression; + +import org.apache.calcite.sql.SqlCall; +import org.apache.druid.server.security.ResourceAction; + +import java.util.Set; + +/** + * Interface for {@link org.apache.calcite.sql.SqlOperator} that need authorization in order to execute. + * + * Checked by {@link org.apache.druid.sql.calcite.planner.SqlResourceCollectorShuttle}. 
+ */ +public interface AuthorizableOperator +{ + Set computeResources(SqlCall call); +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalDataSource.java b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalDataSource.java new file mode 100644 index 000000000000..f32c5c1498db --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalDataSource.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.external; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.google.common.base.Preconditions; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.query.DataSource; +import org.apache.druid.segment.column.RowSignature; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Set; + +/** + * Represents external data for INSERT queries. Only used by the SQL layer, not by the query stack. 
+ * + * Includes an {@link InputSource} and {@link InputFormat}, plus a {@link RowSignature} so the SQL planner has + * the type information necessary to validate and plan the query. + * + * This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users. + */ +@JsonTypeName("external") +public class ExternalDataSource implements DataSource +{ + private final InputSource inputSource; + private final InputFormat inputFormat; + private final RowSignature signature; + + @JsonCreator + public ExternalDataSource( + @JsonProperty("inputSource") final InputSource inputSource, + @JsonProperty("inputFormat") final InputFormat inputFormat, + @JsonProperty("signature") final RowSignature signature + ) + { + this.inputSource = Preconditions.checkNotNull(inputSource, "inputSource"); + this.inputFormat = Preconditions.checkNotNull(inputFormat, "inputFormat"); + this.signature = Preconditions.checkNotNull(signature, "signature"); + } + + @JsonProperty + public InputSource getInputSource() + { + return inputSource; + } + + @JsonProperty + public InputFormat getInputFormat() + { + return inputFormat; + } + + @JsonProperty + public RowSignature getSignature() + { + return signature; + } + + @Override + public Set getTableNames() + { + return Collections.emptySet(); + } + + @Override + public List getChildren() + { + return Collections.emptyList(); + } + + @Override + public DataSource withChildren(final List children) + { + if (!children.isEmpty()) { + throw new IAE("Cannot accept children"); + } + + return this; + } + + @Override + public boolean isCacheable(boolean isBroker) + { + return false; + } + + @Override + public boolean isGlobal() + { + return false; + } + + @Override + public boolean isConcrete() + { + return false; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ExternalDataSource that = (ExternalDataSource) o; + return 
Objects.equals(inputSource, that.inputSource) + && Objects.equals(inputFormat, that.inputFormat) + && Objects.equals(signature, that.signature); + } + + @Override + public int hashCode() + { + return Objects.hash(inputSource, inputFormat, signature); + } + + @Override + public String toString() + { + return "ExternalDataSource{" + + "inputSource=" + inputSource + + ", inputFormat=" + inputFormat + + ", signature=" + signature + + '}'; + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalOperatorConversion.java b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalOperatorConversion.java new file mode 100644 index 000000000000..db3f6bd8c375 --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalOperatorConversion.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.sql.calcite.external; + +import com.google.inject.Inject; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.sql.SqlCall; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.parser.SqlParserPos; +import org.apache.calcite.sql.type.OperandTypes; +import org.apache.calcite.sql.type.ReturnTypes; +import org.apache.calcite.sql.type.SqlTypeFactoryImpl; +import org.apache.calcite.sql.type.SqlTypeFamily; +import org.apache.calcite.sql.validate.SqlUserDefinedTableMacro; +import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; +import org.apache.druid.sql.calcite.expression.AuthorizableOperator; +import org.apache.druid.sql.calcite.expression.DruidExpression; +import org.apache.druid.sql.calcite.expression.SqlOperatorConversion; +import org.apache.druid.sql.calcite.planner.DruidTypeSystem; +import org.apache.druid.sql.calcite.planner.PlannerContext; + +import javax.annotation.Nullable; +import java.util.Collections; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Registers the "EXTERN" operator, which is used in queries like "INSERT INTO dst SELECT * FROM TABLE(EXTERN(...))". + * + * This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users. + */ +public class ExternalOperatorConversion implements SqlOperatorConversion +{ + public static final String FUNCTION_NAME = "EXTERN"; + + // EXTERNAL is not an "official" resource type (it doesn't appear as a constant in the ResourceType class). + // It is here so we can write tests that check that authorization works as expected, like CalciteInsertDmlTest. + // This should be rethought before the functionality is exposed to end users. 
+ public static final ResourceAction EXTERNAL_RESOURCE_ACTION = + new ResourceAction(new Resource("EXTERNAL", "EXTERNAL"), Action.READ); + + private static final RelDataTypeFactory TYPE_FACTORY = new SqlTypeFactoryImpl(DruidTypeSystem.INSTANCE); + + private final SqlUserDefinedTableMacro operator; + + @Inject + public ExternalOperatorConversion(final ExternalTableMacro macro) + { + this.operator = new ExternalOperator(macro); + } + + @Override + public SqlOperator calciteOperator() + { + return operator; + } + + @Nullable + @Override + public DruidExpression toDruidExpression(PlannerContext plannerContext, RowSignature rowSignature, RexNode rexNode) + { + return null; + } + + private static class ExternalOperator extends SqlUserDefinedTableMacro implements AuthorizableOperator + { + public ExternalOperator(final ExternalTableMacro macro) + { + super( + new SqlIdentifier(FUNCTION_NAME, SqlParserPos.ZERO), + ReturnTypes.CURSOR, + null, + OperandTypes.sequence( + "(inputSource, inputFormat, signature)", + OperandTypes.family(SqlTypeFamily.STRING), + OperandTypes.family(SqlTypeFamily.STRING), + OperandTypes.family(SqlTypeFamily.STRING) + ), + macro.getParameters() + .stream() + .map(parameter -> parameter.getType(TYPE_FACTORY)) + .collect(Collectors.toList()), + macro + ); + } + + @Override + public Set computeResources(final SqlCall call) + { + return Collections.singleton(EXTERNAL_RESOURCE_ACTION); + } + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableMacro.java b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableMacro.java new file mode 100644 index 000000000000..5e73a54825ec --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableMacro.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.external; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.inject.Inject; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.FunctionParameter; +import org.apache.calcite.schema.TableMacro; +import org.apache.calcite.schema.TranslatableTable; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.guice.annotations.Json; +import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.sql.calcite.table.DruidTable; + +import java.util.List; + +/** + * Used by {@link ExternalOperatorConversion} to generate {@link DruidTable} that reference {@link ExternalDataSource}. + * + * This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users. 
+ */ +public class ExternalTableMacro implements TableMacro +{ + private final ObjectMapper jsonMapper; + + @Inject + public ExternalTableMacro(@Json final ObjectMapper jsonMapper) + { + this.jsonMapper = jsonMapper; + } + + @Override + public TranslatableTable apply(final List arguments) + { + try { + final InputSource inputSource = jsonMapper.readValue((String) arguments.get(0), InputSource.class); + final InputFormat inputFormat = jsonMapper.readValue((String) arguments.get(1), InputFormat.class); + final RowSignature signature = jsonMapper.readValue((String) arguments.get(2), RowSignature.class); + + return new DruidTable( + new ExternalDataSource(inputSource, inputFormat, signature), + signature, + jsonMapper, + false, + false + ); + } + catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + @Override + public List getParameters() + { + return ImmutableList.of( + new FunctionParameter() + { + @Override + public int getOrdinal() + { + return 0; + } + + @Override + public String getName() + { + return "inputSource"; + } + + @Override + public RelDataType getType(RelDataTypeFactory typeFactory) + { + return typeFactory.createJavaType(String.class); + } + + @Override + public boolean isOptional() + { + return false; + } + }, + new FunctionParameter() + { + @Override + public int getOrdinal() + { + return 1; + } + + @Override + public String getName() + { + return "inputFormat"; + } + + @Override + public RelDataType getType(RelDataTypeFactory typeFactory) + { + return typeFactory.createJavaType(String.class); + } + + @Override + public boolean isOptional() + { + return false; + } + }, + new FunctionParameter() + { + @Override + public int getOrdinal() + { + return 2; + } + + @Override + public String getName() + { + return "signature"; + } + + @Override + public RelDataType getType(RelDataTypeFactory typeFactory) + { + return typeFactory.createJavaType(String.class); + } + + @Override + public boolean isOptional() + { + return false; + } 
+ } + ); + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScan.java b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScan.java new file mode 100644 index 000000000000..d01d5784e0ee --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScan.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.external; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.calcite.plan.Convention; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.rel.AbstractRelNode; +import org.apache.calcite.rel.RelWriter; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.druid.sql.calcite.table.DruidTable; + +/** + * Represents a scan of an external table. Generated by {@link DruidTable} when its datasource is an + * {@link ExternalDataSource}. + * + * This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users. 
+ */ +public class ExternalTableScan extends AbstractRelNode +{ + private final ObjectMapper jsonMapper; + private final DruidTable druidTable; + + public ExternalTableScan( + final RelOptCluster cluster, + final ObjectMapper jsonMapper, + final DruidTable druidTable + ) + { + super(cluster, cluster.traitSetOf(Convention.NONE)); + this.jsonMapper = jsonMapper; + this.druidTable = druidTable; + } + + public DruidTable getDruidTable() + { + return druidTable; + } + + @Override + protected RelDataType deriveRowType() + { + return druidTable.getRowType(getCluster().getTypeFactory()); + } + + @Override + public RelWriter explainTerms(RelWriter pw) + { + final String dataSourceString; + + try { + dataSourceString = jsonMapper.writeValueAsString(druidTable.getDataSource()); + } + catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + + return pw.item("dataSource", dataSourceString); + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScanRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScanRule.java new file mode 100644 index 000000000000..786837eb87db --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/external/ExternalTableScanRule.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.external; + +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.druid.sql.calcite.planner.PlannerContext; +import org.apache.druid.sql.calcite.rel.DruidQueryRel; +import org.apache.druid.sql.calcite.run.QueryFeature; + +/** + * Rule that converts an {@link ExternalTableScan} to a call to {@link DruidQueryRel#scanExternal}. + * + * This class is exercised in CalciteInsertDmlTest but is not currently exposed to end users. + */ +public class ExternalTableScanRule extends RelOptRule +{ + private final PlannerContext plannerContext; + + public ExternalTableScanRule(final PlannerContext plannerContext) + { + super(operand(ExternalTableScan.class, any())); + this.plannerContext = plannerContext; + } + + @Override + public boolean matches(RelOptRuleCall call) + { + if (plannerContext.getQueryMaker().feature(QueryFeature.CAN_READ_EXTERNAL_DATA)) { + return super.matches(call); + } else { + return false; + } + } + + @Override + public void onMatch(final RelOptRuleCall call) + { + final ExternalTableScan scan = call.rel(0); + call.transformTo(DruidQueryRel.scanExternal(scan, plannerContext)); + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/CalcitePlannerModule.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/CalcitePlannerModule.java index 6d90eb5076a6..16aaa3a1b7f6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/CalcitePlannerModule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/CalcitePlannerModule.java @@ -20,19 +20,39 @@ package org.apache.druid.sql.calcite.planner; import com.google.inject.Binder; +import com.google.inject.Key; import com.google.inject.Module; import org.apache.druid.guice.JsonConfigProvider; +import org.apache.druid.guice.LazySingleton; +import 
org.apache.druid.guice.PolyBind; +import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; +import org.apache.druid.sql.calcite.run.QueryMakerFactory; /** * The module responsible for provide bindings for the Calcite Planner. */ public class CalcitePlannerModule implements Module { + public static final String PROPERTY_SQL_EXECUTOR_TYPE = "druid.sql.executor.type"; + @Override public void configure(Binder binder) { JsonConfigProvider.bind(binder, "druid.sql.planner", PlannerConfig.class); - binder.bind(PlannerFactory.class); - binder.bind(DruidOperatorTable.class); + + PolyBind.optionBinder(binder, Key.get(QueryMakerFactory.class)) + .addBinding(NativeQueryMakerFactory.TYPE) + .to(NativeQueryMakerFactory.class) + .in(LazySingleton.class); + + PolyBind.createChoiceWithDefault( + binder, + PROPERTY_SQL_EXECUTOR_TYPE, + Key.get(QueryMakerFactory.class), + NativeQueryMakerFactory.TYPE + ); + + binder.bind(PlannerFactory.class).in(LazySingleton.class); + binder.bind(DruidOperatorTable.class).in(LazySingleton.class); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index a1b9149be6ac..2d07239bd155 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -20,11 +20,11 @@ package org.apache.druid.sql.calcite.planner; import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; import com.google.common.base.Supplier; import com.google.common.base.Suppliers; import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; import org.apache.calcite.DataContext; import org.apache.calcite.adapter.java.JavaTypeFactory; import org.apache.calcite.config.CalciteConnectionConfig; @@ -49,6 +49,8 @@ import 
org.apache.calcite.rex.RexBuilder; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlExplain; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlInsert; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.parser.SqlParseException; @@ -61,20 +63,30 @@ import org.apache.calcite.tools.RelConversionException; import org.apache.calcite.tools.ValidationException; import org.apache.calcite.util.Pair; +import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.guava.BaseSequence; import org.apache.druid.java.util.common.guava.Sequence; import org.apache.druid.java.util.common.guava.Sequences; import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.segment.DimensionHandlerUtils; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; +import org.apache.druid.server.security.ResourceType; import org.apache.druid.sql.calcite.rel.DruidConvention; import org.apache.druid.sql.calcite.rel.DruidRel; +import org.apache.druid.sql.calcite.run.QueryMaker; +import org.apache.druid.sql.calcite.run.QueryMakerFactory; import javax.annotation.Nullable; import java.io.Closeable; import java.util.ArrayList; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Properties; +import java.util.Set; +import java.util.stream.Collectors; public class DruidPlanner implements Closeable { @@ -83,115 +95,120 @@ public class DruidPlanner implements Closeable private final FrameworkConfig frameworkConfig; private final Planner planner; private final PlannerContext plannerContext; - private final ObjectMapper jsonMapper; + private final QueryMakerFactory queryMakerFactory; + private RexBuilder rexBuilder; - public DruidPlanner( + DruidPlanner( final FrameworkConfig frameworkConfig, final 
PlannerContext plannerContext, - final ObjectMapper jsonMapper + final QueryMakerFactory queryMakerFactory ) { this.frameworkConfig = frameworkConfig; this.planner = Frameworks.getPlanner(frameworkConfig); this.plannerContext = plannerContext; - this.jsonMapper = jsonMapper; + this.queryMakerFactory = queryMakerFactory; } /** - * Validates an SQL query and collects a {@link ValidationResult} which contains a set of - * {@link org.apache.druid.server.security.Resource} corresponding to any Druid datasources or views which are taking - * part in the query + * Validates a SQL query and populates {@link PlannerContext#getResourceActions()}. + * + * @return set of {@link Resource} corresponding to any Druid datasources or views which are taking part in the query. */ - public ValidationResult validate(final String sql) throws SqlParseException, ValidationException + public ValidationResult validate() throws SqlParseException, ValidationException { - reset(); - SqlNode parsed = planner.parse(sql); - if (parsed.getKind() == SqlKind.EXPLAIN) { - SqlExplain explain = (SqlExplain) parsed; - parsed = explain.getExplicandum(); - } - SqlValidator validator = getValidator(); - SqlNode validated; + resetPlanner(); + final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql())); + final SqlValidator validator = getValidator(); + final SqlNode validatedQueryNode; + try { - validated = validator.validate(rewriteDynamicParameters(parsed)); + validatedQueryNode = validator.validate(rewriteDynamicParameters(parsed.getQueryNode())); } catch (RuntimeException e) { throw new ValidationException(e); } - SqlResourceCollectorShuttle resourceCollectorShuttle = - new SqlResourceCollectorShuttle(validator, plannerContext); - validated.accept(resourceCollectorShuttle); - plannerContext.setResources(resourceCollectorShuttle.getResources()); - return new ValidationResult(resourceCollectorShuttle.getResources()); + + SqlResourceCollectorShuttle resourceCollectorShuttle = new 
SqlResourceCollectorShuttle(validator, plannerContext); + validatedQueryNode.accept(resourceCollectorShuttle); + + final Set resourceActions = new HashSet<>(resourceCollectorShuttle.getResourceActions()); + + if (parsed.getInsertNode() != null) { + final String targetDataSource = validateAndGetDataSourceForInsert(parsed.getInsertNode()); + resourceActions.add(new ResourceAction(new Resource(targetDataSource, ResourceType.DATASOURCE), Action.WRITE)); + } + + plannerContext.setResourceActions(resourceActions); + return new ValidationResult(resourceActions); } /** * Prepare an SQL query for execution, including some initial parsing and validation and any dyanmic parameter type * resolution, to support prepared statements via JDBC. * - * In some future this could perhaps re-use some of the work done by {@link #validate(String)} + * In some future this could perhaps re-use some of the work done by {@link #validate()} * instead of repeating it, but that day is not today. */ - public PrepareResult prepare(final String sql) throws SqlParseException, ValidationException, RelConversionException + public PrepareResult prepare() throws SqlParseException, ValidationException, RelConversionException { - reset(); - SqlNode parsed = planner.parse(sql); - SqlExplain explain = null; - if (parsed.getKind() == SqlKind.EXPLAIN) { - explain = (SqlExplain) parsed; - parsed = explain.getExplicandum(); - } - final SqlNode validated = planner.validate(parsed); - RelRoot root = planner.rel(validated); - RelDataType rowType = root.validatedRowType; + resetPlanner(); - SqlValidator validator = getValidator(); - RelDataType parameterTypes = validator.getParameterRowType(validator.validate(validated)); + final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql())); + final SqlNode validatedQueryNode = planner.validate(parsed.getQueryNode()); + final RelRoot rootQueryRel = planner.rel(validatedQueryNode); - if (explain != null) { - final RelDataTypeFactory typeFactory = 
root.rel.getCluster().getTypeFactory(); - return new PrepareResult(getExplainStructType(typeFactory), parameterTypes); + final SqlValidator validator = getValidator(); + final RelDataTypeFactory typeFactory = rootQueryRel.rel.getCluster().getTypeFactory(); + final RelDataType parameterTypes = validator.getParameterRowType(validator.validate(validatedQueryNode)); + final RelDataType returnedRowType; + + if (parsed.getExplainNode() != null) { + returnedRowType = getExplainStructType(typeFactory); + } else { + returnedRowType = buildQueryMaker(rootQueryRel, parsed.getInsertNode()).getResultType(); } - return new PrepareResult(rowType, parameterTypes); + + return new PrepareResult(returnedRowType, parameterTypes); } /** * Plan an SQL query for execution, returning a {@link PlannerResult} which can be used to actually execute the query. * - * Ideally, the query can be planned into a native Druid query, using - * {@link #planWithDruidConvention(SqlExplain, RelRoot)}, but will fall-back to - * {@link #planWithBindableConvention(SqlExplain, RelRoot)} if this is not possible. + * Ideally, the query can be planned into a native Druid query, using {@link #planWithDruidConvention}, but will + * fall-back to {@link #planWithBindableConvention} if this is not possible. * - * In some future this could perhaps re-use some of the work done by {@link #validate(String)} + * In some future this could perhaps re-use some of the work done by {@link #validate()} * instead of repeating it, but that day is not today. 
*/ - public PlannerResult plan(final String sql) throws SqlParseException, ValidationException, RelConversionException + public PlannerResult plan() throws SqlParseException, ValidationException, RelConversionException { - reset(); - SqlExplain explain = null; - SqlNode parsed = planner.parse(sql); - if (parsed.getKind() == SqlKind.EXPLAIN) { - explain = (SqlExplain) parsed; - parsed = explain.getExplicandum(); - } + resetPlanner(); + + final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql())); + // the planner's type factory is not available until after parsing this.rexBuilder = new RexBuilder(planner.getTypeFactory()); - SqlNode parametized = rewriteDynamicParameters(parsed); - - final SqlNode validated = planner.validate(parametized); - final RelRoot root = planner.rel(validated); + final SqlNode parameterizedQueryNode = rewriteDynamicParameters(parsed.getQueryNode()); + final SqlNode validatedQueryNode = planner.validate(parameterizedQueryNode); + final RelRoot rootQueryRel = planner.rel(validatedQueryNode); try { - return planWithDruidConvention(explain, root); + return planWithDruidConvention(rootQueryRel, parsed.getExplainNode(), parsed.getInsertNode()); } catch (RelOptPlanner.CannotPlanException e) { - // Try again with BINDABLE convention. Used for querying Values and metadata tables. - try { - return planWithBindableConvention(explain, root); - } - catch (Exception e2) { - e.addSuppressed(e2); + if (parsed.getInsertNode() == null) { + // Try again with BINDABLE convention. Used for querying Values and metadata tables. + try { + return planWithBindableConvention(rootQueryRel, parsed.getExplainNode()); + } + catch (Exception e2) { + e.addSuppressed(e2); + throw e; + } + } else { + // Cannot INSERT with BINDABLE. throw e; } } @@ -223,7 +240,7 @@ public void close() * closely with the state of {@link #planner}, instead of repeating parsing and validation between each of these * steps. 
*/ - private void reset() + private void resetPlanner() { planner.close(); planner.reset(); @@ -233,19 +250,23 @@ private void reset() * Construct a {@link PlannerResult} for a {@link RelNode} that is directly translatable to a native Druid query. */ private PlannerResult planWithDruidConvention( - final SqlExplain explain, - final RelRoot root - ) throws RelConversionException + final RelRoot root, + @Nullable final SqlExplain explain, + @Nullable final SqlInsert insert + ) throws ValidationException, RelConversionException { - final RelNode possiblyWrappedRootRel = possiblyWrapRootWithOuterLimitFromContext(root); + final RelRoot possiblyLimitedRoot = possiblyWrapRootWithOuterLimitFromContext(root); + + final QueryMaker queryMaker = buildQueryMaker(root, insert); + plannerContext.setQueryMaker(queryMaker); - RelNode parametized = rewriteRelDynamicParameters(possiblyWrappedRootRel); + RelNode parameterized = rewriteRelDynamicParameters(possiblyLimitedRoot.rel); final DruidRel druidRel = (DruidRel) planner.transform( Rules.DRUID_CONVENTION_RULES, planner.getEmptyTraitSet() .replace(DruidConvention.instance()) .plus(root.collation), - parametized + parameterized ); if (explain != null) { @@ -253,32 +274,25 @@ private PlannerResult planWithDruidConvention( } else { final Supplier> resultsSupplier = () -> { // sanity check + final Set readResourceActions = + plannerContext.getResourceActions() + .stream() + .filter(action -> action.getAction() == Action.READ) + .collect(Collectors.toSet()); + Preconditions.checkState( - plannerContext.getResources().isEmpty() == druidRel.getDataSourceNames().isEmpty() + readResourceActions.isEmpty() == druidRel.getDataSourceNames().isEmpty() // The resources found in the plannerContext can be less than the datasources in // the query plan, because the query planner can eliminate empty tables by replacing // them with InlineDataSource of empty rows. 
- || plannerContext.getResources().size() >= druidRel.getDataSourceNames().size(), + || readResourceActions.size() >= druidRel.getDataSourceNames().size(), "Authorization sanity check failed" ); - if (root.isRefTrivial()) { - return druidRel.runQuery(); - } else { - // Add a mapping on top to accommodate root.fields. - return Sequences.map( - druidRel.runQuery(), - input -> { - final Object[] retVal = new Object[root.fields.size()]; - for (int i = 0; i < root.fields.size(); i++) { - retVal[i] = input[root.fields.get(i).getKey()]; - } - return retVal; - } - ); - } + + return druidRel.runQuery(); }; - return new PlannerResult(resultsSupplier, root.validatedRowType); + return new PlannerResult(resultsSupplier, queryMaker.getResultType()); } } @@ -286,12 +300,12 @@ private PlannerResult planWithDruidConvention( * Construct a {@link PlannerResult} for a fall-back 'bindable' rel, for things that are not directly translatable * to native Druid queries such as system tables and just a general purpose (but definitely not optimized) fall-back. * - * See {@link #planWithDruidConvention(SqlExplain, RelRoot)} which will handle things which are directly translatable + * See {@link #planWithDruidConvention} which will handle things which are directly translatable * to native Druid queries. 
*/ private PlannerResult planWithBindableConvention( - final SqlExplain explain, - final RelRoot root + final RelRoot root, + @Nullable final SqlExplain explain ) throws RelConversionException { BindableRel bindableRel = (BindableRel) planner.transform( @@ -370,17 +384,19 @@ private PlannerResult planExplanation( ) { final String explanation = RelOptUtil.dumpPlan("", rel, explain.getFormat(), explain.getDetailLevel()); - String resources; + String resourcesString; try { - resources = jsonMapper.writeValueAsString(plannerContext.getResources()); + final Set resources = + plannerContext.getResourceActions().stream().map(ResourceAction::getResource).collect(Collectors.toSet()); + resourcesString = plannerContext.getJsonMapper().writeValueAsString(resources); } catch (JsonProcessingException jpe) { // this should never happen, we create the Resources here, not a user log.error(jpe, "Encountered exception while serializing Resources for explain output"); - resources = null; + resourcesString = null; } final Supplier> resultsSupplier = Suppliers.ofInstance( - Sequences.simple(ImmutableList.of(new Object[]{explanation, resources}))); + Sequences.simple(ImmutableList.of(new Object[]{explanation, resourcesString}))); return new PlannerResult(resultsSupplier, getExplainStructType(rel.getCluster().getTypeFactory())); } @@ -397,14 +413,16 @@ private PlannerResult planExplanation( * @return root node wrapped with a limiting logical sort if a limit is specified in the query context. 
*/ @Nullable - private RelNode possiblyWrapRootWithOuterLimitFromContext(RelRoot root) + private RelRoot possiblyWrapRootWithOuterLimitFromContext(RelRoot root) { Object outerLimitObj = plannerContext.getQueryContext().get(PlannerContext.CTX_SQL_OUTER_LIMIT); Long outerLimit = DimensionHandlerUtils.convertObjectToLong(outerLimitObj, true); if (outerLimit == null) { - return root.rel; + return root; } + final LogicalSort newRootRel; + if (root.rel instanceof Sort) { Sort sort = (Sort) root.rel; @@ -413,34 +431,25 @@ private RelNode possiblyWrapRootWithOuterLimitFromContext(RelRoot root) if (newOffsetLimit.equals(originalOffsetLimit)) { // nothing to do, don't bother to make a new sort - return root.rel; + return root; } - return LogicalSort.create( + newRootRel = LogicalSort.create( sort.getInput(), sort.collation, newOffsetLimit.getOffsetAsRexNode(rexBuilder), newOffsetLimit.getLimitAsRexNode(rexBuilder) ); } else { - return LogicalSort.create( + newRootRel = LogicalSort.create( root.rel, root.collation, null, new OffsetLimit(0, outerLimit).getLimitAsRexNode(rexBuilder) ); } - } - private static RelDataType getExplainStructType(RelDataTypeFactory typeFactory) - { - return typeFactory.createStructType( - ImmutableList.of( - Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR), - Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR) - ), - ImmutableList.of("PLAN", "RESOURCES") - ); + return new RelRoot(newRootRel, root.validatedRowType, root.kind, root.fields, root.collation); } /** @@ -506,6 +515,67 @@ private RelNode rewriteRelDynamicParameters(RelNode rootRel) return rootRel.accept(parameterizer); } + private QueryMaker buildQueryMaker( + final RelRoot rootQueryRel, + @Nullable final SqlInsert insert + ) throws ValidationException + { + if (insert != null) { + final String targetDataSource = validateAndGetDataSourceForInsert(insert); + return queryMakerFactory.buildForInsert(targetDataSource, rootQueryRel, plannerContext); + } else { + return 
queryMakerFactory.buildForSelect(rootQueryRel, plannerContext); + } + } + + private static RelDataType getExplainStructType(RelDataTypeFactory typeFactory) + { + return typeFactory.createStructType( + ImmutableList.of( + Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR), + Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR) + ), + ImmutableList.of("PLAN", "RESOURCES") + ); + } + + /** + * Extract target datasource from a {@link SqlInsert}, and also validate that the INSERT is of a form we support. + * Expects the INSERT target to be either an unqualified name, or a name qualified by the default schema. + */ + private String validateAndGetDataSourceForInsert(final SqlInsert insert) throws ValidationException + { + if (insert.isUpsert()) { + throw new ValidationException("UPSERT is not supported."); + } + + if (insert.getTargetColumnList() != null) { + throw new ValidationException("INSERT with target column list is not supported."); + } + + final SqlIdentifier tableIdentifier = (SqlIdentifier) insert.getTargetTable(); + + if (tableIdentifier.names.isEmpty()) { + // I don't think this can happen, but include a branch for it just in case. + throw new ValidationException("INSERT requires target table."); + } else if (tableIdentifier.names.size() == 1) { + // Unqualified name. + return Iterables.getOnlyElement(tableIdentifier.names); + } else { + // Qualified name. 
+ final String defaultSchemaName = + Iterables.getOnlyElement(CalciteSchema.from(frameworkConfig.getDefaultSchema()).path(null)); + + if (tableIdentifier.names.size() == 2 && defaultSchemaName.equals(tableIdentifier.names.get(0))) { + return tableIdentifier.names.get(1); + } else { + throw new ValidationException( + StringUtils.format("Cannot INSERT into [%s] because it is not a Druid datasource.", tableIdentifier) + ); + } + } + } + private static class EnumeratorIterator implements Iterator { private final Iterator it; @@ -527,4 +597,62 @@ public T next() return it.next(); } } + + private static class ParsedNodes + { + @Nullable + private SqlExplain explain; + + @Nullable + private SqlInsert insert; + + private SqlNode query; + + private ParsedNodes(@Nullable SqlExplain explain, @Nullable SqlInsert insert, SqlNode query) + { + this.explain = explain; + this.insert = insert; + this.query = query; + } + + static ParsedNodes create(final SqlNode node) throws ValidationException + { + SqlExplain explain = null; + SqlInsert insert = null; + SqlNode query = node; + + if (query.getKind() == SqlKind.EXPLAIN) { + explain = (SqlExplain) query; + query = explain.getExplicandum(); + } + + if (query.getKind() == SqlKind.INSERT) { + insert = (SqlInsert) query; + query = insert.getSource(); + } + + if (!query.isA(SqlKind.QUERY)) { + throw new ValidationException(StringUtils.format("Cannot execute [%s].", query.getKind())); + } + + return new ParsedNodes(explain, insert, query); + } + + @Nullable + public SqlExplain getExplainNode() + { + return explain; + } + + @Nullable + public SqlInsert getInsertNode() + { + return insert; + } + + public SqlNode getQueryNode() + { + return query; + } + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java index 0ec442e7c6b9..34cad6fdbcc7 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java @@ -19,6 +19,7 @@ package org.apache.druid.sql.calcite.planner; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; @@ -28,11 +29,13 @@ import org.apache.calcite.linq4j.QueryProvider; import org.apache.calcite.schema.SchemaPlus; import org.apache.druid.java.util.common.DateTimes; +import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Numbers; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.server.security.Access; import org.apache.druid.server.security.AuthenticationResult; -import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; +import org.apache.druid.sql.calcite.run.QueryMaker; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -66,8 +69,10 @@ public class PlannerContext // DataContext keys public static final String DATA_CTX_AUTHENTICATION_RESULT = "authenticationResult"; + private final String sql; private final DruidOperatorTable operatorTable; private final ExprMacroTable macroTable; + private final ObjectMapper jsonMapper; private final PlannerConfig plannerConfig; private final DateTime localNow; private final DruidSchemaCatalog rootSchema; @@ -79,14 +84,17 @@ public class PlannerContext private List parameters = Collections.emptyList(); // result of authentication, providing identity to authorize set of resources produced by validation private AuthenticationResult authenticationResult; - // set of datasources and views which must be authorized - private Set resources = Collections.emptySet(); + // set of datasources and views which must be authorized, initialized to null so we can detect if it has been set. 
+ private Set resourceActions = null; // result of authorizing set of resources against authentication identity private Access authorizationResult; + private QueryMaker queryMaker; private PlannerContext( + final String sql, final DruidOperatorTable operatorTable, final ExprMacroTable macroTable, + final ObjectMapper jsonMapper, final PlannerConfig plannerConfig, final DateTime localNow, final boolean stringifyArrays, @@ -94,8 +102,10 @@ private PlannerContext( final Map queryContext ) { + this.sql = sql; this.operatorTable = operatorTable; this.macroTable = macroTable; + this.jsonMapper = jsonMapper; this.plannerConfig = Preconditions.checkNotNull(plannerConfig, "plannerConfig"); this.rootSchema = rootSchema; this.queryContext = queryContext != null ? new HashMap<>(queryContext) : new HashMap<>(); @@ -111,8 +121,10 @@ private PlannerContext( } public static PlannerContext create( + final String sql, final DruidOperatorTable operatorTable, final ExprMacroTable macroTable, + final ObjectMapper jsonMapper, final PlannerConfig plannerConfig, final DruidSchemaCatalog rootSchema, final Map queryContext @@ -151,8 +163,10 @@ public static PlannerContext create( } return new PlannerContext( + sql, operatorTable, macroTable, + jsonMapper, plannerConfig.withOverrides(queryContext), utcNow.withZone(timeZone), stringifyArrays, @@ -171,6 +185,11 @@ public ExprMacroTable getExprMacroTable() return macroTable; } + public ObjectMapper getJsonMapper() + { + return jsonMapper; + } + public PlannerConfig getPlannerConfig() { return plannerConfig; @@ -209,7 +228,12 @@ public List getParameters() public AuthenticationResult getAuthenticationResult() { - return authenticationResult; + return Preconditions.checkNotNull(authenticationResult, "Authentication result not available"); + } + + public String getSql() + { + return sql; } public String getSqlQueryId() @@ -288,7 +312,7 @@ public Object get(final String name) public Access getAuthorizationResult() { - return authorizationResult; + 
return Preconditions.checkNotNull(authorizationResult, "Authorization result not available"); } public void setParameters(List parameters) @@ -298,21 +322,51 @@ public void setParameters(List parameters) public void setAuthenticationResult(AuthenticationResult authenticationResult) { + if (this.authenticationResult != null) { + // It's a bug if this happens, because setAuthenticationResult should be called exactly once. + throw new ISE("Authentication result has already been set"); + } + this.authenticationResult = Preconditions.checkNotNull(authenticationResult, "authenticationResult"); } public void setAuthorizationResult(Access access) { + if (this.authorizationResult != null) { + // It's a bug if this happens, because setAuthorizationResult should be called exactly once. + throw new ISE("Authorization result has already been set"); + } + this.authorizationResult = Preconditions.checkNotNull(access, "authorizationResult"); } - public Set getResources() + public Set getResourceActions() { - return resources; + return Preconditions.checkNotNull(resourceActions, "Resources not available"); + } + + public void setResourceActions(Set resourceActions) + { + if (this.resourceActions != null) { + // It's a bug if this happens, because setResourceActions should be called exactly once. + throw new ISE("Resources have already been set"); + } + + this.resourceActions = Preconditions.checkNotNull(resourceActions, "resourceActions"); + } + + public void setQueryMaker(QueryMaker queryMaker) + { + if (this.queryMaker != null) { + // It's a bug if this happens, because setQueryMaker should be called exactly once. 
+ throw new ISE("QueryMaker has already been set"); + } + + this.queryMaker = Preconditions.checkNotNull(queryMaker, "queryMaker"); } - public void setResources(Set resources) + public QueryMaker getQueryMaker() { - this.resources = Preconditions.checkNotNull(resources, "resources"); + return Preconditions.checkNotNull(queryMaker, "QueryMaker not available"); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java index a96818e2e856..b1aa57654071 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerFactory.java @@ -38,11 +38,10 @@ import org.apache.calcite.tools.ValidationException; import org.apache.druid.guice.annotations.Json; import org.apache.druid.math.expr.ExprMacroTable; -import org.apache.druid.server.QueryLifecycleFactory; import org.apache.druid.server.security.Access; import org.apache.druid.server.security.AuthorizerMapper; import org.apache.druid.server.security.NoopEscalator; -import org.apache.druid.sql.calcite.rel.QueryMaker; +import org.apache.druid.sql.calcite.run.QueryMakerFactory; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.schema.DruidSchemaName; @@ -61,7 +60,7 @@ public class PlannerFactory .build(); private final DruidSchemaCatalog rootSchema; - private final QueryLifecycleFactory queryLifecycleFactory; + private final QueryMakerFactory queryMakerFactory; private final DruidOperatorTable operatorTable; private final ExprMacroTable macroTable; private final PlannerConfig plannerConfig; @@ -72,7 +71,7 @@ public class PlannerFactory @Inject public PlannerFactory( final DruidSchemaCatalog rootSchema, - final QueryLifecycleFactory queryLifecycleFactory, + final QueryMakerFactory queryMakerFactory, final DruidOperatorTable operatorTable, final ExprMacroTable macroTable, final 
PlannerConfig plannerConfig, @@ -82,7 +81,7 @@ public PlannerFactory( ) { this.rootSchema = rootSchema; - this.queryLifecycleFactory = queryLifecycleFactory; + this.queryMakerFactory = queryMakerFactory; this.operatorTable = operatorTable; this.macroTable = macroTable; this.plannerConfig = plannerConfig; @@ -94,41 +93,29 @@ public PlannerFactory( /** * Create a Druid query planner from an initial query context */ - public DruidPlanner createPlanner(final Map queryContext) + public DruidPlanner createPlanner(final String sql, final Map queryContext) { - final PlannerContext plannerContext = PlannerContext.create( + final PlannerContext context = PlannerContext.create( + sql, operatorTable, macroTable, + jsonMapper, plannerConfig, rootSchema, queryContext ); - final QueryMaker queryMaker = new QueryMaker(queryLifecycleFactory, plannerContext, jsonMapper); - final FrameworkConfig frameworkConfig = buildFrameworkConfig(plannerContext, queryMaker); - return new DruidPlanner( - frameworkConfig, - plannerContext, - jsonMapper - ); + return createPlannerWithContext(context); } /** * Create a new Druid query planner, re-using a previous {@link PlannerContext} */ - public DruidPlanner createPlannerWithContext(PlannerContext plannerContext) + public DruidPlanner createPlannerWithContext(final PlannerContext plannerContext) { - final QueryMaker queryMaker = new QueryMaker(queryLifecycleFactory, plannerContext, jsonMapper); - final FrameworkConfig frameworkConfig = buildFrameworkConfig(plannerContext, queryMaker); - - return new DruidPlanner( - frameworkConfig, - plannerContext, - jsonMapper - ); + return new DruidPlanner(buildFrameworkConfig(plannerContext), plannerContext, queryMakerFactory); } - /** * Not just visible for, but only for testing. Create a planner pre-loaded with an escalated authentication result * and ready to go authorization result. 
@@ -136,10 +123,11 @@ public DruidPlanner createPlannerWithContext(PlannerContext plannerContext) @VisibleForTesting public DruidPlanner createPlannerForTesting(final Map queryContext, String query) { - DruidPlanner thePlanner = createPlanner(queryContext); - thePlanner.getPlannerContext().setAuthenticationResult(NoopEscalator.getInstance().createEscalatedAuthenticationResult()); + final DruidPlanner thePlanner = createPlanner(query, queryContext); + thePlanner.getPlannerContext() + .setAuthenticationResult(NoopEscalator.getInstance().createEscalatedAuthenticationResult()); try { - thePlanner.validate(query); + thePlanner.validate(); } catch (SqlParseException | ValidationException e) { throw new RuntimeException(e); @@ -153,7 +141,7 @@ public AuthorizerMapper getAuthorizerMapper() return authorizerMapper; } - private FrameworkConfig buildFrameworkConfig(PlannerContext plannerContext, QueryMaker queryMaker) + private FrameworkConfig buildFrameworkConfig(PlannerContext plannerContext) { final SqlToRelConverter.Config sqlToRelConverterConfig = SqlToRelConverter .configBuilder() @@ -168,7 +156,7 @@ private FrameworkConfig buildFrameworkConfig(PlannerContext plannerContext, Quer .traitDefs(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE) .convertletTable(new DruidConvertletTable(plannerContext)) .operatorTable(operatorTable) - .programs(Rules.programs(plannerContext, queryMaker)) + .programs(Rules.programs(plannerContext)) .executor(new DruidRexExecutor(plannerContext)) .typeSystem(DruidTypeSystem.INSTANCE) .defaultSchema(rootSchema.getSubSchema(druidSchemaName)) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/Rules.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/Rules.java index 7a6146dac852..8169a244c00a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/Rules.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/Rules.java @@ -73,7 +73,7 @@ import org.apache.calcite.tools.Program; import 
org.apache.calcite.tools.Programs; import org.apache.calcite.tools.RelBuilder; -import org.apache.druid.sql.calcite.rel.QueryMaker; +import org.apache.druid.sql.calcite.external.ExternalTableScanRule; import org.apache.druid.sql.calcite.rule.DruidLogicalValuesRule; import org.apache.druid.sql.calcite.rule.DruidRelToDruidRule; import org.apache.druid.sql.calcite.rule.DruidRules; @@ -203,7 +203,7 @@ private Rules() // No instantiation. } - public static List programs(final PlannerContext plannerContext, final QueryMaker queryMaker) + public static List programs(final PlannerContext plannerContext) { @@ -216,7 +216,7 @@ public static List programs(final PlannerContext plannerContext, final ); return ImmutableList.of( - Programs.sequence(preProgram, Programs.ofRules(druidConventionRuleSet(plannerContext, queryMaker))), + Programs.sequence(preProgram, Programs.ofRules(druidConventionRuleSet(plannerContext))), Programs.sequence(preProgram, Programs.ofRules(bindableConventionRuleSet(plannerContext))) ); } @@ -234,16 +234,15 @@ private static Program buildHepProgram(Iterable rules, return Programs.of(builder.build(), noDag, metadataProvider); } - private static List druidConventionRuleSet( - final PlannerContext plannerContext, - final QueryMaker queryMaker - ) + private static List druidConventionRuleSet(final PlannerContext plannerContext) { - final ImmutableList.Builder retVal = ImmutableList.builder() + final ImmutableList.Builder retVal = ImmutableList + .builder() .addAll(baseRuleSet(plannerContext)) .add(DruidRelToDruidRule.instance()) - .add(new DruidTableScanRule(queryMaker)) - .add(new DruidLogicalValuesRule(queryMaker)) + .add(new DruidTableScanRule(plannerContext)) + .add(new DruidLogicalValuesRule(plannerContext)) + .add(new ExternalTableScanRule(plannerContext)) .addAll(DruidRules.rules(plannerContext)); return retVal.build(); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlResourceCollectorShuttle.java 
b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlResourceCollectorShuttle.java index a333b0342da1..464648cc53ee 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlResourceCollectorShuttle.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/SqlResourceCollectorShuttle.java @@ -19,6 +19,7 @@ package org.apache.druid.sql.calcite.planner; +import org.apache.calcite.sql.SqlCall; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.util.SqlShuttle; @@ -26,8 +27,12 @@ import org.apache.calcite.sql.validate.SqlValidator; import org.apache.calcite.sql.validate.SqlValidatorNamespace; import org.apache.calcite.sql.validate.SqlValidatorTable; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; import org.apache.druid.server.security.ResourceType; +import org.apache.druid.sql.calcite.expression.AuthorizableOperator; import java.util.HashSet; import java.util.List; @@ -43,17 +48,27 @@ */ public class SqlResourceCollectorShuttle extends SqlShuttle { - private final Set resources; + private final Set resourceActions; private final PlannerContext plannerContext; private final SqlValidator validator; public SqlResourceCollectorShuttle(SqlValidator validator, PlannerContext plannerContext) { this.validator = validator; - this.resources = new HashSet<>(); + this.resourceActions = new HashSet<>(); this.plannerContext = plannerContext; } + @Override + public SqlNode visit(SqlCall call) + { + if (call.getOperator() instanceof AuthorizableOperator) { + resourceActions.addAll(((AuthorizableOperator) call.getOperator()).computeResources(call)); + } + + return super.visit(call); + } + @Override public SqlNode visit(SqlIdentifier id) { @@ -71,16 +86,19 @@ public SqlNode visit(SqlIdentifier id) final String resourceName = 
qualifiedNameParts.get(1); final String resourceType = plannerContext.getSchemaResourceType(schema, resourceName); if (resourceType != null) { - resources.add(new Resource(resourceName, resourceType)); + resourceActions.add(new ResourceAction(new Resource(resourceName, resourceType), Action.READ)); } + } else if (qualifiedNameParts.size() > 2) { + // Don't expect to see more than 2 names (catalog?). + throw new ISE("Cannot analyze table identifier %s", qualifiedNameParts); + } } } return super.visit(id); } - public Set getResources() + public Set getResourceActions() { - return resources; + return resourceActions; } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/ValidationResult.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/ValidationResult.java index 81d9f98ae36f..206bd28436d3 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/ValidationResult.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/ValidationResult.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableSet; import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; import java.util.Set; @@ -31,17 +32,17 @@ */ public class ValidationResult { - private final Set resources; + private final Set resourceActions; public ValidationResult( - final Set resources + final Set resourceActions ) { - this.resources = ImmutableSet.copyOf(resources); + this.resourceActions = ImmutableSet.copyOf(resourceActions); } - public Set getResources() + public Set getResourceActions() { - return resources; + return resourceActions; } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java index 886798c50bdb..e651433ccac4 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidJoinQueryRel.java @@ -42,7
+42,6 @@ import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.java.util.common.guava.Sequence; import org.apache.druid.query.DataSource; import org.apache.druid.query.JoinDataSource; import org.apache.druid.query.QueryDataSource; @@ -83,16 +82,16 @@ private DruidJoinQueryRel( Join joinRel, Filter leftFilter, PartialDruidQuery partialQuery, - QueryMaker queryMaker + PlannerContext plannerContext ) { - super(cluster, traitSet, queryMaker); + super(cluster, traitSet, plannerContext); this.joinRel = joinRel; this.left = joinRel.getLeft(); this.right = joinRel.getRight(); this.leftFilter = leftFilter; this.partialQuery = partialQuery; - this.plannerConfig = queryMaker.getPlannerContext().getPlannerConfig(); + this.plannerConfig = plannerContext.getPlannerConfig(); } /** @@ -101,7 +100,7 @@ private DruidJoinQueryRel( public static DruidJoinQueryRel create( final Join joinRel, final Filter leftFilter, - final QueryMaker queryMaker + final PlannerContext plannerContext ) { return new DruidJoinQueryRel( @@ -110,7 +109,7 @@ public static DruidJoinQueryRel create( joinRel, leftFilter, PartialDruidQuery.create(joinRel), - queryMaker + plannerContext ); } @@ -120,17 +119,6 @@ public PartialDruidQuery getPartialDruidQuery() return partialQuery; } - @Override - public Sequence runQuery() - { - // runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this - // is the outermost query and it will actually get run as a native query. Druid's native query layer will - // finalize aggregations for the outermost query even if we don't explicitly ask it to. 
- - final DruidQuery query = toDruidQuery(false); - return getQueryMaker().runQuery(query); - } - @Override public DruidJoinQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilder) { @@ -140,7 +128,7 @@ public DruidJoinQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilde joinRel, leftFilter, newQueryBuilder, - getQueryMaker() + getPlannerContext() ); } @@ -234,7 +222,7 @@ public DruidJoinQueryRel asDruidConvention() ), leftFilter, partialQuery, - getQueryMaker() + getPlannerContext() ); } @@ -273,7 +261,7 @@ public RelNode copy(final RelTraitSet traitSet, final List inputs) joinRel.copy(joinRel.getTraitSet(), inputs), leftFilter, getPartialDruidQuery(), - getQueryMaker() + getPlannerContext() ); } @@ -293,7 +281,7 @@ public RelWriter explainTerms(RelWriter pw) final DruidQuery druidQuery = toDruidQueryForExplaining(); try { - queryString = getQueryMaker().getJsonMapper().writeValueAsString(druidQuery.getQuery()); + queryString = getPlannerContext().getJsonMapper().writeValueAsString(druidQuery.getQuery()); } catch (JsonProcessingException e) { throw new RuntimeException(e); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidOuterQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidOuterQueryRel.java index e26acf6d4509..6c0160760d2e 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidOuterQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidOuterQueryRel.java @@ -32,11 +32,10 @@ import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rel.type.RelDataType; import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.java.util.common.guava.Sequence; -import org.apache.druid.java.util.common.guava.Sequences; import org.apache.druid.query.QueryDataSource; import org.apache.druid.query.TableDataSource; import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.sql.calcite.planner.PlannerContext; import 
org.apache.druid.sql.calcite.table.RowSignatures; import java.util.List; @@ -57,10 +56,10 @@ private DruidOuterQueryRel( RelTraitSet traitSet, RelNode sourceRel, PartialDruidQuery partialQuery, - QueryMaker queryMaker + PlannerContext plannerContext ) { - super(cluster, traitSet, queryMaker); + super(cluster, traitSet, plannerContext); this.sourceRel = sourceRel; this.partialQuery = partialQuery; } @@ -75,7 +74,7 @@ public static DruidOuterQueryRel create( sourceRel.getTraitSet().plusAll(partialQuery.getRelTraits()), sourceRel, partialQuery, - sourceRel.getQueryMaker() + sourceRel.getPlannerContext() ); } @@ -85,21 +84,6 @@ public PartialDruidQuery getPartialDruidQuery() return partialQuery; } - @Override - public Sequence runQuery() - { - // runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this - // is the outermost query and it will actually get run as a native query. Druid's native query layer will - // finalize aggregations for the outermost query even if we don't explicitly ask it to. 
- - final DruidQuery query = toDruidQuery(false); - if (query != null) { - return getQueryMaker().runQuery(query); - } else { - return Sequences.empty(); - } - } - @Override public DruidOuterQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilder) { @@ -108,7 +92,7 @@ public DruidOuterQueryRel withPartialQuery(final PartialDruidQuery newQueryBuild getTraitSet().plusAll(newQueryBuilder.getRelTraits()), sourceRel, newQueryBuilder, - getQueryMaker() + getPlannerContext() ); } @@ -150,7 +134,7 @@ public DruidOuterQueryRel asDruidConvention() getTraitSet().plus(DruidConvention.instance()), RelOptRule.convert(sourceRel, DruidConvention.instance()), partialQuery, - getQueryMaker() + getPlannerContext() ); } @@ -177,7 +161,7 @@ public RelNode copy(final RelTraitSet traitSet, final List inputs) traitSet, Iterables.getOnlyElement(inputs), getPartialDruidQuery(), - getQueryMaker() + getPlannerContext() ); } @@ -194,7 +178,7 @@ public RelWriter explainTerms(RelWriter pw) final DruidQuery druidQuery = toDruidQueryForExplaining(); try { - queryString = getQueryMaker().getJsonMapper().writeValueAsString(druidQuery.getQuery()); + queryString = getPlannerContext().getJsonMapper().writeValueAsString(druidQuery.getQuery()); } catch (JsonProcessingException e) { throw new RuntimeException(e); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java index c734a8be9e68..a662f11ddc64 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java @@ -25,7 +25,6 @@ import com.google.common.collect.ImmutableSortedMap; import com.google.common.collect.Iterables; import com.google.common.collect.Iterators; -import com.google.common.collect.Ordering; import com.google.common.primitives.Ints; import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.ints.IntList; @@ -82,11 +81,14 @@ 
import org.apache.druid.sql.calcite.planner.OffsetLimit; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.rule.GroupByRules; +import org.apache.druid.sql.calcite.run.QueryFeature; +import org.apache.druid.sql.calcite.run.QueryFeatureInspector; import org.apache.druid.sql.calcite.table.RowSignatures; import javax.annotation.Nonnull; import javax.annotation.Nullable; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; @@ -94,6 +96,9 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; /** * A fully formed Druid query, built from a {@link PartialDruidQuery}. The work to develop this query is done @@ -142,7 +147,7 @@ private DruidQuery( this.outputRowSignature = computeOutputRowSignature(sourceRowSignature, selectProjection, grouping, sorting); this.outputRowType = Preconditions.checkNotNull(outputRowType, "outputRowType"); this.virtualColumnRegistry = Preconditions.checkNotNull(virtualColumnRegistry, "virtualColumnRegistry"); - this.query = computeQuery(); + this.query = computeQuery(plannerContext.getQueryMaker()); } public static DruidQuery fromPartialQuery( @@ -732,35 +737,35 @@ public Query getQuery() * * @return Druid query */ - private Query computeQuery() + private Query computeQuery(final QueryFeatureInspector queryFeatureInspector) { if (dataSource instanceof QueryDataSource) { // If there is a subquery, then we prefer the outer query to be a groupBy if possible, since this potentially // enables more efficient execution. (The groupBy query toolchest can handle some subqueries by itself, without // requiring the Broker to inline results.) 
- final GroupByQuery outerQuery = toGroupByQuery(); + final GroupByQuery outerQuery = toGroupByQuery(queryFeatureInspector); if (outerQuery != null) { return outerQuery; } } - final TimeseriesQuery tsQuery = toTimeseriesQuery(); + final TimeseriesQuery tsQuery = toTimeseriesQuery(queryFeatureInspector); if (tsQuery != null) { return tsQuery; } - final TopNQuery topNQuery = toTopNQuery(); + final TopNQuery topNQuery = toTopNQuery(queryFeatureInspector); if (topNQuery != null) { return topNQuery; } - final GroupByQuery groupByQuery = toGroupByQuery(); + final GroupByQuery groupByQuery = toGroupByQuery(queryFeatureInspector); if (groupByQuery != null) { return groupByQuery; } - final ScanQuery scanQuery = toScanQuery(); + final ScanQuery scanQuery = toScanQuery(queryFeatureInspector); if (scanQuery != null) { return scanQuery; } @@ -774,9 +779,10 @@ private Query computeQuery() * @return query */ @Nullable - public TimeseriesQuery toTimeseriesQuery() + private TimeseriesQuery toTimeseriesQuery(final QueryFeatureInspector queryFeatureInspector) { - if (grouping == null + if (!queryFeatureInspector.feature(QueryFeature.CAN_RUN_TIMESERIES) + || grouping == null || grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs()) || grouping.getHavingFilter() != null) { return null; @@ -821,7 +827,7 @@ public TimeseriesQuery toTimeseriesQuery() timeseriesLimit = Ints.checkedCast(limit); } - switch (sorting.getSortKind(dimensionExpression.getOutputName())) { + switch (sorting.getTimeSortKind(dimensionExpression.getOutputName())) { case UNORDERED: case TIME_ASCENDING: descending = false; @@ -883,8 +889,13 @@ public TimeseriesQuery toTimeseriesQuery() * @return query or null */ @Nullable - public TopNQuery toTopNQuery() + private TopNQuery toTopNQuery(final QueryFeatureInspector queryFeatureInspector) { + // Must be allowed by the QueryMaker. 
+ if (!queryFeatureInspector.feature(QueryFeature.CAN_RUN_TOPN)) { + return null; + } + // Must have GROUP BY one column, no GROUPING SETS, ORDER BY ≤ 1 column, LIMIT > 0 and ≤ maxTopNLimit, // no OFFSET, no HAVING. final boolean topNOk = grouping != null @@ -969,7 +980,7 @@ public TopNQuery toTopNQuery() * @return query or null */ @Nullable - public GroupByQuery toGroupByQuery() + private GroupByQuery toGroupByQuery(final QueryFeatureInspector queryFeatureInspector) { if (grouping == null) { return null; @@ -1082,7 +1093,7 @@ public GroupByQuery toGroupByQuery() * @return query or null */ @Nullable - public ScanQuery toScanQuery() + private ScanQuery toScanQuery(final QueryFeatureInspector queryFeatureInspector) { if (grouping != null) { // Scan cannot GROUP BY. @@ -1102,7 +1113,7 @@ public ScanQuery toScanQuery() final DataSource newDataSource = dataSourceFiltrationPair.lhs; final Filtration filtration = dataSourceFiltrationPair.rhs; - final ScanQuery.Order order; + final List orderByColumns; long scanOffset = 0L; long scanLimit = 0L; @@ -1120,31 +1131,31 @@ public ScanQuery toScanQuery() scanLimit = limit; } - final Sorting.SortKind sortKind = sorting.getSortKind(ColumnHolder.TIME_COLUMN_NAME); - - if (sortKind == Sorting.SortKind.UNORDERED) { - order = ScanQuery.Order.NONE; - } else if (sortKind == Sorting.SortKind.TIME_ASCENDING) { - order = ScanQuery.Order.ASCENDING; - } else if (sortKind == Sorting.SortKind.TIME_DESCENDING) { - order = ScanQuery.Order.DESCENDING; - } else { - assert sortKind == Sorting.SortKind.NON_TIME; - - // Scan cannot ORDER BY non-time columns. - return null; - } + orderByColumns = sorting.getOrderBys().stream().map( + orderBy -> + new ScanQuery.OrderBy( + orderBy.getDimension(), + orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING + ? 
ScanQuery.Order.DESCENDING + : ScanQuery.Order.ASCENDING + ) + ).collect(Collectors.toList()); } else { - order = ScanQuery.Order.NONE; + orderByColumns = Collections.emptyList(); } - // Compute the list of columns to select. - final Set columns = new HashSet<>(outputRowSignature.getColumnNames()); - - if (order != ScanQuery.Order.NONE) { - columns.add(ColumnHolder.TIME_COLUMN_NAME); + if (!queryFeatureInspector.feature(QueryFeature.SCAN_CAN_ORDER_BY_NON_TIME) + && (orderByColumns.size() > 1 + || orderByColumns.stream() + .anyMatch(orderBy -> !orderBy.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME)))) { + // Cannot handle this ordering. + return null; } + // Compute the list of columns to select, sorted and deduped. + final SortedSet scanColumns = new TreeSet<>(outputRowSignature.getColumnNames()); + orderByColumns.forEach(column -> scanColumns.add(column.getColumnName())); + return new ScanQuery( newDataSource, filtration.getQuerySegmentSpec(), @@ -1153,10 +1164,10 @@ public ScanQuery toScanQuery() 0, scanOffset, scanLimit, - order, null, + orderByColumns, filtration.getDimFilter(), - Ordering.natural().sortedCopy(columns), + ImmutableList.copyOf(scanColumns), false, ImmutableSortedMap.copyOf(plannerContext.getQueryContext()) ); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQueryRel.java index 02273ce22e35..373e3d4abffd 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQueryRel.java @@ -32,7 +32,8 @@ import org.apache.calcite.rel.logical.LogicalValues; import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rel.type.RelDataType; -import org.apache.druid.java.util.common.guava.Sequence; +import org.apache.druid.sql.calcite.external.ExternalTableScan; +import org.apache.druid.sql.calcite.planner.PlannerContext; import 
org.apache.druid.sql.calcite.table.DruidTable; import javax.annotation.Nullable; @@ -53,24 +54,24 @@ private DruidQueryRel( final RelTraitSet traitSet, @Nullable final RelOptTable table, final DruidTable druidTable, - final QueryMaker queryMaker, + final PlannerContext plannerContext, final PartialDruidQuery partialQuery ) { - super(cluster, traitSet, queryMaker); + super(cluster, traitSet, plannerContext); this.table = table; this.druidTable = Preconditions.checkNotNull(druidTable, "druidTable"); this.partialQuery = Preconditions.checkNotNull(partialQuery, "partialQuery"); } /** - * Create a DruidQueryRel representing a full scan. + * Create a DruidQueryRel representing a full scan of a builtin table or lookup. */ - public static DruidQueryRel fullScan( + public static DruidQueryRel scanTable( final LogicalTableScan scanRel, final RelOptTable table, final DruidTable druidTable, - final QueryMaker queryMaker + final PlannerContext plannerContext ) { return new DruidQueryRel( @@ -78,15 +79,36 @@ public static DruidQueryRel fullScan( scanRel.getCluster().traitSetOf(Convention.NONE), Preconditions.checkNotNull(table, "table"), druidTable, - queryMaker, + plannerContext, PartialDruidQuery.create(scanRel) ); } - public static DruidQueryRel fullScan( + /** + * Create a DruidQueryRel representing a full scan of external data. + */ + public static DruidQueryRel scanExternal( + final ExternalTableScan scanRel, + final PlannerContext plannerContext + ) + { + return new DruidQueryRel( + scanRel.getCluster(), + scanRel.getCluster().traitSetOf(Convention.NONE), + null, + scanRel.getDruidTable(), + plannerContext, + PartialDruidQuery.create(scanRel) + ); + } + + /** + * Create a DruidQueryRel representing a full scan of inline, literal values. 
+ */ + public static DruidQueryRel scanValues( final LogicalValues valuesRel, final DruidTable druidTable, - final QueryMaker queryMaker + final PlannerContext plannerContext ) { return new DruidQueryRel( @@ -94,7 +116,7 @@ public static DruidQueryRel fullScan( valuesRel.getTraitSet(), // the traitSet of valuesRel should be kept null, druidTable, - queryMaker, + plannerContext, PartialDruidQuery.create(valuesRel) ); } @@ -125,7 +147,7 @@ public DruidQueryRel asDruidConvention() getTraitSet().replace(DruidConvention.instance()), table, druidTable, - getQueryMaker(), + getPlannerContext(), partialQuery ); } @@ -150,21 +172,11 @@ public DruidQueryRel withPartialQuery(final PartialDruidQuery newQueryBuilder) getTraitSet().plusAll(newQueryBuilder.getRelTraits()), table, druidTable, - getQueryMaker(), + getPlannerContext(), newQueryBuilder ); } - @Override - public Sequence runQuery() - { - // runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this - // is the outermost query and it will actually get run as a native query. Druid's native query layer will - // finalize aggregations for the outermost query even if we don't explicitly ask it to. 
- - return getQueryMaker().runQuery(toDruidQuery(false)); - } - public DruidTable getDruidTable() { return druidTable; @@ -189,7 +201,7 @@ public RelWriter explainTerms(final RelWriter pw) final DruidQuery druidQuery = toDruidQueryForExplaining(); try { - queryString = getQueryMaker().getJsonMapper().writeValueAsString(druidQuery.getQuery()); + queryString = getPlannerContext().getJsonMapper().writeValueAsString(druidQuery.getQuery()); } catch (JsonProcessingException e) { throw new RuntimeException(e); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidRel.java index 9b9d30f3fc4a..6f601ec5aa52 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidRel.java @@ -30,12 +30,12 @@ public abstract class DruidRel extends AbstractRelNode { - private final QueryMaker queryMaker; + private final PlannerContext plannerContext; - protected DruidRel(RelOptCluster cluster, RelTraitSet traitSet, QueryMaker queryMaker) + protected DruidRel(RelOptCluster cluster, RelTraitSet traitSet, PlannerContext plannerContext) { super(cluster, traitSet); - this.queryMaker = queryMaker; + this.plannerContext = plannerContext; } /** @@ -45,7 +45,14 @@ protected DruidRel(RelOptCluster cluster, RelTraitSet traitSet, QueryMaker query @Nullable public abstract PartialDruidQuery getPartialDruidQuery(); - public abstract Sequence runQuery(); + public Sequence runQuery() + { + // runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this + // is the outermost query, and it will actually get run as a native query. Druid's native query layer will + // finalize aggregations for the outermost query even if we don't explicitly ask it to. 
+ + return getPlannerContext().getQueryMaker().runQuery(toDruidQuery(false)); + } public abstract T withPartialQuery(PartialDruidQuery newQueryBuilder); @@ -83,14 +90,9 @@ public boolean isValidDruidQuery() */ public abstract DruidQuery toDruidQueryForExplaining(); - public QueryMaker getQueryMaker() - { - return queryMaker; - } - public PlannerContext getPlannerContext() { - return queryMaker.getPlannerContext(); + return plannerContext; } public abstract T asDruidConvention(); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionDataSourceRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionDataSourceRel.java index 982394639f7d..5b3d127304ae 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionDataSourceRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionDataSourceRel.java @@ -31,11 +31,11 @@ import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rel.type.RelDataType; import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.java.util.common.guava.Sequence; import org.apache.druid.query.DataSource; import org.apache.druid.query.TableDataSource; import org.apache.druid.query.UnionDataSource; import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.table.RowSignatures; import java.util.ArrayList; @@ -66,10 +66,10 @@ private DruidUnionDataSourceRel( final Union unionRel, final List unionColumnNames, final PartialDruidQuery partialQuery, - final QueryMaker queryMaker + final PlannerContext plannerContext ) { - super(cluster, traitSet, queryMaker); + super(cluster, traitSet, plannerContext); this.unionRel = unionRel; this.unionColumnNames = unionColumnNames; this.partialQuery = partialQuery; @@ -78,7 +78,7 @@ private DruidUnionDataSourceRel( public static DruidUnionDataSourceRel create( final Union unionRel, final List unionColumnNames, - final 
QueryMaker queryMaker + final PlannerContext plannerContext ) { return new DruidUnionDataSourceRel( @@ -87,7 +87,7 @@ public static DruidUnionDataSourceRel create( unionRel, unionColumnNames, PartialDruidQuery.create(unionRel), - queryMaker + plannerContext ); } @@ -111,20 +111,10 @@ public DruidUnionDataSourceRel withPartialQuery(final PartialDruidQuery newQuery unionRel, unionColumnNames, newQueryBuilder, - getQueryMaker() + getPlannerContext() ); } - @Override - public Sequence runQuery() - { - // runQuery doesn't need to finalize aggregations, because the fact that runQuery is happening suggests this - // is the outermost query and it will actually get run as a native query. Druid's native query layer will - // finalize aggregations for the outermost query even if we don't explicitly ask it to. - - return getQueryMaker().runQuery(toDruidQuery(false)); - } - @Override public DruidQuery toDruidQuery(final boolean finalizeAggregations) { @@ -204,7 +194,7 @@ public DruidUnionDataSourceRel asDruidConvention() ), unionColumnNames, partialQuery, - getQueryMaker() + getPlannerContext() ); } @@ -229,7 +219,7 @@ public RelNode copy(final RelTraitSet traitSet, final List inputs) (Union) unionRel.copy(unionRel.getTraitSet(), inputs), unionColumnNames, partialQuery, - getQueryMaker() + getPlannerContext() ); } @@ -252,7 +242,7 @@ public RelWriter explainTerms(RelWriter pw) final DruidQuery druidQuery = toDruidQueryForExplaining(); try { - queryString = getQueryMaker().getJsonMapper().writeValueAsString(druidQuery.getQuery()); + queryString = getPlannerContext().getJsonMapper().writeValueAsString(druidQuery.getQuery()); } catch (JsonProcessingException e) { throw new RuntimeException(e); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionRel.java index a83869e0fe21..25e6e9f52326 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionRel.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionRel.java @@ -34,6 +34,7 @@ import org.apache.druid.java.util.common.guava.Sequence; import org.apache.druid.java.util.common.guava.Sequences; import org.apache.druid.query.UnionDataSource; +import org.apache.druid.sql.calcite.planner.PlannerContext; import javax.annotation.Nullable; import java.util.ArrayList; @@ -60,20 +61,20 @@ public class DruidUnionRel extends DruidRel private DruidUnionRel( final RelOptCluster cluster, final RelTraitSet traitSet, - final QueryMaker queryMaker, + final PlannerContext plannerContext, final RelDataType rowType, final List rels, final int limit ) { - super(cluster, traitSet, queryMaker); + super(cluster, traitSet, plannerContext); this.rowType = rowType; this.rels = rels; this.limit = limit; } public static DruidUnionRel create( - final QueryMaker queryMaker, + final PlannerContext plannerContext, final RelDataType rowType, final List rels, final int limit @@ -84,7 +85,7 @@ public static DruidUnionRel create( return new DruidUnionRel( rels.get(0).getCluster(), rels.get(0).getTraitSet(), - queryMaker, + plannerContext, rowType, new ArrayList<>(rels), limit @@ -138,7 +139,7 @@ public DruidUnionRel asDruidConvention() return new DruidUnionRel( getCluster(), getTraitSet().replace(DruidConvention.instance()), - getQueryMaker(), + getPlannerContext(), rowType, rels.stream().map(rel -> RelOptRule.convert(rel, DruidConvention.instance())).collect(Collectors.toList()), limit @@ -163,7 +164,7 @@ public RelNode copy(final RelTraitSet traitSet, final List inputs) return new DruidUnionRel( getCluster(), traitSet, - getQueryMaker(), + getPlannerContext(), rowType, inputs, limit diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/Sorting.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/Sorting.java index 0f18102be2fa..38c1fe84e853 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/Sorting.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/rel/Sorting.java @@ -84,7 +84,7 @@ public static Sorting none() return new Sorting(Collections.emptyList(), OffsetLimit.none(), null); } - public SortKind getSortKind(final String timeColumn) + public SortKind getTimeSortKind(final String timeColumn) { if (orderBys.isEmpty()) { return SortKind.UNORDERED; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidJoinRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidJoinRule.java index a8843eefacf9..46ff31f406c2 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidJoinRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidJoinRule.java @@ -186,7 +186,7 @@ public void onMatch(RelOptRuleCall call) join.isSemiJoinDone() ), leftFilter, - left.getQueryMaker() + left.getPlannerContext() ); final RelBuilder relBuilder = diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java index 6a76202acec3..e66f5ae6afd7 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidLogicalValuesRule.java @@ -31,7 +31,6 @@ import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.rel.DruidQueryRel; -import org.apache.druid.sql.calcite.rel.QueryMaker; import org.apache.druid.sql.calcite.table.DruidTable; import org.apache.druid.sql.calcite.table.RowSignatures; @@ -50,12 +49,12 @@ */ public class DruidLogicalValuesRule extends RelOptRule { - private final QueryMaker queryMaker; + private final PlannerContext plannerContext; - public DruidLogicalValuesRule(QueryMaker queryMaker) + public DruidLogicalValuesRule(PlannerContext plannerContext) { super(operand(LogicalValues.class, any())); - this.queryMaker = queryMaker; + 
this.plannerContext = plannerContext; } @Override @@ -67,7 +66,7 @@ public void onMatch(RelOptRuleCall call) .stream() .map(tuple -> tuple .stream() - .map(v -> getValueFromLiteral(v, queryMaker.getPlannerContext())) + .map(v -> getValueFromLiteral(v, plannerContext)) .collect(Collectors.toList()) .toArray(new Object[0]) ) @@ -79,11 +78,12 @@ public void onMatch(RelOptRuleCall call) final DruidTable druidTable = new DruidTable( InlineDataSource.fromIterable(objectTuples, rowSignature), rowSignature, + null, true, false ); call.transformTo( - DruidQueryRel.fullScan(values, druidTable, queryMaker) + DruidQueryRel.scanValues(values, druidTable, plannerContext) ); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidSortUnionRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidSortUnionRule.java index 0ef41fb7fe65..daf1162ac44d 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidSortUnionRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidSortUnionRule.java @@ -62,7 +62,7 @@ public void onMatch(final RelOptRuleCall call) final int offset = sort.offset != null ? RexLiteral.intValue(sort.offset) : 0; final DruidUnionRel newUnionRel = DruidUnionRel.create( - unionRel.getQueryMaker(), + unionRel.getPlannerContext(), unionRel.getRowType(), unionRel.getInputs(), unionRel.getLimit() >= 0 ? 
Math.min(limit + offset, unionRel.getLimit()) : limit + offset diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidTableScanRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidTableScanRule.java index eb744fa5853d..11533f7a2754 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidTableScanRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidTableScanRule.java @@ -23,18 +23,18 @@ import org.apache.calcite.plan.RelOptRuleCall; import org.apache.calcite.plan.RelOptTable; import org.apache.calcite.rel.logical.LogicalTableScan; +import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.rel.DruidQueryRel; -import org.apache.druid.sql.calcite.rel.QueryMaker; import org.apache.druid.sql.calcite.table.DruidTable; public class DruidTableScanRule extends RelOptRule { - private final QueryMaker queryMaker; + private final PlannerContext plannerContext; - public DruidTableScanRule(final QueryMaker queryMaker) + public DruidTableScanRule(final PlannerContext plannerContext) { super(operand(LogicalTableScan.class, any())); - this.queryMaker = queryMaker; + this.plannerContext = plannerContext; } @Override @@ -45,7 +45,7 @@ public void onMatch(final RelOptRuleCall call) final DruidTable druidTable = table.unwrap(DruidTable.class); if (druidTable != null) { call.transformTo( - DruidQueryRel.fullScan(scan, table, druidTable, queryMaker) + DruidQueryRel.scanTable(scan, table, druidTable, plannerContext) ); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidUnionDataSourceRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidUnionDataSourceRule.java index fe9c0d47f022..ae3ff39009cb 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidUnionDataSourceRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidUnionDataSourceRule.java @@ -91,7 +91,7 @@ public void onMatch(final RelOptRuleCall call) 
DruidUnionDataSourceRel.create( (Union) newUnionRel, getColumnNamesIfTableOrUnion(firstDruidRel).get(), - firstDruidRel.getQueryMaker() + firstDruidRel.getPlannerContext() ) ); } else { @@ -104,7 +104,7 @@ public void onMatch(final RelOptRuleCall call) DruidUnionDataSourceRel.create( unionRel, getColumnNamesIfTableOrUnion(firstDruidRel).get(), - firstDruidRel.getQueryMaker() + firstDruidRel.getPlannerContext() ) ); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidUnionRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidUnionRule.java index 5863415abc8c..6e7fec39c99b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidUnionRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/DruidUnionRule.java @@ -72,7 +72,7 @@ public void onMatch(final RelOptRuleCall call) if (unionRel.all) { call.transformTo( DruidUnionRel.create( - someDruidRel.getQueryMaker(), + someDruidRel.getPlannerContext(), unionRel.getRowType(), inputs, -1 diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/QueryMaker.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java similarity index 87% rename from sql/src/main/java/org/apache/druid/sql/calcite/rel/QueryMaker.java rename to sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java index 9b625b540135..3f656d0e16c3 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/QueryMaker.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.druid.sql.calcite.rel; +package org.apache.druid.sql.calcite.run; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; @@ -25,12 +25,14 @@ import com.google.common.primitives.Ints; import it.unimi.dsi.fastutil.objects.Object2IntMap; import it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap; -import org.apache.calcite.avatica.ColumnMetaData; +import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.runtime.Hook; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.NlsString; +import org.apache.calcite.util.Pair; import org.apache.druid.common.config.NullHandling; import org.apache.druid.java.util.common.DateTimes; +import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.guava.Sequence; @@ -49,44 +51,64 @@ import org.apache.druid.server.security.AuthenticationResult; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.PlannerContext; +import org.apache.druid.sql.calcite.rel.CannotBuildQueryException; +import org.apache.druid.sql.calcite.rel.DruidQuery; import org.joda.time.DateTime; import org.joda.time.Interval; import java.io.IOException; -import java.sql.Array; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.UUID; import java.util.stream.Collectors; -public class QueryMaker +public class NativeQueryMaker implements QueryMaker { private final QueryLifecycleFactory queryLifecycleFactory; private final PlannerContext plannerContext; private final ObjectMapper jsonMapper; + private final List> fieldMapping; + private final RelDataType resultType; - public QueryMaker( + public NativeQueryMaker( final QueryLifecycleFactory queryLifecycleFactory, final PlannerContext plannerContext, - final ObjectMapper 
jsonMapper + final ObjectMapper jsonMapper, + final List> fieldMapping, + final RelDataType resultType ) { this.queryLifecycleFactory = queryLifecycleFactory; this.plannerContext = plannerContext; this.jsonMapper = jsonMapper; + this.fieldMapping = fieldMapping; + this.resultType = resultType; } - public PlannerContext getPlannerContext() + @Override + public RelDataType getResultType() { - return plannerContext; + return resultType; } - public ObjectMapper getJsonMapper() + @Override + public boolean feature(QueryFeature feature) { - return jsonMapper; + switch (feature) { + case CAN_RUN_TIMESERIES: + case CAN_RUN_TOPN: + return true; + case CAN_READ_EXTERNAL_DATA: + case SCAN_CAN_ORDER_BY_NON_TIME: + return false; + default: + throw new IAE("Unrecognized feature: %s", feature); + } } + @Override public Sequence runQuery(final DruidQuery druidQuery) { final Query query = druidQuery.getQuery(); @@ -113,14 +135,17 @@ public Sequence runQuery(final DruidQuery druidQuery) rowOrder = druidQuery.getOutputRowSignature().getColumnNames(); } - return execute( - query, - rowOrder, + final List columnTypes = druidQuery.getOutputRowType() .getFieldList() .stream() .map(f -> f.getType().getSqlTypeName()) - .collect(Collectors.toList()) + .collect(Collectors.toList()); + + return execute( + query, + mapColumnList(rowOrder, fieldMapping), + mapColumnList(columnTypes, fieldMapping) ); } @@ -159,10 +184,10 @@ private Sequence execute(Query query, final List newFie final List resultArrayFields = toolChest.resultArraySignature(query).getColumnNames(); final Sequence resultArrays = toolChest.resultsAsArrays(query, results); - return remapFields(resultArrays, resultArrayFields, newFields, newTypes); + return mapResultSequence(resultArrays, resultArrayFields, newFields, newTypes); } - private Sequence remapFields( + private Sequence mapResultSequence( final Sequence sequence, final List originalFields, final List newFields, @@ -204,35 +229,6 @@ private Sequence remapFields( ); } - 
public static ColumnMetaData.Rep rep(final SqlTypeName sqlType) - { - if (SqlTypeName.CHAR_TYPES.contains(sqlType)) { - return ColumnMetaData.Rep.of(String.class); - } else if (sqlType == SqlTypeName.TIMESTAMP) { - return ColumnMetaData.Rep.of(Long.class); - } else if (sqlType == SqlTypeName.DATE) { - return ColumnMetaData.Rep.of(Integer.class); - } else if (sqlType == SqlTypeName.INTEGER) { - // use Number.class for exact numeric types since JSON transport might switch longs to integers - return ColumnMetaData.Rep.of(Number.class); - } else if (sqlType == SqlTypeName.BIGINT) { - // use Number.class for exact numeric types since JSON transport might switch longs to integers - return ColumnMetaData.Rep.of(Number.class); - } else if (sqlType == SqlTypeName.FLOAT) { - return ColumnMetaData.Rep.of(Float.class); - } else if (sqlType == SqlTypeName.DOUBLE || sqlType == SqlTypeName.DECIMAL) { - return ColumnMetaData.Rep.of(Double.class); - } else if (sqlType == SqlTypeName.BOOLEAN) { - return ColumnMetaData.Rep.of(Boolean.class); - } else if (sqlType == SqlTypeName.OTHER) { - return ColumnMetaData.Rep.of(Object.class); - } else if (sqlType == SqlTypeName.ARRAY) { - return ColumnMetaData.Rep.of(Array.class); - } else { - throw new ISE("No rep for SQL type[%s]", sqlType); - } - } - private Object coerce(final Object value, final SqlTypeName sqlType) { final Object coercedValue; @@ -368,4 +364,15 @@ private static DateTime coerceDateTime(Object value, SqlTypeName sqlType) } return dateTime; } + + private static List mapColumnList(final List in, final List> fieldMapping) + { + final List out = new ArrayList<>(fieldMapping.size()); + + for (final Pair entry : fieldMapping) { + out.add(in.get(entry.getKey())); + } + + return out; + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMakerFactory.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMakerFactory.java new file mode 100644 index 000000000000..7553f4630988 --- /dev/null +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMakerFactory.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.run; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.inject.Inject; +import org.apache.calcite.rel.RelRoot; +import org.apache.calcite.tools.ValidationException; +import org.apache.druid.guice.LazySingleton; +import org.apache.druid.server.QueryLifecycleFactory; +import org.apache.druid.sql.calcite.planner.PlannerContext; + +@LazySingleton +public class NativeQueryMakerFactory implements QueryMakerFactory +{ + public static final String TYPE = "native"; + + private final QueryLifecycleFactory queryLifecycleFactory; + private final ObjectMapper jsonMapper; + + @Inject + public NativeQueryMakerFactory( + final QueryLifecycleFactory queryLifecycleFactory, + final ObjectMapper jsonMapper + ) + { + this.queryLifecycleFactory = queryLifecycleFactory; + this.jsonMapper = jsonMapper; + } + + @Override + public QueryMaker buildForSelect(final RelRoot relRoot, final PlannerContext plannerContext) + { + return new NativeQueryMaker( + queryLifecycleFactory, + plannerContext, + jsonMapper, + relRoot.fields, + 
relRoot.validatedRowType + ); + } + + @Override + public QueryMaker buildForInsert( + final String targetDataSource, + final RelRoot relRoot, + final PlannerContext plannerContext + ) throws ValidationException + { + throw new ValidationException("Cannot execute INSERT queries."); + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryFeature.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryFeature.java new file mode 100644 index 000000000000..7a1cdee319ed --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryFeature.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.run; + +import org.apache.druid.sql.calcite.external.ExternalDataSource; + +/** + * Arguments to {@link QueryFeatureInspector#feature(QueryFeature)}. + */ +public enum QueryFeature +{ + /** + * Queries of type {@link org.apache.druid.query.timeseries.TimeseriesQuery} are usable. + */ + CAN_RUN_TIMESERIES, + + /** + * Queries of type {@link org.apache.druid.query.topn.TopNQuery} are usable. + */ + CAN_RUN_TOPN, + + /** + * Queries can use {@link ExternalDataSource}. 
+ */ + CAN_READ_EXTERNAL_DATA, + + /** + * Scan queries can use {@link org.apache.druid.query.scan.ScanQuery#getOrderBys()} that are based on something + * other than the "__time" column. + */ + SCAN_CAN_ORDER_BY_NON_TIME, +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryFeatureInspector.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryFeatureInspector.java new file mode 100644 index 000000000000..a0d9a11241dc --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryFeatureInspector.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.run; + +/** + * Gives the SQL-to-Druid query translator information about what features are supported by the {@link QueryMaker} + * that will execute the query. + */ +public interface QueryFeatureInspector +{ + /** + * Returns whether a feature is present or not. 
+ */ + boolean feature(QueryFeature feature); +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMaker.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMaker.java new file mode 100644 index 000000000000..c76504b2bddc --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMaker.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.run; + +import org.apache.calcite.rel.type.RelDataType; +import org.apache.druid.java.util.common.guava.Sequence; +import org.apache.druid.sql.calcite.rel.DruidQuery; + +/** + * Interface for executing Druid queries. Each one is created by a {@link QueryMakerFactory} and is tied to a + * specific SQL query. Extends {@link QueryFeatureInspector}, so calling code can tell what this executor supports. + */ +public interface QueryMaker extends QueryFeatureInspector +{ + /** + * Returns the SQL row type for this query. + */ + RelDataType getResultType(); + + /** + * Executes a given Druid query, which is expected to correspond to the SQL query that this QueryMaker was originally + * created for. The returned arrays match the row type given by {@link #getResultType()}. 
+ */ + Sequence runQuery(DruidQuery druidQuery); +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMakerFactory.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMakerFactory.java new file mode 100644 index 000000000000..559b607af8c9 --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMakerFactory.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.run; + +import org.apache.calcite.rel.RelRoot; +import org.apache.calcite.tools.ValidationException; +import org.apache.druid.sql.calcite.planner.PlannerContext; + +/** + * Interface for creating {@link QueryMaker}, which in turn are used to execute Druid queries. + */ +public interface QueryMakerFactory +{ + /** + * Create a {@link QueryMaker} for a SELECT query. 
+ * + * @param relRoot planned and validated rel + * @param plannerContext context for this query + * + * @return an executor for the provided query + * + * @throws ValidationException if this factory cannot build an executor for the provided query + */ + QueryMaker buildForSelect(RelRoot relRoot, PlannerContext plannerContext) throws ValidationException; + + /** + * Create a {@link QueryMaker} for an INSERT ... SELECT query. + * + * @param targetDataSource datasource for the INSERT portion of the query + * @param relRoot planned and validated rel for the SELECT portion of the query + * @param plannerContext context for this query + * + * @return an executor for the provided query + * + * @throws ValidationException if this factory cannot build an executor for the provided query + */ + QueryMaker buildForInsert( + String targetDataSource, + RelRoot relRoot, + PlannerContext plannerContext + ) throws ValidationException; +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java index 0c1dffa3d0e1..5db9bfa46895 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java @@ -791,7 +791,7 @@ DruidTable buildDruidTable(final String dataSource) } else { tableDataSource = new TableDataSource(dataSource); } - return new DruidTable(tableDataSource, builder.build(), isJoinable, isBroadcast); + return new DruidTable(tableDataSource, builder.build(), null, isJoinable, isBroadcast); } @VisibleForTesting diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/LookupSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/LookupSchema.java index b3400e7afca8..1fea9116ff5b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/LookupSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/LookupSchema.java @@ -59,7 +59,10 @@ 
protected Map getTableMap() for (final String lookupName : lookupProvider.getAllLookupNames()) { // all lookups should be also joinable through lookup joinable factory, and lookups are effectively broadcast // (if we ignore lookup tiers...) - tableMapBuilder.put(lookupName, new DruidTable(new LookupDataSource(lookupName), ROW_SIGNATURE, true, true)); + tableMapBuilder.put( + lookupName, + new DruidTable(new LookupDataSource(lookupName), ROW_SIGNATURE, null, true, true) + ); } return tableMapBuilder.build(); diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/table/DruidTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/table/DruidTable.java index 94da5ede16e5..dfbf9117cbc5 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/table/DruidTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/table/DruidTable.java @@ -19,6 +19,7 @@ package org.apache.druid.sql.calcite.table; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; import org.apache.calcite.config.CalciteConnectionConfig; import org.apache.calcite.plan.RelOptTable; @@ -34,27 +35,40 @@ import org.apache.calcite.sql.SqlNode; import org.apache.druid.query.DataSource; import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.sql.calcite.external.ExternalDataSource; +import org.apache.druid.sql.calcite.external.ExternalTableScan; +import javax.annotation.Nullable; import java.util.Objects; public class DruidTable implements TranslatableTable { private final DataSource dataSource; private final RowSignature rowSignature; + + @Nullable + private final ObjectMapper objectMapper; private final boolean joinable; private final boolean broadcast; public DruidTable( final DataSource dataSource, final RowSignature rowSignature, + @Nullable final ObjectMapper objectMapper, final boolean isJoinable, final boolean isBroadcast ) { this.dataSource = Preconditions.checkNotNull(dataSource, "dataSource"); this.rowSignature = 
Preconditions.checkNotNull(rowSignature, "rowSignature"); + this.objectMapper = objectMapper; this.joinable = isJoinable; this.broadcast = isBroadcast; + + if (dataSource instanceof ExternalDataSource && objectMapper == null) { + // objectMapper is used by ExternalTableScan to generate its digest. + throw new NullPointerException("ObjectMapper is required for external datasources"); + } } public DataSource getDataSource() @@ -115,7 +129,13 @@ public boolean rolledUpColumnValidInsideAgg( @Override public RelNode toRel(final RelOptTable.ToRelContext context, final RelOptTable table) { - return LogicalTableScan.create(context.getCluster(), table); + if (dataSource instanceof ExternalDataSource) { + // Cannot use LogicalTableScan here, because its digest is solely based on the name of the table macro. + // Must use our own class that computes its own digest. + return new ExternalTableScan(context.getCluster(), objectMapper, this); + } else { + return LogicalTableScan.create(context.getCluster(), table); + } } @Override diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/view/DruidViewMacro.java b/sql/src/main/java/org/apache/druid/sql/calcite/view/DruidViewMacro.java index 9df3b6694231..f1dbe0519d3b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/view/DruidViewMacro.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/view/DruidViewMacro.java @@ -60,9 +60,8 @@ public DruidViewMacro( public TranslatableTable apply(final List arguments) { final RelDataType rowType; - try (final DruidPlanner planner = plannerFactory.createPlanner(null)) { - - rowType = planner.plan(viewSql).rowType(); + try (final DruidPlanner planner = plannerFactory.createPlanner(viewSql, null)) { + rowType = planner.plan().rowType(); } catch (Exception e) { throw new RuntimeException(e); diff --git a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java index 5c920512c3a1..f23c92a6022a 100644 --- 
a/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java +++ b/sql/src/main/java/org/apache/druid/sql/http/SqlResource.java @@ -22,7 +22,6 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; -import com.google.common.collect.Iterables; import com.google.common.io.CountingOutputStream; import com.google.inject.Inject; import org.apache.calcite.plan.RelOptPlanner; @@ -45,7 +44,7 @@ import org.apache.druid.server.security.AuthorizationUtils; import org.apache.druid.server.security.AuthorizerMapper; import org.apache.druid.server.security.ForbiddenException; -import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; import org.apache.druid.sql.SqlLifecycle; import org.apache.druid.sql.SqlLifecycleFactory; import org.apache.druid.sql.SqlLifecycleManager; @@ -278,13 +277,13 @@ public Response cancelQuery( if (lifecycles.isEmpty()) { return Response.status(Status.NOT_FOUND).build(); } - Set resources = lifecycles + Set resources = lifecycles .stream() - .flatMap(lifecycle -> lifecycle.getAuthorizedResources().stream()) + .flatMap(lifecycle -> lifecycle.getRequiredResourceActions().stream()) .collect(Collectors.toSet()); Access access = AuthorizationUtils.authorizeAllResourceActions( req, - Iterables.transform(resources, AuthorizationUtils.RESOURCE_READ_RA_GENERATOR), + resources, authorizerMapper ); diff --git a/sql/src/test/java/org/apache/druid/sql/SqlLifecycleTest.java b/sql/src/test/java/org/apache/druid/sql/SqlLifecycleTest.java index 606591200131..e2ecd0db066b 100644 --- a/sql/src/test/java/org/apache/druid/sql/SqlLifecycleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/SqlLifecycleTest.java @@ -109,7 +109,7 @@ public void testStateTransition() DruidPlanner mockPlanner = EasyMock.createMock(DruidPlanner.class); PlannerContext mockPlannerContext = EasyMock.createMock(PlannerContext.class); 
ValidationResult validationResult = new ValidationResult(Collections.emptySet()); - EasyMock.expect(plannerFactory.createPlanner(EasyMock.anyObject())).andReturn(mockPlanner).once(); + EasyMock.expect(plannerFactory.createPlanner(EasyMock.eq(sql), EasyMock.anyObject())).andReturn(mockPlanner).once(); EasyMock.expect(mockPlanner.getPlannerContext()).andReturn(mockPlannerContext).once(); mockPlannerContext.setAuthenticationResult(CalciteTests.REGULAR_USER_AUTH_RESULT); EasyMock.expectLastCall(); @@ -118,7 +118,7 @@ public void testStateTransition() EasyMock.expect(plannerFactory.getAuthorizerMapper()).andReturn(CalciteTests.TEST_AUTHORIZER_MAPPER).once(); mockPlannerContext.setAuthorizationResult(Access.OK); EasyMock.expectLastCall(); - EasyMock.expect(mockPlanner.validate(sql)).andReturn(validationResult).once(); + EasyMock.expect(mockPlanner.validate()).andReturn(validationResult).once(); mockPlanner.close(); EasyMock.expectLastCall(); @@ -132,7 +132,7 @@ public void testStateTransition() // test prepare PrepareResult mockPrepareResult = EasyMock.createMock(PrepareResult.class); EasyMock.expect(plannerFactory.createPlannerWithContext(EasyMock.eq(mockPlannerContext))).andReturn(mockPlanner).once(); - EasyMock.expect(mockPlanner.prepare(sql)).andReturn(mockPrepareResult).once(); + EasyMock.expect(mockPlanner.prepare()).andReturn(mockPrepareResult).once(); mockPlanner.close(); EasyMock.expectLastCall(); EasyMock.replay(plannerFactory, serviceEmitter, requestLogger, mockPlanner, mockPlannerContext, mockPrepareResult); @@ -145,7 +145,7 @@ public void testStateTransition() // test plan PlannerResult mockPlanResult = EasyMock.createMock(PlannerResult.class); EasyMock.expect(plannerFactory.createPlannerWithContext(EasyMock.eq(mockPlannerContext))).andReturn(mockPlanner).once(); - EasyMock.expect(mockPlanner.plan(sql)).andReturn(mockPlanResult).once(); + EasyMock.expect(mockPlanner.plan()).andReturn(mockPlanResult).once(); mockPlanner.close(); EasyMock.expectLastCall(); 
EasyMock.replay(plannerFactory, serviceEmitter, requestLogger, mockPlanner, mockPlannerContext, mockPrepareResult, mockPlanResult); @@ -206,7 +206,7 @@ public void testStateTransitionHttpRequest() DruidPlanner mockPlanner = EasyMock.createMock(DruidPlanner.class); PlannerContext mockPlannerContext = EasyMock.createMock(PlannerContext.class); ValidationResult validationResult = new ValidationResult(Collections.emptySet()); - EasyMock.expect(plannerFactory.createPlanner(EasyMock.anyObject())).andReturn(mockPlanner).once(); + EasyMock.expect(plannerFactory.createPlanner(EasyMock.eq(sql), EasyMock.anyObject())).andReturn(mockPlanner).once(); EasyMock.expect(mockPlanner.getPlannerContext()).andReturn(mockPlannerContext).once(); mockPlannerContext.setAuthenticationResult(CalciteTests.REGULAR_USER_AUTH_RESULT); EasyMock.expectLastCall(); @@ -215,7 +215,7 @@ public void testStateTransitionHttpRequest() EasyMock.expect(plannerFactory.getAuthorizerMapper()).andReturn(CalciteTests.TEST_AUTHORIZER_MAPPER).once(); mockPlannerContext.setAuthorizationResult(Access.OK); EasyMock.expectLastCall(); - EasyMock.expect(mockPlanner.validate(sql)).andReturn(validationResult).once(); + EasyMock.expect(mockPlanner.validate()).andReturn(validationResult).once(); mockPlanner.close(); EasyMock.expectLastCall(); @@ -235,7 +235,7 @@ public void testStateTransitionHttpRequest() // test prepare PrepareResult mockPrepareResult = EasyMock.createMock(PrepareResult.class); EasyMock.expect(plannerFactory.createPlannerWithContext(EasyMock.eq(mockPlannerContext))).andReturn(mockPlanner).once(); - EasyMock.expect(mockPlanner.prepare(sql)).andReturn(mockPrepareResult).once(); + EasyMock.expect(mockPlanner.prepare()).andReturn(mockPrepareResult).once(); mockPlanner.close(); EasyMock.expectLastCall(); EasyMock.replay(plannerFactory, serviceEmitter, requestLogger, mockPlanner, mockPlannerContext, mockPrepareResult); @@ -248,7 +248,7 @@ public void testStateTransitionHttpRequest() // test plan PlannerResult 
mockPlanResult = EasyMock.createMock(PlannerResult.class); EasyMock.expect(plannerFactory.createPlannerWithContext(EasyMock.eq(mockPlannerContext))).andReturn(mockPlanner).once(); - EasyMock.expect(mockPlanner.plan(sql)).andReturn(mockPlanResult).once(); + EasyMock.expect(mockPlanner.plan()).andReturn(mockPlanResult).once(); mockPlanner.close(); EasyMock.expectLastCall(); EasyMock.replay(plannerFactory, serviceEmitter, requestLogger, mockPlanner, mockPlannerContext, mockPrepareResult, mockPlanResult); diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java index d767eff8665d..7a96085b50d2 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java @@ -65,6 +65,8 @@ import org.apache.druid.sql.calcite.planner.DruidOperatorTable; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerFactory; +import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; +import org.apache.druid.sql.calcite.run.QueryMakerFactory; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.schema.DruidSchemaName; import org.apache.druid.sql.calcite.schema.NamedSchema; @@ -209,6 +211,7 @@ public void configure(Binder binder) binder.bind(QueryScheduler.class) .toProvider(QuerySchedulerProvider.class) .in(LazySingleton.class); + binder.bind(QueryMakerFactory.class).to(NativeQueryMakerFactory.class); } } ) @@ -890,7 +893,10 @@ public int getMaxRowsPerFrame() CalciteTests.createSqlLifecycleFactory( new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + new NativeQueryMakerFactory( + CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + CalciteTests.getJsonMapper() + ), operatorTable, macroTable, plannerConfig, @@ 
-980,7 +986,10 @@ public int getMinRowsPerFrame() CalciteTests.createSqlLifecycleFactory( new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + new NativeQueryMakerFactory( + CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + CalciteTests.getJsonMapper() + ), operatorTable, macroTable, plannerConfig, diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java index fbfc2f1010c6..b0b26dd5f039 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java @@ -35,6 +35,7 @@ import org.apache.druid.sql.calcite.planner.DruidOperatorTable; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerFactory; +import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.util.CalciteTestBase; import org.apache.druid.sql.calcite.util.CalciteTests; @@ -91,7 +92,10 @@ public void setUp() throws Exception CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER); final PlannerFactory plannerFactory = new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + new NativeQueryMakerFactory( + CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + CalciteTests.getJsonMapper() + ), operatorTable, macroTable, plannerConfig, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java index 8c8704d7e99c..1a982beb37b1 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java @@ -23,6 +23,7 @@ import com.fasterxml.jackson.databind.InjectableValues; import com.fasterxml.jackson.databind.Module; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.module.SimpleModule; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.calcite.plan.RelOptPlanner; @@ -76,10 +77,11 @@ import org.apache.druid.server.security.AuthenticationResult; import org.apache.druid.server.security.AuthorizerMapper; import org.apache.druid.server.security.ForbiddenException; -import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; import org.apache.druid.sql.SqlLifecycle; import org.apache.druid.sql.SqlLifecycleFactory; import org.apache.druid.sql.calcite.expression.DruidExpression; +import org.apache.druid.sql.calcite.external.ExternalDataSource; import org.apache.druid.sql.calcite.planner.Calcites; import org.apache.druid.sql.calcite.planner.DruidOperatorTable; import org.apache.druid.sql.calcite.planner.PlannerConfig; @@ -478,7 +480,7 @@ public static void tearDownClass() throws IOException @Rule public QueryLogHook getQueryLogHook() { - return queryLogHook = QueryLogHook.create(queryJsonMapper); + return queryLogHook = QueryLogHook.create(createQueryJsonMapper()); } @Before @@ -555,7 +557,9 @@ public final void setMapperInjectableValues(ObjectMapper mapper, Map getJacksonModules() { - return new LookupSerdeModule().getJacksonModules(); + final List modules = new ArrayList<>(new LookupSerdeModule().getJacksonModules()); + modules.add(new SimpleModule().registerSubtypes(ExternalDataSource.class)); + return modules; } public void assertQueryIsUnplannable(final String sql) @@ -698,7 +702,7 @@ public void testQuery( /** * Override not just the outer query context, but also the contexts of all subqueries. 
*/ - private Query recursivelyOverrideContext(final Query query, final Map context) + public Query recursivelyOverrideContext(final Query query, final Map context) { return query.withDataSource(recursivelyOverrideContext(query.getDataSource(), context)) .withOverriddenContext(context); @@ -945,7 +949,7 @@ public void testQueryThrows( } } - public Set analyzeResources( + public Set analyzeResources( PlannerConfig plannerConfig, String sql, AuthenticationResult authenticationResult @@ -961,7 +965,7 @@ public Set analyzeResources( SqlLifecycle lifecycle = lifecycleFactory.factorize(); lifecycle.initialize(sql, ImmutableMap.of()); - return lifecycle.runAnalyzeResources(authenticationResult).getResources(); + return lifecycle.runAnalyzeResources(authenticationResult).getResourceActions(); } public SqlLifecycleFactory getSqlLifecycleFactory( @@ -984,7 +988,10 @@ public SqlLifecycleFactory getSqlLifecycleFactory( final PlannerFactory plannerFactory = new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + new TestQueryMakerFactory( + CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + objectMapper + ), operatorTable, macroTable, plannerConfig, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java new file mode 100644 index 000000000000..e58b1bf38030 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -0,0 +1,647 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import org.apache.druid.data.input.impl.CsvInputFormat; +import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.granularity.Granularities; +import org.apache.druid.java.util.common.jackson.JacksonUtils; +import org.apache.druid.query.Query; +import org.apache.druid.query.aggregation.CountAggregatorFactory; +import org.apache.druid.query.aggregation.LongSumAggregatorFactory; +import org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory; +import org.apache.druid.query.dimension.DefaultDimensionSpec; +import org.apache.druid.query.groupby.GroupByQuery; +import org.apache.druid.query.scan.ScanQuery; +import org.apache.druid.segment.column.ColumnType; +import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthenticationResult; +import org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; +import org.apache.druid.server.security.ResourceType; +import org.apache.druid.sql.SqlLifecycle; +import 
org.apache.druid.sql.SqlLifecycleFactory; +import org.apache.druid.sql.SqlPlanningException; +import org.apache.druid.sql.calcite.external.ExternalDataSource; +import org.apache.druid.sql.calcite.external.ExternalOperatorConversion; +import org.apache.druid.sql.calcite.filtration.Filtration; +import org.apache.druid.sql.calcite.planner.Calcites; +import org.apache.druid.sql.calcite.planner.PlannerConfig; +import org.apache.druid.sql.calcite.planner.PlannerContext; +import org.apache.druid.sql.calcite.util.CalciteTests; +import org.hamcrest.CoreMatchers; +import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; +import org.junit.internal.matchers.ThrowableMessageMatcher; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class CalciteInsertDmlTest extends BaseCalciteQueryTest +{ + private static final Map DEFAULT_CONTEXT = + ImmutableMap.builder() + .put(PlannerContext.CTX_SQL_QUERY_ID, DUMMY_SQL_ID) + .build(); + + private static final RowSignature FOO_TABLE_SIGNATURE = + RowSignature.builder() + .addTimeColumn() + .add("cnt", ColumnType.LONG) + .add("dim1", ColumnType.STRING) + .add("dim2", ColumnType.STRING) + .add("dim3", ColumnType.STRING) + .add("m1", ColumnType.FLOAT) + .add("m2", ColumnType.DOUBLE) + .add("unique_dim1", HyperUniquesAggregatorFactory.TYPE) + .build(); + + private final ExternalDataSource externalDataSource = new ExternalDataSource( + new InlineInputSource("a,b,1\nc,d,2\n"), + new CsvInputFormat(ImmutableList.of("x", "y", "z"), null, false, false, 0), + RowSignature.builder() + .add("x", ColumnType.STRING) + .add("y", ColumnType.STRING) + .add("z", ColumnType.LONG) + .build() + ); + + private boolean didTest = false; + + @After + @Override + public void tearDown() throws Exception + { + super.tearDown(); + + // Catch situations where tests forgot to call "verify" on their tester. 
+ if (!didTest) { + throw new ISE("Test was not run; did you call verify() on a tester?"); + } + } + + @Test + public void testInsertFromTable() + { + testInsertQuery() + .sql("INSERT INTO dst SELECT * FROM foo") + .expectTarget("dst", FOO_TABLE_SIGNATURE) + .expectResources(dataSourceRead("foo"), dataSourceWrite("dst")) + .expectQuery( + newScanQueryBuilder() + .dataSource("foo") + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("__time", "cnt", "dim1", "dim2", "dim3", "m1", "m2", "unique_dim1") + .build() + ) + .verify(); + } + + @Test + public void testInsertFromView() + { + testInsertQuery() + .sql("INSERT INTO dst SELECT * FROM view.aview") + .expectTarget("dst", RowSignature.builder().add("dim1_firstchar", ColumnType.STRING).build()) + .expectResources(viewRead("aview"), dataSourceWrite("dst")) + .expectQuery( + newScanQueryBuilder() + .dataSource("foo") + .intervals(querySegmentSpec(Filtration.eternity())) + .virtualColumns(expressionVirtualColumn("v0", "substring(\"dim1\", 0, 1)", ColumnType.STRING)) + .filters(selector("dim2", "a", null)) + .columns("v0") + .build() + ) + .verify(); + } + + @Test + public void testInsertIntoExistingTable() + { + testInsertQuery() + .sql("INSERT INTO foo SELECT * FROM foo") + .expectTarget("foo", FOO_TABLE_SIGNATURE) + .expectResources(dataSourceRead("foo"), dataSourceWrite("foo")) + .expectQuery( + newScanQueryBuilder() + .dataSource("foo") + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("__time", "cnt", "dim1", "dim2", "dim3", "m1", "m2", "unique_dim1") + .build() + ) + .verify(); + } + + @Test + public void testInsertIntoQualifiedTable() + { + testInsertQuery() + .sql("INSERT INTO druid.dst SELECT * FROM foo") + .expectTarget("dst", FOO_TABLE_SIGNATURE) + .expectResources(dataSourceRead("foo"), dataSourceWrite("dst")) + .expectQuery( + newScanQueryBuilder() + .dataSource("foo") + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("__time", "cnt", "dim1", "dim2", "dim3", "m1", 
"m2", "unique_dim1") + .build() + ) + .verify(); + } + + @Test + public void testInsertUsingColumnList() + { + testInsertQuery() + .sql("INSERT INTO dst (foo, bar) SELECT dim1, dim2 FROM foo") + .expectValidationError(SqlPlanningException.class, "INSERT with target column list is not supported.") + .verify(); + } + + @Test + public void testUpsert() + { + testInsertQuery() + .sql("UPSERT INTO dst SELECT * FROM foo") + .expectValidationError(SqlPlanningException.class, "UPSERT is not supported.") + .verify(); + } + + @Test + public void testInsertIntoSystemTable() + { + testInsertQuery() + .sql("INSERT INTO INFORMATION_SCHEMA.COLUMNS SELECT * FROM foo") + .expectValidationError( + SqlPlanningException.class, + "Cannot INSERT into [INFORMATION_SCHEMA.COLUMNS] because it is not a Druid datasource." + ) + .verify(); + } + + @Test + public void testInsertIntoView() + { + testInsertQuery() + .sql("INSERT INTO view.aview SELECT * FROM foo") + .expectValidationError( + SqlPlanningException.class, + "Cannot INSERT into [view.aview] because it is not a Druid datasource." + ) + .verify(); + } + + @Test + public void testInsertFromUnauthorizedDataSource() + { + testInsertQuery() + .sql("INSERT INTO dst SELECT * FROM \"%s\"", CalciteTests.FORBIDDEN_DATASOURCE) + .expectValidationError(ForbiddenException.class) + .verify(); + } + + @Test + public void testInsertIntoUnauthorizedDataSource() + { + testInsertQuery() + .sql("INSERT INTO \"%s\" SELECT * FROM foo", CalciteTests.FORBIDDEN_DATASOURCE) + .expectValidationError(ForbiddenException.class) + .verify(); + } + + @Test + public void testInsertIntoNonexistentSchema() + { + testInsertQuery() + .sql("INSERT INTO nonexistent.dst SELECT * FROM foo") + .expectValidationError( + SqlPlanningException.class, + "Cannot INSERT into [nonexistent.dst] because it is not a Druid datasource." 
+ ) + .verify(); + } + + @Test + public void testInsertFromExternal() + { + testInsertQuery() + .sql("INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)) + .authentication(CalciteTests.SUPER_USER_AUTH_RESULT) + .expectTarget("dst", externalDataSource.getSignature()) + .expectResources(dataSourceWrite("dst"), ExternalOperatorConversion.EXTERNAL_RESOURCE_ACTION) + .expectQuery( + newScanQueryBuilder() + .dataSource(externalDataSource) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("x", "y", "z") + .build() + ) + .verify(); + } + + @Test + public void testExplainInsertFromExternal() throws Exception + { + // Skip vectorization since otherwise the "context" will change for each subtest. + skipVectorize(); + + // Use testQuery for EXPLAIN (not testInsertQuery). + testQuery( + new PlannerConfig(), + StringUtils.format("EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)), + CalciteTests.SUPER_USER_AUTH_RESULT, + ImmutableList.of(), + ImmutableList.of( + new Object[]{ + "DruidQueryRel(query=[" + + queryJsonMapper.writeValueAsString( + newScanQueryBuilder() + .dataSource(externalDataSource) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("x", "y", "z") + .context( + queryJsonMapper.readValue( + "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}", + JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT + ) + ) + .build() + ) + + "], signature=[{x:STRING, y:STRING, z:LONG}])\n", + "[{\"name\":\"EXTERNAL\",\"type\":\"EXTERNAL\"},{\"name\":\"dst\",\"type\":\"DATASOURCE\"}]" + } + ) + ); + + // Not using testInsertQuery, so must set didTest manually to satisfy the check in tearDown. + didTest = true; + } + + @Test + public void testExplainInsertFromExternalUnauthorized() + { + // Use testQuery for EXPLAIN (not testInsertQuery). 
+ Assert.assertThrows( + ForbiddenException.class, + () -> + testQuery( + StringUtils.format("EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)), + ImmutableList.of(), + ImmutableList.of() + ) + ); + + // Not using testInsertQuery, so must set didTest manually to satisfy the check in tearDown. + didTest = true; + } + + @Test + public void testInsertFromExternalUnauthorized() + { + testInsertQuery() + .sql("INSERT INTO dst SELECT * FROM %s", externSql(externalDataSource)) + .expectValidationError(ForbiddenException.class) + .verify(); + } + + @Test + public void testInsertFromExternalProjectSort() + { + // INSERT with a particular column ordering. + + testInsertQuery() + .sql("INSERT INTO dst SELECT x || y AS xy, z FROM %s ORDER BY 1, 2", externSql(externalDataSource)) + .authentication(CalciteTests.SUPER_USER_AUTH_RESULT) + .expectTarget("dst", RowSignature.builder().add("xy", ColumnType.STRING).add("z", ColumnType.LONG).build()) + .expectResources(dataSourceWrite("dst"), ExternalOperatorConversion.EXTERNAL_RESOURCE_ACTION) + .expectQuery( + newScanQueryBuilder() + .dataSource(externalDataSource) + .intervals(querySegmentSpec(Filtration.eternity())) + .virtualColumns(expressionVirtualColumn("v0", "concat(\"x\",\"y\")", ColumnType.STRING)) + .columns("v0", "z") + .orderBy( + ImmutableList.of( + new ScanQuery.OrderBy("v0", ScanQuery.Order.ASCENDING), + new ScanQuery.OrderBy("z", ScanQuery.Order.ASCENDING) + ) + ) + .build() + ) + .verify(); + } + + @Test + public void testInsertFromExternalAggregate() + { + // INSERT with rollup. 
+ + testInsertQuery() + .sql( + "INSERT INTO dst SELECT x, SUM(z) AS sum_z, COUNT(*) AS cnt FROM %s GROUP BY 1", + externSql(externalDataSource) + ) + .authentication(CalciteTests.SUPER_USER_AUTH_RESULT) + .expectTarget( + "dst", + RowSignature.builder() + .add("x", ColumnType.STRING) + .add("sum_z", ColumnType.LONG) + .add("cnt", ColumnType.LONG) + .build() + ) + .expectResources(dataSourceWrite("dst"), ExternalOperatorConversion.EXTERNAL_RESOURCE_ACTION) + .expectQuery( + GroupByQuery.builder() + .setDataSource(externalDataSource) + .setInterval(querySegmentSpec(Filtration.eternity())) + .setGranularity(Granularities.ALL) + .setDimensions(dimensions(new DefaultDimensionSpec("x", "d0"))) + .setAggregatorSpecs( + new LongSumAggregatorFactory("a0", "z"), + new CountAggregatorFactory("a1") + ) + .build() + ) + .verify(); + } + + @Test + public void testInsertFromExternalAggregateAll() + { + // INSERT with rollup into a single row (no GROUP BY exprs). + + testInsertQuery() + .sql( + "INSERT INTO dst SELECT COUNT(*) AS cnt FROM %s", + externSql(externalDataSource) + ) + .authentication(CalciteTests.SUPER_USER_AUTH_RESULT) + .expectTarget( + "dst", + RowSignature.builder() + .add("cnt", ColumnType.LONG) + .build() + ) + .expectResources(dataSourceWrite("dst"), ExternalOperatorConversion.EXTERNAL_RESOURCE_ACTION) + .expectQuery( + GroupByQuery.builder() + .setDataSource(externalDataSource) + .setInterval(querySegmentSpec(Filtration.eternity())) + .setGranularity(Granularities.ALL) + .setAggregatorSpecs(new CountAggregatorFactory("a0")) + .build() + ) + .verify(); + } + + private String externSql(final ExternalDataSource externalDataSource) + { + try { + return StringUtils.format( + "TABLE(extern(%s, %s, %s))", + Calcites.escapeStringLiteral(queryJsonMapper.writeValueAsString(externalDataSource.getInputSource())), + Calcites.escapeStringLiteral(queryJsonMapper.writeValueAsString(externalDataSource.getInputFormat())), + 
Calcites.escapeStringLiteral(queryJsonMapper.writeValueAsString(externalDataSource.getSignature())) + ); + } + catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + private InsertDmlTester testInsertQuery() + { + return new InsertDmlTester(); + } + + public class InsertDmlTester + { + private String sql; + private PlannerConfig plannerConfig = new PlannerConfig(); + private Map queryContext = DEFAULT_CONTEXT; + private AuthenticationResult authenticationResult = CalciteTests.REGULAR_USER_AUTH_RESULT; + private String expectedTargetDataSource; + private RowSignature expectedTargetSignature; + private List expectedResources; + private Query expectedQuery; + private Matcher validationErrorMatcher; + + private InsertDmlTester() + { + // Nothing to do. + } + + public InsertDmlTester sql(final String sql) + { + this.sql = sql; + return this; + } + + private InsertDmlTester sql(final String sqlPattern, final Object arg, final Object... otherArgs) + { + final Object[] args = new Object[otherArgs.length + 1]; + args[0] = arg; + System.arraycopy(otherArgs, 0, args, 1, otherArgs.length); + this.sql = StringUtils.format(sqlPattern, args); + return this; + } + + public InsertDmlTester context(final Map context) + { + this.queryContext = context; + return this; + } + + public InsertDmlTester authentication(final AuthenticationResult authenticationResult) + { + this.authenticationResult = authenticationResult; + return this; + } + + public InsertDmlTester expectTarget( + final String expectedTargetDataSource, + final RowSignature expectedTargetSignature + ) + { + this.expectedTargetDataSource = Preconditions.checkNotNull(expectedTargetDataSource, "expectedTargetDataSource"); + this.expectedTargetSignature = Preconditions.checkNotNull(expectedTargetSignature, "expectedTargetSignature"); + return this; + } + + public InsertDmlTester expectResources(final ResourceAction... 
expectedResources) + { + this.expectedResources = Arrays.asList(expectedResources); + return this; + } + + @SuppressWarnings("rawtypes") + public InsertDmlTester expectQuery(final Query expectedQuery) + { + this.expectedQuery = expectedQuery; + return this; + } + + public InsertDmlTester expectValidationError(Matcher validationErrorMatcher) + { + this.validationErrorMatcher = validationErrorMatcher; + return this; + } + + public InsertDmlTester expectValidationError(Class clazz) + { + return expectValidationError(CoreMatchers.instanceOf(clazz)); + } + + public InsertDmlTester expectValidationError(Class clazz, String message) + { + return expectValidationError( + CoreMatchers.allOf( + CoreMatchers.instanceOf(clazz), + ThrowableMessageMatcher.hasMessage(CoreMatchers.equalTo(message)) + ) + ); + } + + public void verify() + { + if (didTest) { + // It's good form to only do one test per method. + // This also helps us ensure that "verify" actually does get called. + throw new ISE("Use one @Test method per tester"); + } + + didTest = true; + + if (sql == null) { + throw new ISE("Test must have SQL statement"); + } + + try { + log.info("SQL: %s", sql); + queryLogHook.clearRecordedQueries(); + + if (validationErrorMatcher != null) { + verifyValidationError(); + } else { + verifySuccess(); + } + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + + private void verifyValidationError() + { + if (expectedTargetDataSource != null) { + throw new ISE("Test must not have expectedTargetDataSource"); + } + + if (expectedResources != null) { + throw new ISE("Test must not have expectedResources"); + } + + if (expectedQuery != null) { + throw new ISE("Test must not have expectedQuery"); + } + + final SqlLifecycleFactory sqlLifecycleFactory = getSqlLifecycleFactory( + plannerConfig, + createOperatorTable(), + createMacroTable(), + CalciteTests.TEST_AUTHORIZER_MAPPER, + queryJsonMapper + ); + + final SqlLifecycle sqlLifecycle = sqlLifecycleFactory.factorize(); + 
sqlLifecycle.initialize(sql, queryContext); + + final Throwable e = Assert.assertThrows( + Throwable.class, + () -> sqlLifecycle.validateAndAuthorize(authenticationResult) + ); + + MatcherAssert.assertThat(e, validationErrorMatcher); + Assert.assertTrue(queryLogHook.getRecordedQueries().isEmpty()); + } + + private void verifySuccess() throws Exception + { + if (expectedTargetDataSource == null) { + throw new ISE("Test must have expectedTargetDataSource"); + } + + if (expectedResources == null) { + throw new ISE("Test must have expectedResources"); + } + + final List expectedQueries = + expectedQuery == null + ? Collections.emptyList() + : Collections.singletonList(recursivelyOverrideContext(expectedQuery, queryContext)); + + Assert.assertEquals( + ImmutableSet.copyOf(expectedResources), + analyzeResources(plannerConfig, sql, authenticationResult) + ); + + final List results = + getResults(plannerConfig, queryContext, Collections.emptyList(), sql, authenticationResult); + + verifyResults( + sql, + expectedQueries, + Collections.singletonList(new Object[]{expectedTargetDataSource, expectedTargetSignature}), + results + ); + } + } + + private static ResourceAction viewRead(final String viewName) + { + return new ResourceAction(new Resource(viewName, ResourceType.VIEW), Action.READ); + } + + private static ResourceAction dataSourceRead(final String dataSource) + { + return new ResourceAction(new Resource(dataSource, ResourceType.DATASOURCE), Action.READ); + } + + private static ResourceAction dataSourceWrite(final String dataSource) + { + return new ResourceAction(new Resource(dataSource, ResourceType.DATASOURCE), Action.WRITE); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java index 402fffc4faf2..71dd52dcc97c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java @@ -20,7 +20,9 @@ package org.apache.druid.sql.calcite; import com.google.common.collect.ImmutableSet; +import org.apache.druid.server.security.Action; import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; import org.apache.druid.server.security.ResourceType; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.util.CalciteTests; @@ -36,7 +38,7 @@ public void testTable() { final String sql = "SELECT COUNT(*) FROM foo WHERE foo.dim1 <> 'z'"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -44,7 +46,7 @@ public void testTable() Assert.assertEquals( ImmutableSet.of( - new Resource("foo", ResourceType.DATASOURCE) + new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ) ), requiredResources ); @@ -55,7 +57,7 @@ public void testConfusingTable() { final String sql = "SELECT COUNT(*) FROM foo as druid WHERE druid.dim1 <> 'z'"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -63,7 +65,7 @@ public void testConfusingTable() Assert.assertEquals( ImmutableSet.of( - new Resource("foo", ResourceType.DATASOURCE) + new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ) ), requiredResources ); @@ -81,7 +83,7 @@ public void testSubquery() + " )\n" + ")"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -89,8 +91,8 @@ public void testSubquery() Assert.assertEquals( ImmutableSet.of( - new Resource("foo", ResourceType.DATASOURCE), - new Resource("numfoo", ResourceType.DATASOURCE) + new ResourceAction(new Resource("foo", 
ResourceType.DATASOURCE), Action.READ), + new ResourceAction(new Resource("numfoo", ResourceType.DATASOURCE), Action.READ) ), requiredResources ); @@ -107,7 +109,7 @@ public void testSubqueryUnion() + " FROM (SELECT * FROM druid.foo UNION ALL SELECT * FROM druid.foo2)\n" + " GROUP BY dim2\n" + ")"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -115,8 +117,8 @@ public void testSubqueryUnion() Assert.assertEquals( ImmutableSet.of( - new Resource("foo", ResourceType.DATASOURCE), - new Resource("foo2", ResourceType.DATASOURCE) + new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ), + new ResourceAction(new Resource("foo2", ResourceType.DATASOURCE), Action.READ) ), requiredResources ); @@ -127,7 +129,7 @@ public void testJoin() { final String sql = "SELECT COUNT(*) FROM foo INNER JOIN numfoo ON foo.dim1 = numfoo.dim1 WHERE numfoo.dim1 <> 'z'"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -135,8 +137,8 @@ public void testJoin() Assert.assertEquals( ImmutableSet.of( - new Resource("foo", ResourceType.DATASOURCE), - new Resource("numfoo", ResourceType.DATASOURCE) + new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ), + new ResourceAction(new Resource("numfoo", ResourceType.DATASOURCE), Action.READ) ), requiredResources ); @@ -147,7 +149,7 @@ public void testView() { final String sql = "SELECT COUNT(*) FROM view.aview as druid WHERE dim1_firstchar <> 'z'"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -155,7 +157,7 @@ public void testView() Assert.assertEquals( ImmutableSet.of( - new Resource("aview", ResourceType.VIEW) + new ResourceAction(new Resource("aview", ResourceType.VIEW), 
Action.READ) ), requiredResources ); @@ -173,7 +175,7 @@ public void testSubqueryView() + " )\n" + ")"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -181,8 +183,8 @@ public void testSubqueryView() Assert.assertEquals( ImmutableSet.of( - new Resource("foo", ResourceType.DATASOURCE), - new Resource("cview", ResourceType.VIEW) + new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ), + new ResourceAction(new Resource("cview", ResourceType.VIEW), Action.READ) ), requiredResources ); @@ -193,7 +195,7 @@ public void testJoinView() { final String sql = "SELECT COUNT(*) FROM view.cview as aview INNER JOIN numfoo ON aview.dim2 = numfoo.dim2 WHERE numfoo.dim1 <> 'z'"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -201,8 +203,8 @@ public void testJoinView() Assert.assertEquals( ImmutableSet.of( - new Resource("cview", ResourceType.VIEW), - new Resource("numfoo", ResourceType.DATASOURCE) + new ResourceAction(new Resource("cview", ResourceType.VIEW), Action.READ), + new ResourceAction(new Resource("numfoo", ResourceType.DATASOURCE), Action.READ) ), requiredResources ); @@ -213,7 +215,7 @@ public void testConfusingViewIdentifiers() { final String sql = "SELECT COUNT(*) FROM view.dview as druid WHERE druid.numfoo <> 'z'"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -221,7 +223,7 @@ public void testConfusingViewIdentifiers() Assert.assertEquals( ImmutableSet.of( - new Resource("dview", ResourceType.VIEW) + new ResourceAction(new Resource("dview", ResourceType.VIEW), Action.READ) ), requiredResources ); @@ -231,7 +233,7 @@ public void testConfusingViewIdentifiers() public void testDynamicParameters() { final String sql = 
"SELECT SUBSTRING(dim2, CAST(? as BIGINT), CAST(? as BIGINT)) FROM druid.foo LIMIT ?"; - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( PLANNER_CONFIG_DEFAULT, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -239,7 +241,7 @@ public void testDynamicParameters() Assert.assertEquals( ImmutableSet.of( - new Resource("foo", ResourceType.DATASOURCE) + new ResourceAction(new Resource("foo", ResourceType.DATASOURCE), Action.READ) ), requiredResources ); @@ -263,7 +265,7 @@ public void testSysTables() private void testSysTable(String sql, String name, PlannerConfig plannerConfig) { - Set requiredResources = analyzeResources( + Set requiredResources = analyzeResources( plannerConfig, sql, CalciteTests.REGULAR_USER_AUTH_RESULT @@ -273,7 +275,7 @@ private void testSysTable(String sql, String name, PlannerConfig plannerConfig) } else { Assert.assertEquals( ImmutableSet.of( - new Resource(name, ResourceType.SYSTEM_TABLE) + new ResourceAction(new Resource(name, ResourceType.SYSTEM_TABLE), Action.READ) ), requiredResources ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java index 337ffd24cbbf..7bdc0d92f92c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java @@ -44,6 +44,7 @@ import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerFactory; import org.apache.druid.sql.calcite.planner.PlannerResult; +import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.util.CalciteTests; import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker; @@ -132,7 +133,10 @@ public static void setupClass() 
CalciteTests.createMockRootSchema(CONGLOMERATE, WALKER, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER); PLANNER_FACTORY = new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(WALKER, CONGLOMERATE), + new NativeQueryMakerFactory( + CalciteTests.createMockQueryLifecycleFactory(WALKER, CONGLOMERATE), + CalciteTests.getJsonMapper() + ), CalciteTests.createOperatorTable(), CalciteTests.createExprMacroTable(), plannerConfig, @@ -183,8 +187,8 @@ public static void sanityTestVectorizedSqlQueries(PlannerFactory plannerFactory, final DruidPlanner vectorPlanner = plannerFactory.createPlannerForTesting(vector, query); final DruidPlanner nonVectorPlanner = plannerFactory.createPlannerForTesting(nonvector, query) ) { - final PlannerResult vectorPlan = vectorPlanner.plan(query); - final PlannerResult nonVectorPlan = nonVectorPlanner.plan(query); + final PlannerResult vectorPlan = vectorPlanner.plan(); + final PlannerResult nonVectorPlan = nonVectorPlanner.plan(); final Sequence vectorSequence = vectorPlan.run(); final Sequence nonVectorSequence = nonVectorPlan.run(); Yielder vectorizedYielder = Yielders.each(vectorSequence); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/TestInsertQueryMaker.java b/sql/src/test/java/org/apache/druid/sql/calcite/TestInsertQueryMaker.java new file mode 100644 index 000000000000..5169a4d53d50 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/TestInsertQueryMaker.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite; + +import com.google.common.collect.ImmutableList; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.runtime.Hook; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.guava.Sequence; +import org.apache.druid.java.util.common.guava.Sequences; +import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.sql.calcite.rel.DruidQuery; +import org.apache.druid.sql.calcite.run.QueryFeature; +import org.apache.druid.sql.calcite.run.QueryMaker; + +/** + * QueryMaker used by {@link CalciteInsertDmlTest}. + */ +public class TestInsertQueryMaker implements QueryMaker +{ + private final RelDataType resultType; + private final String targetDataSource; + private final RowSignature signature; + + public TestInsertQueryMaker( + final RelDataTypeFactory typeFactory, + final String targetDataSource, + final RowSignature signature + ) + { + this.resultType = typeFactory.createStructType( + ImmutableList.of( + typeFactory.createSqlType(SqlTypeName.VARCHAR), + typeFactory.createSqlType(SqlTypeName.OTHER) + ), + ImmutableList.of("dataSource", "signature") + ); + this.targetDataSource = targetDataSource; + this.signature = signature; + } + + @Override + public boolean feature(final QueryFeature feature) + { + switch (feature) { + // INSERT queries should stick to groupBy, scan. 
+ case CAN_RUN_TIMESERIES: + case CAN_RUN_TOPN: + return false; + + // INSERT uses external data. + case CAN_READ_EXTERNAL_DATA: + return true; + + // INSERT uses Scan + ORDER BY. + case SCAN_CAN_ORDER_BY_NON_TIME: + return true; + + default: + throw new IAE("Unrecognized feature: %s", feature); + } + } + + @Override + public RelDataType getResultType() + { + return resultType; + } + + @Override + public Sequence runQuery(final DruidQuery druidQuery) + { + // Don't actually execute anything, but do record information that tests will check for. + + // 1) Add the query to Hook.QUERY_PLAN, so it gets picked up by QueryLogHook. + Hook.QUERY_PLAN.run(druidQuery.getQuery()); + + // 2) Return the dataSource and signature of the insert operation, so tests can confirm they are correct. + return Sequences.simple(ImmutableList.of(new Object[]{targetDataSource, signature})); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/TestQueryMakerFactory.java b/sql/src/test/java/org/apache/druid/sql/calcite/TestQueryMakerFactory.java new file mode 100644 index 000000000000..c2fbe5aeeefd --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/TestQueryMakerFactory.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.calcite.rel.RelRoot; +import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.server.QueryLifecycleFactory; +import org.apache.druid.sql.calcite.planner.PlannerContext; +import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; +import org.apache.druid.sql.calcite.run.QueryMaker; +import org.apache.druid.sql.calcite.run.QueryMakerFactory; +import org.apache.druid.sql.calcite.table.RowSignatures; + +public class TestQueryMakerFactory implements QueryMakerFactory +{ + private final QueryLifecycleFactory queryLifecycleFactory; + private final ObjectMapper jsonMapper; + + TestQueryMakerFactory( + final QueryLifecycleFactory queryLifecycleFactory, + final ObjectMapper jsonMapper + ) + { + this.queryLifecycleFactory = queryLifecycleFactory; + this.jsonMapper = jsonMapper; + } + + @Override + public QueryMaker buildForSelect(RelRoot relRoot, PlannerContext plannerContext) + { + return new NativeQueryMakerFactory(queryLifecycleFactory, jsonMapper).buildForSelect(relRoot, plannerContext); + } + + @Override + public QueryMaker buildForInsert(String targetDataSource, RelRoot relRoot, PlannerContext plannerContext) + { + final RowSignature signature = RowSignatures.fromRelDataType( + relRoot.validatedRowType.getFieldNames(), + relRoot.validatedRowType + ); + + return new TestInsertQueryMaker(relRoot.rel.getCluster().getTypeFactory(), targetDataSource, signature); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/expression/ExpressionTestHelper.java b/sql/src/test/java/org/apache/druid/sql/calcite/expression/ExpressionTestHelper.java index 3101eb0ff5a9..2e58c0a2dd9f 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/expression/ExpressionTestHelper.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/expression/ExpressionTestHelper.java @@ -71,8 +71,10 @@ class ExpressionTestHelper { private static final PlannerContext PLANNER_CONTEXT = PlannerContext.create( + "SELECT 1", // The actual query isn't important for this test CalciteTests.createOperatorTable(), CalciteTests.createExprMacroTable(), + CalciteTests.getJsonMapper(), new PlannerConfig(), new DruidSchemaCatalog( EasyMock.createMock(SchemaPlus.class), diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/planner/CalcitePlannerModuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/planner/CalcitePlannerModuleTest.java index 90dfcbada486..b84ec3345538 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/planner/CalcitePlannerModuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/planner/CalcitePlannerModuleTest.java @@ -115,8 +115,10 @@ public void testDruidOperatorTableIsInjectable() { DruidOperatorTable operatorTable = injector.getInstance(DruidOperatorTable.class); Assert.assertNotNull(operatorTable); + + // Should be a singleton. DruidOperatorTable other = injector.getInstance(DruidOperatorTable.class); - Assert.assertNotSame(other, operatorTable); + Assert.assertSame(other, operatorTable); } @Test @@ -124,8 +126,10 @@ public void testPlannerFactoryIsInjectable() { PlannerFactory plannerFactory = injector.getInstance(PlannerFactory.class); Assert.assertNotNull(PlannerFactory.class); + + // Should be a singleton. 
PlannerFactory other = injector.getInstance(PlannerFactory.class); - Assert.assertNotSame(other, plannerFactory); + Assert.assertSame(other, plannerFactory); } @Test diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/planner/DruidRexExecutorTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/planner/DruidRexExecutorTest.java index a44995e489f1..abaab15b3a2d 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/planner/DruidRexExecutorTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/planner/DruidRexExecutorTest.java @@ -75,11 +75,13 @@ public class DruidRexExecutorTest extends InitializedNullHandlingTest .build(); private static final PlannerContext PLANNER_CONTEXT = PlannerContext.create( + "SELECT 1", // The actual query isn't important for this test new DruidOperatorTable( Collections.emptySet(), ImmutableSet.of(new DirectOperatorConversion(OPERATOR, "hyper_unique")) ), CalciteTests.createExprMacroTable(), + CalciteTests.getJsonMapper(), new PlannerConfig(), new DruidSchemaCatalog( EasyMock.createMock(SchemaPlus.class), diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidUnionDataSourceRuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidUnionDataSourceRuleTest.java index b9dab37e9bbf..77ab336de1b3 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidUnionDataSourceRuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/rule/DruidUnionDataSourceRuleTest.java @@ -50,6 +50,7 @@ public class DruidUnionDataSourceRuleTest .add("col1", ColumnType.STRING) .add("col2", ColumnType.LONG) .build(), + null, false, false ); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java index 451e16f4c166..2553f998d38d 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java @@ -36,6 +36,7 @@ import org.apache.druid.discovery.DruidNodeDiscoveryProvider; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.LifecycleModule; +import org.apache.druid.guice.annotations.Json; import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider; import org.apache.druid.query.lookup.LookupReferencesManager; import org.apache.druid.segment.join.JoinableFactory; @@ -118,7 +119,7 @@ public void setUp() .annotatedWith(IndexingService.class) .toInstance(overlordDruidLeaderClient); binder.bind(DruidNodeDiscoveryProvider.class).toInstance(druidNodeDiscoveryProvider); - binder.bind(ObjectMapper.class).toInstance(objectMapper); + binder.bind(ObjectMapper.class).annotatedWith(Json.class).toInstance(objectMapper); binder.bindScope(LazySingleton.class, Scopes.SINGLETON); binder.bind(LookupExtractorFactoryContainerProvider.class).toInstance(lookupReferencesManager); }, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java index cbc5849b19c4..337890a20cbc 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java @@ -115,6 +115,7 @@ import org.apache.druid.sql.SqlLifecycleFactory; import org.apache.druid.sql.calcite.aggregation.SqlAggregationModule; import org.apache.druid.sql.calcite.expression.builtin.QueryLookupOperatorConversion; +import org.apache.druid.sql.calcite.external.ExternalOperatorConversion; import org.apache.druid.sql.calcite.planner.DruidOperatorTable; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerFactory; @@ -131,7 +132,6 @@ import org.apache.druid.sql.calcite.schema.SystemSchema; import org.apache.druid.sql.calcite.schema.ViewSchema; import 
org.apache.druid.sql.calcite.view.DruidViewMacroFactory; -import org.apache.druid.sql.calcite.view.NoopViewManager; import org.apache.druid.sql.calcite.view.ViewManager; import org.apache.druid.sql.guice.SqlBindings; import org.apache.druid.timeline.DataSegment; @@ -191,8 +191,10 @@ public Authorizer getAuthorizer(String name) return new Access(false); } else if (ResourceType.VIEW.equals(resource.getType()) && resource.getName().equals("forbiddenView")) { return new Access(false); - } else { + } else if (ResourceType.DATASOURCE.equals(resource.getType()) || ResourceType.VIEW.equals(resource.getType())) { return Access.OK; + } else { + return new Access(false); } }; } @@ -259,10 +261,11 @@ public AuthenticationResult createEscalatedAuthenticationResult() new LookupSerdeModule().getJacksonModules() ); mapper.setInjectableValues( - new InjectableValues.Std().addValue(ExprMacroTable.class.getName(), TestExprMacroTable.INSTANCE) - .addValue(ObjectMapper.class.getName(), mapper) - .addValue(DataSegment.PruneSpecsHolder.class, DataSegment.PruneSpecsHolder.DEFAULT) - .addValue(LookupExtractorFactoryContainerProvider.class.getName(), lookupProvider) + new InjectableValues.Std() + .addValue(ExprMacroTable.class.getName(), TestExprMacroTable.INSTANCE) + .addValue(ObjectMapper.class.getName(), mapper) + .addValue(DataSegment.PruneSpecsHolder.class, DataSegment.PruneSpecsHolder.DEFAULT) + .addValue(LookupExtractorFactoryContainerProvider.class.getName(), lookupProvider) ); binder.bind(Key.get(ObjectMapper.class, Json.class)).toInstance( mapper @@ -271,6 +274,9 @@ public AuthenticationResult createEscalatedAuthenticationResult() // This Module is just to get a LookupExtractorFactoryContainerProvider with a usable "lookyloo" lookup. binder.bind(LookupExtractorFactoryContainerProvider.class).toInstance(lookupProvider); SqlBindings.addOperatorConversion(binder, QueryLookupOperatorConversion.class); + + // Add "EXTERN" table macro, for CalciteInsertDmlTest. 
+ SqlBindings.addOperatorConversion(binder, ExternalOperatorConversion.class); }, new SqlAggregationModule() ); @@ -1149,56 +1155,34 @@ public static DruidSchemaCatalog createMockRootSchema( final AuthorizerMapper authorizerMapper ) { - DruidSchema druidSchema = createMockSchema(conglomerate, walker, plannerConfig); - SystemSchema systemSchema = - CalciteTests.createMockSystemSchema(druidSchema, walker, plannerConfig, authorizerMapper); - - LookupSchema lookupSchema = CalciteTests.createMockLookupSchema(); - SchemaPlus rootSchema = CalciteSchema.createRootSchema(false, false).plus(); - Set namedSchemas = ImmutableSet.of( - new NamedDruidSchema(druidSchema, CalciteTests.DRUID_SCHEMA_NAME), - new NamedSystemSchema(plannerConfig, systemSchema), - new NamedLookupSchema(lookupSchema) - ); - DruidSchemaCatalog catalog = new DruidSchemaCatalog( - rootSchema, - namedSchemas.stream().collect(Collectors.toMap(NamedSchema::getSchemaName, x -> x)) - ); - InformationSchema informationSchema = - new InformationSchema( - catalog, - authorizerMapper - ); - rootSchema.add(CalciteTests.DRUID_SCHEMA_NAME, druidSchema); - rootSchema.add(CalciteTests.INFORMATION_SCHEMA_NAME, informationSchema); - rootSchema.add(NamedSystemSchema.NAME, systemSchema); - rootSchema.add(NamedLookupSchema.NAME, lookupSchema); - - return catalog; + return createMockRootSchema(conglomerate, walker, plannerConfig, null, authorizerMapper); } public static DruidSchemaCatalog createMockRootSchema( final QueryRunnerFactoryConglomerate conglomerate, final SpecificSegmentsQuerySegmentWalker walker, final PlannerConfig plannerConfig, - final ViewManager viewManager, + @Nullable final ViewManager viewManager, final AuthorizerMapper authorizerMapper ) { - DruidSchema druidSchema = createMockSchema(conglomerate, walker, plannerConfig, viewManager); + DruidSchema druidSchema = createMockSchema(conglomerate, walker, plannerConfig); SystemSchema systemSchema = CalciteTests.createMockSystemSchema(druidSchema, walker, 
plannerConfig, authorizerMapper); LookupSchema lookupSchema = CalciteTests.createMockLookupSchema(); - ViewSchema viewSchema = new ViewSchema(viewManager); + ViewSchema viewSchema = viewManager != null ? new ViewSchema(viewManager) : null; SchemaPlus rootSchema = CalciteSchema.createRootSchema(false, false).plus(); - Set namedSchemas = ImmutableSet.of( - new NamedDruidSchema(druidSchema, CalciteTests.DRUID_SCHEMA_NAME), - new NamedSystemSchema(plannerConfig, systemSchema), - new NamedLookupSchema(lookupSchema), - new NamedViewSchema(viewSchema) - ); + Set namedSchemas = new HashSet<>(); + namedSchemas.add(new NamedDruidSchema(druidSchema, CalciteTests.DRUID_SCHEMA_NAME)); + namedSchemas.add(new NamedSystemSchema(plannerConfig, systemSchema)); + namedSchemas.add(new NamedLookupSchema(lookupSchema)); + + if (viewSchema != null) { + namedSchemas.add(new NamedViewSchema(viewSchema)); + } + DruidSchemaCatalog catalog = new DruidSchemaCatalog( rootSchema, namedSchemas.stream().collect(Collectors.toMap(NamedSchema::getSchemaName, x -> x)) @@ -1212,7 +1196,11 @@ public static DruidSchemaCatalog createMockRootSchema( rootSchema.add(CalciteTests.INFORMATION_SCHEMA_NAME, informationSchema); rootSchema.add(NamedSystemSchema.NAME, systemSchema); rootSchema.add(NamedLookupSchema.NAME, lookupSchema); - rootSchema.add(NamedViewSchema.NAME, viewSchema); + + if (viewSchema != null) { + rootSchema.add(NamedViewSchema.NAME, viewSchema); + } + return catalog; } @@ -1236,16 +1224,6 @@ private static DruidSchema createMockSchema( final SpecificSegmentsQuerySegmentWalker walker, final PlannerConfig plannerConfig ) - { - return createMockSchema(conglomerate, walker, plannerConfig, new NoopViewManager()); - } - - private static DruidSchema createMockSchema( - final QueryRunnerFactoryConglomerate conglomerate, - final SpecificSegmentsQuerySegmentWalker walker, - final PlannerConfig plannerConfig, - final ViewManager viewManager - ) { final DruidSchema schema = new DruidSchema( 
CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index c09a6114870d..280a84cb3ad3 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -73,6 +73,7 @@ import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.planner.PlannerFactory; +import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.util.CalciteTestBase; import org.apache.druid.sql.calcite.util.CalciteTests; @@ -230,7 +231,10 @@ public boolean shouldSerializeComplexValues() final PlannerFactory plannerFactory = new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + new NativeQueryMakerFactory( + CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + CalciteTests.getJsonMapper() + ), operatorTable, macroTable, plannerConfig, From f8d99a7e4ea2e63af88bce04ed6b0be2bb589587 Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Fri, 19 Nov 2021 16:51:34 -0800 Subject: [PATCH 2/5] Fix up calls to CalciteTests.createMockQueryLifecycleFactory. 
--- .../org/apache/druid/benchmark/query/SqlBenchmark.java | 6 +++--- .../druid/benchmark/query/SqlExpressionBenchmark.java | 4 ++-- .../druid/benchmark/query/SqlVsNativeBenchmark.java | 4 ++-- .../druid/sql/avatica/DruidAvaticaHandlerTest.java | 10 ++-------- .../apache/druid/sql/avatica/DruidStatementTest.java | 6 +----- .../sql/calcite/SqlVectorizedExpressionSanityTest.java | 6 +----- .../apache/druid/sql/calcite/util/CalciteTests.java | 10 ++++++++++ .../org/apache/druid/sql/http/SqlResourceTest.java | 5 +---- 8 files changed, 22 insertions(+), 29 deletions(-) diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java index 12b717ec002f..a64e4710dfb3 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlBenchmark.java @@ -425,7 +425,7 @@ public void setup() CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER); plannerFactory = new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + CalciteTests.createMockQueryMakerFactory(walker, conglomerate), createOperatorTable(), CalciteTests.createExprMacroTable(), plannerConfig, @@ -467,7 +467,7 @@ public void querySql(Blackhole blackhole) throws Exception ); final String sql = QUERIES.get(Integer.parseInt(query)); try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(context, sql)) { - final PlannerResult plannerResult = planner.plan(sql); + final PlannerResult plannerResult = planner.plan(); final Sequence resultSequence = plannerResult.run(); final Object[] lastRow = resultSequence.accumulate(null, (accumulated, in) -> in); blackhole.consume(lastRow); @@ -485,7 +485,7 @@ public void planSql(Blackhole blackhole) throws Exception ); final String sql = QUERIES.get(Integer.parseInt(query)); try 
(final DruidPlanner planner = plannerFactory.createPlannerForTesting(context, sql)) { - final PlannerResult plannerResult = planner.plan(sql); + final PlannerResult plannerResult = planner.plan(); blackhole.consume(plannerResult); } } diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java index 3f99b241f7dc..76cdbfa9ab6d 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlExpressionBenchmark.java @@ -268,7 +268,7 @@ public void setup() CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER); plannerFactory = new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + CalciteTests.createMockQueryMakerFactory(walker, conglomerate), CalciteTests.createOperatorTable(), CalciteTests.createExprMacroTable(), plannerConfig, @@ -305,7 +305,7 @@ public void querySql(Blackhole blackhole) throws Exception ); final String sql = QUERIES.get(Integer.parseInt(query)); try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(context, sql)) { - final PlannerResult plannerResult = planner.plan(sql); + final PlannerResult plannerResult = planner.plan(); final Sequence resultSequence = plannerResult.run(); final Object[] lastRow = resultSequence.accumulate(null, (accumulated, in) -> in); blackhole.consume(lastRow); diff --git a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java index 9e805d8daf00..8b64ea4c64c3 100644 --- a/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java +++ b/benchmarks/src/test/java/org/apache/druid/benchmark/query/SqlVsNativeBenchmark.java @@ -115,7 +115,7 @@ 
public void setup() CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER); plannerFactory = new PlannerFactory( rootSchema, - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), + CalciteTests.createMockQueryMakerFactory(walker, conglomerate), CalciteTests.createOperatorTable(), CalciteTests.createExprMacroTable(), plannerConfig, @@ -162,7 +162,7 @@ public void queryNative(Blackhole blackhole) public void queryPlanner(Blackhole blackhole) throws Exception { try (final DruidPlanner planner = plannerFactory.createPlannerForTesting(null, sqlQuery)) { - final PlannerResult plannerResult = planner.plan(sqlQuery); + final PlannerResult plannerResult = planner.plan(); final Sequence resultSequence = plannerResult.run(); final Object[] lastRow = resultSequence.accumulate(null, (accumulated, in) -> in); blackhole.consume(lastRow); diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java index 7a96085b50d2..d32b601f6396 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java @@ -893,10 +893,7 @@ public int getMaxRowsPerFrame() CalciteTests.createSqlLifecycleFactory( new PlannerFactory( rootSchema, - new NativeQueryMakerFactory( - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), - CalciteTests.getJsonMapper() - ), + CalciteTests.createMockQueryMakerFactory(walker, conglomerate), operatorTable, macroTable, plannerConfig, @@ -986,10 +983,7 @@ public int getMinRowsPerFrame() CalciteTests.createSqlLifecycleFactory( new PlannerFactory( rootSchema, - new NativeQueryMakerFactory( - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), - CalciteTests.getJsonMapper() - ), + CalciteTests.createMockQueryMakerFactory(walker, conglomerate), operatorTable, 
macroTable, plannerConfig, diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java index b0b26dd5f039..1e27d44845b7 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidStatementTest.java @@ -35,7 +35,6 @@ import org.apache.druid.sql.calcite.planner.DruidOperatorTable; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerFactory; -import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.util.CalciteTestBase; import org.apache.druid.sql.calcite.util.CalciteTests; @@ -92,10 +91,7 @@ public void setUp() throws Exception CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER); final PlannerFactory plannerFactory = new PlannerFactory( rootSchema, - new NativeQueryMakerFactory( - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), - CalciteTests.getJsonMapper() - ), + CalciteTests.createMockQueryMakerFactory(walker, conglomerate), operatorTable, macroTable, plannerConfig, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java index 7bdc0d92f92c..423a44598c6e 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/SqlVectorizedExpressionSanityTest.java @@ -44,7 +44,6 @@ import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerFactory; import org.apache.druid.sql.calcite.planner.PlannerResult; -import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; import 
org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.util.CalciteTests; import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker; @@ -133,10 +132,7 @@ public static void setupClass() CalciteTests.createMockRootSchema(CONGLOMERATE, WALKER, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER); PLANNER_FACTORY = new PlannerFactory( rootSchema, - new NativeQueryMakerFactory( - CalciteTests.createMockQueryLifecycleFactory(WALKER, CONGLOMERATE), - CalciteTests.getJsonMapper() - ), + CalciteTests.createMockQueryMakerFactory(WALKER, CONGLOMERATE), CalciteTests.createOperatorTable(), CalciteTests.createExprMacroTable(), plannerConfig, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java index 337890a20cbc..25137a47b08a 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java @@ -119,6 +119,8 @@ import org.apache.druid.sql.calcite.planner.DruidOperatorTable; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerFactory; +import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; +import org.apache.druid.sql.calcite.run.QueryMakerFactory; import org.apache.druid.sql.calcite.schema.DruidSchema; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.schema.InformationSchema; @@ -781,6 +783,14 @@ private CalciteTests() public static final DruidViewMacroFactory DRUID_VIEW_MACRO_FACTORY = new TestDruidViewMacroFactory(); + public static QueryMakerFactory createMockQueryMakerFactory( + final QuerySegmentWalker walker, + final QueryRunnerFactoryConglomerate conglomerate + ) + { + return new NativeQueryMakerFactory(createMockQueryLifecycleFactory(walker, conglomerate), getJsonMapper()); + } + public static 
QueryLifecycleFactory createMockQueryLifecycleFactory( final QuerySegmentWalker walker, final QueryRunnerFactoryConglomerate conglomerate diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index 280a84cb3ad3..418adc9bf321 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -231,10 +231,7 @@ public boolean shouldSerializeComplexValues() final PlannerFactory plannerFactory = new PlannerFactory( rootSchema, - new NativeQueryMakerFactory( - CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), - CalciteTests.getJsonMapper() - ), + CalciteTests.createMockQueryMakerFactory(walker, conglomerate), operatorTable, macroTable, plannerConfig, From 63d924a92e75ed75380f06271497276b5988c48b Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Sat, 20 Nov 2021 09:00:18 -0800 Subject: [PATCH 3/5] Fix checkstyle issues. --- .../sql/calcite/CalciteInsertDmlTest.java | 33 ++++++++++--------- .../druid/sql/http/SqlResourceTest.java | 1 - 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index e58b1bf38030..8709a899a753 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -277,6 +277,23 @@ public void testExplainInsertFromExternal() throws Exception // Skip vectorization since otherwise the "context" will change for each subtest. 
skipVectorize(); + final ScanQuery expectedQuery = newScanQueryBuilder() + .dataSource(externalDataSource) + .intervals(querySegmentSpec(Filtration.eternity())) + .columns("x", "y", "z") + .context( + queryJsonMapper.readValue( + "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}", + JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT + ) + ) + .build(); + + final String expectedExplanation = + "DruidQueryRel(query=[" + + queryJsonMapper.writeValueAsString(expectedQuery) + + "], signature=[{x:STRING, y:STRING, z:LONG}])\n"; + // Use testQuery for EXPLAIN (not testInsertQuery). testQuery( new PlannerConfig(), @@ -285,21 +302,7 @@ public void testExplainInsertFromExternal() throws Exception ImmutableList.of(), ImmutableList.of( new Object[]{ - "DruidQueryRel(query=[" - + queryJsonMapper.writeValueAsString( - newScanQueryBuilder() - .dataSource(externalDataSource) - .intervals(querySegmentSpec(Filtration.eternity())) - .columns("x", "y", "z") - .context( - queryJsonMapper.readValue( - "{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}", - JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT - ) - ) - .build() - ) - + "], signature=[{x:STRING, y:STRING, z:LONG}])\n", + expectedExplanation, "[{\"name\":\"EXTERNAL\",\"type\":\"EXTERNAL\"},{\"name\":\"dst\",\"type\":\"DATASOURCE\"}]" } ) diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java index 418adc9bf321..217645ef9d5e 100644 --- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java +++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java @@ -73,7 +73,6 @@ import 
org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.planner.PlannerContext; import org.apache.druid.sql.calcite.planner.PlannerFactory; -import org.apache.druid.sql.calcite.run.NativeQueryMakerFactory; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.util.CalciteTestBase; import org.apache.druid.sql.calcite.util.CalciteTests; From d95ce75145be07dd6295d561cb69146eecab6bec Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Sat, 20 Nov 2021 09:37:57 -0800 Subject: [PATCH 4/5] Adjustments for CI. --- .../druid/segment/column/ColumnSignature.java | 20 ----- .../segment/column/RowSignatureTest.java | 73 ++++++++++++++++++- .../sql/calcite/run/QueryMakerFactory.java | 1 + 3 files changed, 73 insertions(+), 21 deletions(-) diff --git a/processing/src/main/java/org/apache/druid/segment/column/ColumnSignature.java b/processing/src/main/java/org/apache/druid/segment/column/ColumnSignature.java index 07ad2728f758..62553ddaf4e9 100644 --- a/processing/src/main/java/org/apache/druid/segment/column/ColumnSignature.java +++ b/processing/src/main/java/org/apache/druid/segment/column/ColumnSignature.java @@ -25,7 +25,6 @@ import org.apache.druid.java.util.common.IAE; import javax.annotation.Nullable; -import java.util.Objects; /** * Class used by {@link RowSignature} for serialization. 
@@ -69,25 +68,6 @@ ColumnType type() return type; } - @Override - public boolean equals(Object o) - { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ColumnSignature that = (ColumnSignature) o; - return name.equals(that.name) && Objects.equals(type, that.type); - } - - @Override - public int hashCode() - { - return Objects.hash(name, type); - } - @Override public String toString() { diff --git a/processing/src/test/java/org/apache/druid/segment/column/RowSignatureTest.java b/processing/src/test/java/org/apache/druid/segment/column/RowSignatureTest.java index 3c77b0499c13..04cd540fea7f 100644 --- a/processing/src/test/java/org/apache/druid/segment/column/RowSignatureTest.java +++ b/processing/src/test/java/org/apache/druid/segment/column/RowSignatureTest.java @@ -20,6 +20,7 @@ package org.apache.druid.segment.column; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; import org.apache.druid.segment.TestHelper; import org.junit.Assert; import org.junit.Test; @@ -29,7 +30,62 @@ public class RowSignatureTest { @Test - public void testJson() throws IOException + public void test_add_withConflict() + { + final RowSignature.Builder builder = + RowSignature.builder() + .add("s", ColumnType.STRING) + .add("d", ColumnType.DOUBLE) + .add("d", ColumnType.LONG); + + Assert.assertThrows( + "Column [d] has conflicting types", + IllegalArgumentException.class, + builder::build + ); + } + + @Test + public void test_addAll() + { + final RowSignature expectedSignature = + RowSignature.builder() + .add("s", ColumnType.STRING) + .add("d", ColumnType.DOUBLE) + .add("l", ColumnType.LONG) + .build(); + + final RowSignature signature = + RowSignature.builder() + .addAll(RowSignature.builder().add("s", ColumnType.STRING).add("d", ColumnType.DOUBLE).build()) + .addAll(RowSignature.builder().add("l", ColumnType.LONG).build()) + .build(); + + Assert.assertEquals(expectedSignature, 
signature); + } + + @Test + public void test_addAll_withOverlap() + { + final RowSignature expectedSignature = + RowSignature.builder() + .add("s", ColumnType.STRING) + .add("d", ColumnType.DOUBLE) + .add("d", ColumnType.DOUBLE) + .build(); + + final RowSignature signature = + RowSignature.builder() + .addAll(RowSignature.builder().add("s", ColumnType.STRING).add("d", ColumnType.DOUBLE).build()) + .addAll(RowSignature.builder().add("d", ColumnType.DOUBLE).build()) + .build(); + + Assert.assertEquals(ImmutableList.of("s", "d", "d"), expectedSignature.getColumnNames()); + Assert.assertEquals(expectedSignature, signature); + } + + @Test + public void test_json() throws IOException { final String signatureString = "[{\"name\":\"s\",\"type\":\"STRING\"}," @@ -59,4 +115,19 @@ public void testJson() throws IOException signature ); } + + @Test + public void test_json_missingName() + { + final String signatureString = + "[{\"name\":\"s\",\"type\":\"STRING\"}," + + "{\"type\":\"DOUBLE\"}]"; + + final ObjectMapper mapper = TestHelper.makeJsonMapper(); + Assert.assertThrows( + "Column name must be non-empty", + IOException.class, + () -> mapper.readValue(signatureString, RowSignature.class) + ); + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMakerFactory.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMakerFactory.java index 559b607af8c9..0c7ffcdfba6c 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMakerFactory.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMakerFactory.java @@ -38,6 +38,7 @@ public interface QueryMakerFactory * * @throws ValidationException if this factory cannot build an executor for the provided query */ + @SuppressWarnings("RedundantThrows") QueryMaker buildForSelect(RelRoot relRoot, PlannerContext plannerContext) throws ValidationException; /** From 61bc7edd163b447fb7ec7a645f074f1e6320f36b Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Mon, 22 Nov 2021 08:55:11 -0800 
Subject: [PATCH 5/5] Adjust DruidAvaticaHandlerTest for stricter test authorizations. --- .../sql/avatica/DruidAvaticaHandlerTest.java | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java index d32b601f6396..6b35daf1774f 100644 --- a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java +++ b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java @@ -33,6 +33,7 @@ import com.google.inject.multibindings.Multibinder; import com.google.inject.name.Names; import org.apache.calcite.avatica.AvaticaClientRuntimeException; +import org.apache.calcite.avatica.AvaticaSqlException; import org.apache.calcite.avatica.Meta; import org.apache.calcite.avatica.MissingResultsException; import org.apache.calcite.avatica.NoSuchStatementException; @@ -1100,9 +1101,24 @@ public void testParameterBinding() throws Exception } @Test - public void testSysTableParameterBinding() throws Exception + public void testSysTableParameterBindingRegularUser() throws Exception { - PreparedStatement statement = client.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?"); + PreparedStatement statement = + client.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?"); + statement.setString(1, "dummy"); + + Assert.assertThrows( + "Insufficient permission to view servers", + AvaticaSqlException.class, + statement::executeQuery + ); + } + + @Test + public void testSysTableParameterBindingSuperUser() throws Exception + { + PreparedStatement statement = + superuserClient.prepareStatement("SELECT COUNT(*) AS cnt FROM sys.servers WHERE servers.host = ?"); statement.setString(1, "dummy"); final ResultSet resultSet = statement.executeQuery(); final List> rows = getRows(resultSet);