From 38411847f1ecc1e30658d14aecfc962e4f5eb9be Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Thu, 19 May 2022 10:36:48 -0700 Subject: [PATCH 1/8] Planner test framework Also cleans up various "Calcite test" helper classes. Includes a large set of tests converted from the various Calcite*QueryTest JUnit tests. --- .../druid/common/config/NullHandling.java | 10 +- .../config/NullValueHandlingConfig.java | 13 +- .../druid/java/util/common/StringUtils.java | 11 + .../druid/math/expr/ExpressionProcessing.java | 19 +- .../org/apache/druid/query/QueryContexts.java | 111 + .../druid/query/filter/SelectorDimFilter.java | 1 + .../apache/druid/query/QueryContextsTest.java | 18 + sql/pom.xml | 5 + .../org/apache/druid/sql/SqlLifecycle.java | 23 +- .../druid/sql/avatica/DruidStatement.java | 5 +- .../sql/calcite/expression/Expressions.java | 4 +- .../druid/sql/calcite/planner/Calcites.java | 27 +- .../sql/calcite/planner/CapturedState.java | 111 + .../sql/calcite/planner/DruidPlanner.java | 63 +- .../sql/calcite/planner/NoOpCapture.java | 83 + .../sql/calcite/planner/PlannerConfig.java | 275 +- .../sql/calcite/planner/PlannerContext.java | 17 +- .../calcite/planner/PlannerStateCapture.java | 45 + .../sql/calcite/rel/DruidOuterQueryRel.java | 4 +- .../druid/sql/calcite/rel/DruidQuery.java | 6 +- .../druid/sql/calcite/rel/DruidQueryRel.java | 4 +- .../druid/sql/calcite/rel/DruidRel.java | 7 +- .../druid/sql/calcite/rel/DruidUnionRel.java | 13 + .../FilterJoinExcludePushToChildRule.java | 2 +- .../sql/calcite/run/NativeQueryMaker.java | 96 +- .../druid/sql/calcite/run/QueryMaker.java | 12 + .../druid/sql/calcite/schema/DruidSchema.java | 36 +- .../calcite/schema/DruidSchemaCatalog.java | 2 +- .../calcite/schema/RootSchemaProvider.java | 4 +- .../sql/calcite/table/RowSignatures.java | 6 +- .../calcite/view/InProcessViewManager.java | 10 +- .../apache/druid/sql/http/SqlParameter.java | 2 +- .../sql/calcite/BaseCalciteQueryTest.java | 332 +- 
.../sql/calcite/CalciteArraysQueryTest.java | 18 +- .../calcite/CalciteCorrelatedQueryTest.java | 1 - .../sql/calcite/CalciteIngestionDmlTest.java | 4 +- .../sql/calcite/CalciteInsertDmlTest.java | 16 +- .../sql/calcite/CalciteJoinQueryTest.java | 9 +- .../CalciteMultiValueStringQueryTest.java | 2 +- .../calcite/CalciteParameterQueryTest.java | 7 +- .../druid/sql/calcite/CalciteQueryTest.java | 19 + .../apache/druid/sql/calcite/QueryDefn.java | 114 + .../sql/calcite/TestInsertQueryMaker.java | 6 + .../sql/calcite/TestQueryMakerFactory.java | 2 +- .../sql/calcite/planner/DruidPlannerTest.java | 199 + .../druid/sql/calcite/rel/DruidQueryTest.java | 1 - .../sql/calcite/schema/DruidSchemaTest.java | 2 - .../sql/calcite/tester/ActualResults.java | 697 ++++ .../calcite/tester/CalciteTestCapture.java | 369 ++ .../calcite/tester/CalciteTestRecorder.java | 154 + .../sql/calcite/tester/ContextSection.java | 102 + .../sql/calcite/tester/LinesSection.java | 304 ++ .../sql/calcite/tester/OptionsSection.java | 180 + .../sql/calcite/tester/ParametersSection.java | 90 + .../calcite/tester/ParseTreeSerializer.java | 100 + .../calcite/tester/ParseTreeVisualizer.java | 150 + .../sql/calcite/tester/PatternSection.java | 373 ++ .../sql/calcite/tester/PlannerFixture.java | 396 ++ .../druid/sql/calcite/tester/QueryRun.java | 225 ++ .../druid/sql/calcite/tester/QueryRunner.java | 227 ++ .../sql/calcite/tester/QueryTestCase.java | 252 ++ .../calcite/tester/QueryTestCaseRunner.java | 661 ++++ .../sql/calcite/tester/QueryTestCases.java | 255 ++ .../sql/calcite/tester/QueryTestSet.java | 223 ++ .../apache/druid/sql/calcite/tester/README.md | 597 +++ .../sql/calcite/tester/ResourcesSection.java | 209 + .../sql/calcite/tester/SectionContainer.java | 111 + .../sql/calcite/tester/TestCaseLoader.java | 750 ++++ .../calcite/tester/TestCaseLoaderTest.java | 742 ++++ .../sql/calcite/tester/TestCaseMerger.java | 765 ++++ .../sql/calcite/tester/TestCaseTest.java | 226 ++ 
.../sql/calcite/tester/TestCaseWriter.java | 288 ++ .../druid/sql/calcite/tester/TestSection.java | 112 + .../druid/sql/calcite/tester/TextSection.java | 143 + .../druid/sql/calcite/tester/internals.md | 709 ++++ .../druid/sql/calcite/util/CalciteTests.java | 85 +- .../druid/sql/calcite/util/QueryLogHook.java | 4 +- .../sql/calcite/util/RootSchemaBuilder.java | 338 ++ .../resources/calcite/cases/arrayQuery.case | 3397 ++++++++++++++++ .../calcite/cases/correlatedQuery.case | 1119 ++++++ .../resources/calcite/cases/insertDML.case | 1102 ++++++ .../resources/calcite/cases/joinQuery01.case | 1614 ++++++++ .../resources/calcite/cases/joinQuery02.case | 1572 ++++++++ .../resources/calcite/cases/joinQuery03.case | 1636 ++++++++ .../resources/calcite/cases/joinQuery04.case | 1457 +++++++ .../resources/calcite/cases/joinQuery05.case | 1545 ++++++++ .../resources/calcite/cases/joinQuery06.case | 1514 +++++++ .../resources/calcite/cases/joinQuery07.case | 1094 ++++++ .../calcite/cases/multiValueStringQuery.case | 2863 ++++++++++++++ .../calcite/cases/parameterQuery.case | 1354 +++++++ .../test/resources/calcite/cases/query01.case | 1440 +++++++ .../test/resources/calcite/cases/query02.case | 2036 ++++++++++ .../test/resources/calcite/cases/query03.case | 1624 ++++++++ .../test/resources/calcite/cases/query04.case | 1420 +++++++ .../test/resources/calcite/cases/query05.case | 1640 ++++++++ .../test/resources/calcite/cases/query06.case | 1999 ++++++++++ .../test/resources/calcite/cases/query07.case | 995 +++++ .../test/resources/calcite/cases/query08.case | 2574 ++++++++++++ .../test/resources/calcite/cases/query09.case | 2003 ++++++++++ .../test/resources/calcite/cases/query10.case | 1806 +++++++++ .../test/resources/calcite/cases/query11.case | 3488 +++++++++++++++++ .../test/resources/calcite/cases/query12.case | 1451 +++++++ .../test/resources/calcite/cases/query13.case | 1257 ++++++ .../test/resources/calcite/cases/query14.case | 2275 +++++++++++ 
.../test/resources/calcite/cases/query15.case | 403 ++ 105 files changed, 57915 insertions(+), 377 deletions(-) create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/planner/CapturedState.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/planner/NoOpCapture.java create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerStateCapture.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/QueryDefn.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/planner/DruidPlannerTest.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/ActualResults.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/CalciteTestCapture.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/CalciteTestRecorder.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/ContextSection.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/LinesSection.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/OptionsSection.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/ParametersSection.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/ParseTreeSerializer.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/ParseTreeVisualizer.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/PatternSection.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/PlannerFixture.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryRun.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryRunner.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCase.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCaseRunner.java create mode 100644 
sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCases.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestSet.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/README.md create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/ResourcesSection.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/SectionContainer.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseLoader.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseLoaderTest.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseMerger.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseTest.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseWriter.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/TestSection.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/TextSection.java create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/tester/internals.md create mode 100644 sql/src/test/java/org/apache/druid/sql/calcite/util/RootSchemaBuilder.java create mode 100644 sql/src/test/resources/calcite/cases/arrayQuery.case create mode 100644 sql/src/test/resources/calcite/cases/correlatedQuery.case create mode 100644 sql/src/test/resources/calcite/cases/insertDML.case create mode 100644 sql/src/test/resources/calcite/cases/joinQuery01.case create mode 100644 sql/src/test/resources/calcite/cases/joinQuery02.case create mode 100644 sql/src/test/resources/calcite/cases/joinQuery03.case create mode 100644 sql/src/test/resources/calcite/cases/joinQuery04.case create mode 100644 sql/src/test/resources/calcite/cases/joinQuery05.case create mode 100644 sql/src/test/resources/calcite/cases/joinQuery06.case create mode 100644 sql/src/test/resources/calcite/cases/joinQuery07.case 
create mode 100644 sql/src/test/resources/calcite/cases/multiValueStringQuery.case create mode 100644 sql/src/test/resources/calcite/cases/parameterQuery.case create mode 100644 sql/src/test/resources/calcite/cases/query01.case create mode 100644 sql/src/test/resources/calcite/cases/query02.case create mode 100644 sql/src/test/resources/calcite/cases/query03.case create mode 100644 sql/src/test/resources/calcite/cases/query04.case create mode 100644 sql/src/test/resources/calcite/cases/query05.case create mode 100644 sql/src/test/resources/calcite/cases/query06.case create mode 100644 sql/src/test/resources/calcite/cases/query07.case create mode 100644 sql/src/test/resources/calcite/cases/query08.case create mode 100644 sql/src/test/resources/calcite/cases/query09.case create mode 100644 sql/src/test/resources/calcite/cases/query10.case create mode 100644 sql/src/test/resources/calcite/cases/query11.case create mode 100644 sql/src/test/resources/calcite/cases/query12.case create mode 100644 sql/src/test/resources/calcite/cases/query13.case create mode 100644 sql/src/test/resources/calcite/cases/query14.case create mode 100644 sql/src/test/resources/calcite/cases/query15.case diff --git a/core/src/main/java/org/apache/druid/common/config/NullHandling.java b/core/src/main/java/org/apache/druid/common/config/NullHandling.java index f7d31469c318..825d6ee5696f 100644 --- a/core/src/main/java/org/apache/druid/common/config/NullHandling.java +++ b/core/src/main/java/org/apache/druid/common/config/NullHandling.java @@ -68,11 +68,12 @@ public static void initializeForTestsWithValues(Boolean useDefForNull, Boolean i } /** - * whether nulls should be replaced with default value. + * Whether nulls should be replaced with default value. 
*/ public static boolean replaceWithDefault() { - // this should only be null in a unit test context, in production this will be injected by the null handling module + // This should only be null in a unit test context, in production this will + // be injected by the null handling module. if (INSTANCE == null) { throw new IllegalStateException("NullHandling module not initialized, call NullHandling.initializeForTests()"); } @@ -80,11 +81,12 @@ public static boolean replaceWithDefault() } /** - * whether nulls should be counted during String cardinality + * Whether nulls should be counted during String cardinality */ public static boolean ignoreNullsForStringCardinality() { - // this should only be null in a unit test context, in production this will be injected by the null handling module + // This should only be null in a unit test context, in production this will + // be injected by the null handling module. if (INSTANCE == null) { throw new IllegalStateException("NullHandling module not initialized, call NullHandling.initializeForTests()"); } diff --git a/core/src/main/java/org/apache/druid/common/config/NullValueHandlingConfig.java b/core/src/main/java/org/apache/druid/common/config/NullValueHandlingConfig.java index fbdc852105d8..b69fca2e7344 100644 --- a/core/src/main/java/org/apache/druid/common/config/NullValueHandlingConfig.java +++ b/core/src/main/java/org/apache/druid/common/config/NullValueHandlingConfig.java @@ -26,8 +26,8 @@ public class NullValueHandlingConfig { public static final String NULL_HANDLING_CONFIG_STRING = "druid.generic.useDefaultValueForNull"; - //added to preserve backward compatibility - //and not count nulls during cardinality aggrgation over strings + // Added to preserve backward compatibility + // and not count nulls during cardinality aggregation over strings. 
public static final String NULL_HANDLING_DURING_STRING_CARDINALITY = "druid.generic.ignoreNullsForStringCardinality"; @@ -37,7 +37,6 @@ public class NullValueHandlingConfig @JsonProperty("ignoreNullsForStringCardinality") private final boolean ignoreNullsForStringCardinality; - @JsonCreator public NullValueHandlingConfig( @JsonProperty("useDefaultValueForNull") Boolean useDefaultValuesForNull, @@ -54,12 +53,10 @@ public NullValueHandlingConfig( NULL_HANDLING_DURING_STRING_CARDINALITY, "false" )); + } else if (this.useDefaultValuesForNull) { + this.ignoreNullsForStringCardinality = ignoreNullsForStringCardinality; } else { - if (this.useDefaultValuesForNull) { - this.ignoreNullsForStringCardinality = ignoreNullsForStringCardinality; - } else { - this.ignoreNullsForStringCardinality = false; - } + this.ignoreNullsForStringCardinality = false; } } diff --git a/core/src/main/java/org/apache/druid/java/util/common/StringUtils.java b/core/src/main/java/org/apache/druid/java/util/common/StringUtils.java index fd74e48d9a6a..0337cbddd40d 100644 --- a/core/src/main/java/org/apache/druid/java/util/common/StringUtils.java +++ b/core/src/main/java/org/apache/druid/java/util/common/StringUtils.java @@ -23,6 +23,7 @@ import javax.annotation.Nonnull; import javax.annotation.Nullable; + import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; @@ -36,6 +37,7 @@ import java.util.Base64; import java.util.IllegalFormatException; import java.util.Locale; +import java.util.regex.Pattern; /** * As of OpenJDK / Oracle JDK 8, the JVM is optimized around String charset variable instead of Charset passing, that @@ -364,6 +366,15 @@ public static String replace(String s, String target, String replacement) return sb.toString(); } + /** + * Replacement for String.replaceAll, which forbiddenapi checks forbids. + * The implementation simply does what String.replaceAll() does. 
+ */ + public static String replaceAll(String string, String pattern, String replacement) + { + return Pattern.compile(pattern).matcher(string).replaceAll(replacement); + } + /** * Returns the given string if it is non-null; the empty string otherwise. * This method should only be used at places where null to empty conversion is diff --git a/core/src/main/java/org/apache/druid/math/expr/ExpressionProcessing.java b/core/src/main/java/org/apache/druid/math/expr/ExpressionProcessing.java index 905e0850f712..a5db659727bd 100644 --- a/core/src/main/java/org/apache/druid/math/expr/ExpressionProcessing.java +++ b/core/src/main/java/org/apache/druid/math/expr/ExpressionProcessing.java @@ -39,7 +39,6 @@ public class ExpressionProcessing @Inject private static ExpressionProcessingConfig INSTANCE; - /** * Many unit tests do not setup modules for this value to be injected, this method provides a manual way to initialize * {@link #INSTANCE} @@ -63,6 +62,24 @@ public static void initializeForHomogenizeNullMultiValueStrings() INSTANCE = new ExpressionProcessingConfig(null, null, null, true); } + /** + * Get the current configuration when tests want to change it dynamically. + */ + @VisibleForTesting + public static ExpressionProcessingConfig currentConfig() + { + return INSTANCE; + } + + /** + * Restore a previous config after tests change it. 
+ */ + @VisibleForTesting + public static void restoreConfig(ExpressionProcessingConfig config) + { + INSTANCE = config; + } + /** * [['is expression support for'],['nested arrays'],['enabled?']] */ diff --git a/processing/src/main/java/org/apache/druid/query/QueryContexts.java b/processing/src/main/java/org/apache/druid/query/QueryContexts.java index 67cb49be9150..0c4fb4f8d98b 100644 --- a/processing/src/main/java/org/apache/druid/query/QueryContexts.java +++ b/processing/src/main/java/org/apache/druid/query/QueryContexts.java @@ -28,10 +28,13 @@ import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Numbers; import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.query.groupby.GroupByQueryConfig; +import org.apache.druid.query.timeseries.TimeseriesQuery; import org.apache.druid.segment.QueryableIndexStorageAdapter; import java.util.Map; import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; @PublicApi @@ -600,4 +603,112 @@ static > E parseEnum(Query query, String key, Class c throw new ISE("Unknown type [%s]. Cannot parse!", val.getClass()); } } + + public enum EntryType + { + STRING, + BOOLEAN, + INT, + LONG, + VECTORIZE, + OBJECT; + + public Object parse(String value) + { + if (value == null) { + return null; + } + if (this != STRING) { + value = value.trim(); + if (value.length() == 0) { + return null; + } + } + switch (this) { + case BOOLEAN: + return Numbers.parseBoolean(value); + case LONG: + return Numbers.parseLong(value); + case INT: + return Numbers.parseInt(value); + case VECTORIZE: + return Vectorize.valueOf(StringUtils.toUpperCase(value)); + default: + return value; + } + } + } + + /** + * Definition of non-String context variables. At present, provides only the + * type. 
This can be expanded to provide other attributes when useful: whether + * the item is internal or external, whether it is only for the SQL planner, and + * can be stripped out of the query before execution, the default value, etc. + */ + public static final ConcurrentHashMap ENTRY_DEFNS = new ConcurrentHashMap<>(); + + // List of known context keys with type and default value (where known). + // Some of these are probably internal: add the flag where that is true. + + static { + ENTRY_DEFNS.put(BROKER_PARALLEL_MERGE_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(BROKER_PARALLEL_MERGE_INITIAL_YIELD_ROWS_KEY, EntryType.INT); + ENTRY_DEFNS.put(BROKER_PARALLEL_MERGE_SMALL_BATCH_ROWS_KEY, EntryType.INT); + ENTRY_DEFNS.put(BROKER_PARALLELISM, EntryType.INT); + ENTRY_DEFNS.put(BY_SEGMENT_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(DEFAULT_TIMEOUT_KEY, EntryType.INT); + ENTRY_DEFNS.put(ENABLE_DEBUG, EntryType.BOOLEAN); + ENTRY_DEFNS.put(FINALIZE_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(IN_SUB_QUERY_THRESHOLD_KEY, EntryType.INT); + ENTRY_DEFNS.put(JOIN_FILTER_PUSH_DOWN_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(JOIN_FILTER_REWRITE_ENABLE_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(JOIN_FILTER_REWRITE_MAX_SIZE_KEY, EntryType.LONG); + ENTRY_DEFNS.put( + JOIN_FILTER_REWRITE_VALUE_COLUMN_FILTERS_ENABLE_KEY, + EntryType.BOOLEAN); + ENTRY_DEFNS.put(MAX_NUMERIC_IN_FILTERS, EntryType.INT); + ENTRY_DEFNS.put(MAX_QUEUED_BYTES_KEY, EntryType.LONG); + ENTRY_DEFNS.put(MAX_SCATTER_GATHER_BYTES_KEY, EntryType.INT); + ENTRY_DEFNS.put(MAX_SUBQUERY_ROWS_KEY, EntryType.INT); + ENTRY_DEFNS.put(NUM_RETRIES_ON_MISSING_SEGMENTS_KEY, EntryType.INT); + ENTRY_DEFNS.put(POPULATE_CACHE_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(POPULATE_RESULT_LEVEL_CACHE_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(PRIORITY_KEY, EntryType.INT); + ENTRY_DEFNS.put(RETURN_PARTIAL_RESULTS_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(REWRITE_JOIN_TO_FILTER_ENABLE_KEY, EntryType.BOOLEAN); + 
ENTRY_DEFNS.put(SECONDARY_PARTITION_PRUNING_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(SERIALIZE_DATE_TIME_AS_LONG_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(SERIALIZE_DATE_TIME_AS_LONG_INNER_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(SQL_JOIN_LEFT_SCAN_DIRECT, EntryType.BOOLEAN); + ENTRY_DEFNS.put(TIME_BOUNDARY_PLANNING_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(TIMEOUT_KEY, EntryType.INT); + ENTRY_DEFNS.put(UNCOVERED_INTERVALS_LIMIT_KEY, EntryType.INT); + ENTRY_DEFNS.put(USE_CACHE_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(USE_FILTER_CNF_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(USE_RESULT_LEVEL_CACHE_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(VECTOR_SIZE_KEY, EntryType.INT); + ENTRY_DEFNS.put(VECTORIZE_KEY, EntryType.BOOLEAN); + ENTRY_DEFNS.put(VECTORIZE_VIRTUAL_COLUMNS_KEY, EntryType.BOOLEAN); + + ENTRY_DEFNS.put(GroupByQueryConfig.CTX_KEY_FORCE_LIMIT_PUSH_DOWN, EntryType.BOOLEAN); + ENTRY_DEFNS.put(GroupByQueryConfig.CTX_KEY_ENABLE_MULTI_VALUE_UNNESTING, EntryType.BOOLEAN); + + // From PlannerContext: constants not visible here. + ENTRY_DEFNS.put("sqlOuterLimit", EntryType.INT); + ENTRY_DEFNS.put("sqlStringifyArrays", EntryType.BOOLEAN); + ENTRY_DEFNS.put("useApproximateTopN", EntryType.BOOLEAN); + + // From TimeseriesQuery + ENTRY_DEFNS.put(TimeseriesQuery.SKIP_EMPTY_BUCKETS, EntryType.BOOLEAN); + } + + /** + * Get the definition (currently, only the type) of the context key. + * Defaults to STRING unless a different type is explicitly registered. + */ + public static EntryType definition(String key) + { + EntryType defn = ENTRY_DEFNS.get(key); + return defn == null ? 
EntryType.STRING : defn; + } } diff --git a/processing/src/main/java/org/apache/druid/query/filter/SelectorDimFilter.java b/processing/src/main/java/org/apache/druid/query/filter/SelectorDimFilter.java index 5d8e56774c28..c9c3e7115688 100644 --- a/processing/src/main/java/org/apache/druid/query/filter/SelectorDimFilter.java +++ b/processing/src/main/java/org/apache/druid/query/filter/SelectorDimFilter.java @@ -122,6 +122,7 @@ public String getDimension() */ @Nullable @JsonProperty + @JsonInclude(JsonInclude.Include.NON_NULL) public String getValue() { return value; diff --git a/processing/src/test/java/org/apache/druid/query/QueryContextsTest.java b/processing/src/test/java/org/apache/druid/query/QueryContextsTest.java index 2ee9b9363e15..3f7b80fda3da 100644 --- a/processing/src/test/java/org/apache/druid/query/QueryContextsTest.java +++ b/processing/src/test/java/org/apache/druid/query/QueryContextsTest.java @@ -23,6 +23,8 @@ import com.google.common.collect.ImmutableMap; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.Intervals; +import org.apache.druid.query.QueryContexts.EntryType; +import org.apache.druid.query.QueryContexts.Vectorize; import org.apache.druid.query.spec.MultipleIntervalSegmentSpec; import org.junit.Assert; import org.junit.Rule; @@ -222,6 +224,22 @@ public void testEnableQueryDebuggingSetToTrue() Assert.assertTrue(QueryContexts.isDebug(query.getContext())); } + @Test + public void testMetadata() + { + Assert.assertEquals(EntryType.BOOLEAN, QueryContexts.definition(QueryContexts.FINALIZE_KEY)); + Assert.assertEquals(EntryType.STRING, QueryContexts.definition("unknown")); + + Assert.assertNull(EntryType.STRING.parse(null)); + Assert.assertNull(EntryType.INT.parse("")); + Assert.assertNull(EntryType.INT.parse(" ")); + Assert.assertEquals(true, EntryType.BOOLEAN.parse(" true ")); + Assert.assertEquals(10, EntryType.INT.parse(" 10 ")); + Assert.assertEquals(20L, EntryType.LONG.parse(" 20 ")); + 
Assert.assertEquals(Vectorize.FORCE, EntryType.VECTORIZE.parse(" force ")); + Assert.assertEquals("foo", EntryType.OBJECT.parse(" foo ")); + } + @Test public void testGetAs() { diff --git a/sql/pom.xml b/sql/pom.xml index 67cb3e51c27f..d583c06bb19a 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -185,6 +185,11 @@ slf4j-api provided + + org.apache.commons + commons-text + 1.3 + diff --git a/sql/src/main/java/org/apache/druid/sql/SqlLifecycle.java b/sql/src/main/java/org/apache/druid/sql/SqlLifecycle.java index 9faca5c600f2..a77c997856e4 100644 --- a/sql/src/main/java/org/apache/druid/sql/SqlLifecycle.java +++ b/sql/src/main/java/org/apache/druid/sql/SqlLifecycle.java @@ -106,7 +106,7 @@ public class SqlLifecycle @GuardedBy("stateLock") private State state = State.NEW; - // init during intialize + // init during initialize private String sql; private QueryContext queryContext; private List parameters; @@ -170,7 +170,7 @@ private String sqlQueryId() } /** - * Assign dynamic parameters to be used to substitute values during query exection. This can be performed at any + * Assign dynamic parameters to be used to substitute values during query execution. This can be performed at any * part of the lifecycle. 
*/ public void setParameters(List parameters) @@ -232,11 +232,11 @@ private ValidationResult validate(AuthenticationResult authenticationResult) { try (DruidPlanner planner = plannerFactory.createPlanner(sql, queryContext)) { // set planner context for logs/metrics in case something explodes early - this.plannerContext = planner.getPlannerContext(); - this.plannerContext.setAuthenticationResult(authenticationResult); + plannerContext = planner.getPlannerContext(); + plannerContext.setAuthenticationResult(authenticationResult); // set parameters on planner context, if parameters have already been set - this.plannerContext.setParameters(parameters); - this.validationResult = planner.validate(authConfig.authorizeQueryContextParams()); + plannerContext.setParameters(parameters); + validationResult = planner.validate(authConfig.authorizeQueryContextParams()); return validationResult; } // we can't collapse catch clauses since SqlPlanningException has type-sensitive constructors. @@ -269,14 +269,14 @@ private void checkAccess(Access access) /** * Prepare the query lifecycle for execution, without completely planning into something that is executable, but - * including some initial parsing and validation and any dyanmic parameter type resolution, to support prepared + * including some initial parsing and validation and any dynamic parameter type resolution, to support prepared * statements via JDBC. 
*/ public PrepareResult prepare() throws RelConversionException { synchronized (stateLock) { if (state != State.AUTHORIZED) { - throw new ISE("Cannot prepare because current state[%s] is not [%s].", state, State.AUTHORIZED); + throw new ISE("Cannot prepare because current state [%s] is not [%s].", state, State.AUTHORIZED); } } Preconditions.checkNotNull(plannerContext, "Cannot prepare, plannerContext is null"); @@ -314,6 +314,12 @@ public void plan() throws RelConversionException } } + @VisibleForTesting + public PlannerResult plannerResult() + { + return plannerResult; + } + /** * This method must be called after {@link #plan()}. */ @@ -376,7 +382,6 @@ public void after(boolean isDone, Throwable thrown) }); } - @VisibleForTesting public ValidationResult runAnalyzeResources(AuthenticationResult authenticationResult) { diff --git a/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java b/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java index b3c7e41284e0..6b7ed6e9716a 100644 --- a/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java +++ b/sql/src/main/java/org/apache/druid/sql/avatica/DruidStatement.java @@ -54,6 +54,7 @@ public class DruidStatement implements Closeable { public static final long START_OFFSET = 0; + private final String connectionId; private final int statementId; private final QueryContext queryContext; @@ -82,7 +83,7 @@ public class DruidStatement implements Closeable private long maxRowCount; private Meta.Signature signature; private Yielder yielder; - private int offset = 0; + private int offset; private Throwable throwable; private AuthenticationResult authenticationResult; @@ -207,7 +208,6 @@ public DruidStatement prepare( } } - public DruidStatement execute(List parameters) { synchronized (lock) { @@ -385,7 +385,6 @@ private AvaticaParameter createParameter(RelDataTypeField field, RelDataType typ ); } - private DruidStatement closeAndPropagateThrowable(Throwable t) { this.throwable = t; diff --git 
a/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java b/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java index 67bc6cbfe884..fc49900eb83b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/expression/Expressions.java @@ -497,7 +497,7 @@ private static DimFilter toSimpleLeafFilter( || kind == SqlKind.LESS_THAN || kind == SqlKind.LESS_THAN_OR_EQUAL) { final List operands = ((RexCall) rexNode).getOperands(); - Preconditions.checkState(operands.size() == 2, "Expected 2 operands, got[%,d]", operands.size()); + Preconditions.checkState(operands.size() == 2, "Expected 2 operands, got [%,d]", operands.size()); boolean flip = false; RexNode lhs = operands.get(0); RexNode rhs = operands.get(1); @@ -532,7 +532,7 @@ private static DimFilter toSimpleLeafFilter( flippedKind = SqlKind.GREATER_THAN_OR_EQUAL; break; default: - throw new ISE("Kind[%s] not expected here", kind); + throw new ISE("Kind [%s] not expected here", kind); } } else { flippedKind = kind; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/Calcites.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/Calcites.java index c12c69a85e0d..1f5237d4590a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/Calcites.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/Calcites.java @@ -135,7 +135,32 @@ public static String escapeStringLiteral(final String s) } builder.append("'"); return isPlainAscii ? builder.toString() : "U&" + builder; + } + /** + * Relaxed form of the above. Only escapes those characters that are + * absolutely necessary: single quotes and non-ASCII characters. SQL is + * perfectly happy with newlines and punctuation inside a quote. This + * form is more readable. 
+ */ + public static String escapeStringLiteralLenient(final String s) + { + Preconditions.checkNotNull(s); + boolean isPlainAscii = true; + final StringBuilder builder = new StringBuilder("'"); + for (int i = 0; i < s.length(); i++) { + final char c = s.charAt(i); + if (c == '\'') { + builder.append("''"); + } else if (c > 127) { + builder.append("\\").append(BaseEncoding.base16().encode(Chars.toByteArray(c))); + isPlainAscii = false; + } else { + builder.append(c); + } + } + builder.append("'"); + return isPlainAscii ? builder.toString() : "U&" + builder; } /** @@ -264,8 +289,6 @@ public static RelDataType createSqlArrayTypeWithNullability( final boolean nullable ) { - - final RelDataType dataType = typeFactory.createArrayType( createSqlTypeWithNullability(typeFactory, elementTypeName, nullable), -1 diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/CapturedState.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/CapturedState.java new file mode 100644 index 000000000000..f8b7092d54f5 --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/CapturedState.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.sql.calcite.planner; + +import org.apache.calcite.interpreter.BindableRel; +import org.apache.calcite.rel.RelRoot; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.sql.SqlInsert; +import org.apache.calcite.sql.SqlNode; +import org.apache.druid.sql.calcite.rel.DruidRel; + +/** + * Planner state capture for tests. The captured objects are available as + * public fields since this is only ever meant to be used in tests, and + * tests are already tightly coupled to the planner. + *

+ * Spotbugs really doesn't like public fields referenced in another (test) + * package. This file appears in spotbugs-exclude.xml to avoid the issue. + */ +public class CapturedState implements PlannerStateCapture +{ + public String sql; + public SqlNode sqlNode; + public RelRoot relRoot; + public DruidRel druidRel; + public RelDataType parameterTypes; + public PlannerContext plannerContext; + public ValidationResult validationResult; + public SqlNode queryNode; + public SqlInsert insertNode; + public BindableRel bindableRel; + public Object execPlan; + + @Override + public void capturePlannerContext(PlannerContext plannerContext) + { + this.plannerContext = plannerContext; + } + + @Override + public void captureSql(String sql) + { + this.sql = sql; + } + + @Override + public void captureParse(SqlNode root) + { + this.sqlNode = root; + } + + @Override + public void captureQueryRel(RelRoot rootQueryRel) + { + this.relRoot = rootQueryRel; + } + + @Override + public void captureDruidRel(DruidRel druidRel) + { + this.druidRel = druidRel; + this.execPlan = druidRel.dryRun(); + } + + @Override + public void captureParameterTypes(RelDataType parameterTypes) + { + this.parameterTypes = parameterTypes; + } + + @Override + public void captureValidationResult(ValidationResult validationResult) + { + this.validationResult = validationResult; + } + + @Override + public void captureQuery(SqlNode query) + { + this.queryNode = query; + } + + @Override + public void captureInsert(SqlInsert insert) + { + this.insertNode = insert; + } + + @Override + public void captureBindableRel(BindableRel bindableRel) + { + this.bindableRel = bindableRel; + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java index ff139c7153f8..9b27dc78098b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java @@ -23,6 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Supplier; import com.google.common.base.Suppliers; @@ -110,11 +111,18 @@ public class DruidPlanner implements Closeable { private static final EmittingLogger log = new EmittingLogger(DruidPlanner.class); private static final Pattern UNNAMED_COLUMN_PATTERN = Pattern.compile("^EXPR\\$\\d+$", Pattern.CASE_INSENSITIVE); + @VisibleForTesting + public static final String UNNAMED_INGESTION_COLUMN_ERROR = + "Cannot ingest expressions that do not have an alias " + + "or columns with names like EXPR$[digit].\n" + + "E.g. if you are ingesting \"func(X)\", then you can rewrite it as " + + "\"func(X) as myColumn\""; private final FrameworkConfig frameworkConfig; private final Planner planner; private final PlannerContext plannerContext; private final QueryMakerFactory queryMakerFactory; + private PlannerStateCapture stateCapture; private RexBuilder rexBuilder; @@ -128,6 +136,23 @@ public class DruidPlanner implements Closeable this.planner = Frameworks.getPlanner(frameworkConfig); this.plannerContext = plannerContext; this.queryMakerFactory = queryMakerFactory; + this.stateCapture = new NoOpCapture(); + } + + public void captureState(PlannerStateCapture capture) + { + this.stateCapture = capture; + this.stateCapture.capturePlannerContext(plannerContext); + } + + private ParsedNodes parse() throws SqlParseException, ValidationException + { + resetPlanner(); + SqlNode root = planner.parse(plannerContext.getSql()); + stateCapture.captureSql(plannerContext.getSql()); + final ParsedNodes parsed = ParsedNodes.create(root, plannerContext.getTimeZone()); + stateCapture.captureParse(root); + return parsed; } 
/** @@ -137,8 +162,7 @@ public class DruidPlanner implements Closeable */ public ValidationResult validate(boolean authorizeContextParams) throws SqlParseException, ValidationException { - resetPlanner(); - final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql()), plannerContext.getTimeZone()); + final ParsedNodes parsed = parse(); final SqlValidator validator = getValidator(); final SqlNode validatedQueryNode; @@ -165,7 +189,9 @@ public ValidationResult validate(boolean authorizeContextParams) throws SqlParse } plannerContext.setResourceActions(resourceActions); - return new ValidationResult(resourceActions); + ValidationResult validationResult = new ValidationResult(resourceActions); + stateCapture.captureValidationResult(validationResult); + return validationResult; } /** @@ -177,15 +203,16 @@ public ValidationResult validate(boolean authorizeContextParams) throws SqlParse */ public PrepareResult prepare() throws SqlParseException, ValidationException, RelConversionException { - resetPlanner(); - - final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql()), plannerContext.getTimeZone()); + final ParsedNodes parsed = parse(); final SqlNode validatedQueryNode = planner.validate(parsed.getQueryNode()); + stateCapture.captureQuery(validatedQueryNode); final RelRoot rootQueryRel = planner.rel(validatedQueryNode); + stateCapture.captureQueryRel(rootQueryRel); final SqlValidator validator = getValidator(); final RelDataTypeFactory typeFactory = rootQueryRel.rel.getCluster().getTypeFactory(); final RelDataType parameterTypes = validator.getParameterRowType(validator.validate(validatedQueryNode)); + stateCapture.captureParameterTypes(parameterTypes); final RelDataType returnedRowType; if (parsed.getExplainNode() != null) { @@ -208,9 +235,7 @@ public PrepareResult prepare() throws SqlParseException, ValidationException, Re */ public PlannerResult plan() throws SqlParseException, ValidationException, 
RelConversionException { - resetPlanner(); - - final ParsedNodes parsed = ParsedNodes.create(planner.parse(plannerContext.getSql()), plannerContext.getTimeZone()); + final ParsedNodes parsed = parse(); try { if (parsed.getIngestionGranularity() != null) { @@ -235,6 +260,8 @@ public PlannerResult plan() throws SqlParseException, ValidationException, RelCo this.rexBuilder = new RexBuilder(planner.getTypeFactory()); final SqlNode parameterizedQueryNode = rewriteDynamicParameters(parsed.getQueryNode()); final SqlNode validatedQueryNode = planner.validate(parameterizedQueryNode); + stateCapture.captureQuery(validatedQueryNode); + stateCapture.captureInsert(parsed.getInsertOrReplace()); final RelRoot rootQueryRel = planner.rel(validatedQueryNode); try { @@ -243,7 +270,7 @@ public PlannerResult plan() throws SqlParseException, ValidationException, RelCo catch (Exception e) { Throwable cannotPlanException = Throwables.getCauseOfType(e, RelOptPlanner.CannotPlanException.class); if (null == cannotPlanException) { - // Not a CannotPlanningException, rethrow without trying with bindable + // Not a CannotPlanException, rethrow without trying with bindable throw e; } @@ -310,7 +337,9 @@ private PlannerResult planWithDruidConvention( ) throws ValidationException, RelConversionException { final RelRoot possiblyLimitedRoot = possiblyWrapRootWithOuterLimitFromContext(root); - final QueryMaker queryMaker = buildQueryMaker(root, insertOrReplace); + stateCapture.captureQueryRel(possiblyLimitedRoot); + + final QueryMaker queryMaker = buildQueryMaker(possiblyLimitedRoot, insertOrReplace); plannerContext.setQueryMaker(queryMaker); RelNode parameterized = rewriteRelDynamicParameters(possiblyLimitedRoot.rel); @@ -321,6 +350,7 @@ private PlannerResult planWithDruidConvention( .plus(root.collation), parameterized ); + stateCapture.captureDruidRel(druidRel); if (explain != null) { return planExplanation(druidRel, explain, true); @@ -333,6 +363,9 @@ private PlannerResult 
planWithDruidConvention( .filter(action -> action.getAction() == Action.READ) .collect(Collectors.toSet()); + // TODO: This is not really a state check since there is a race condition. + // This can be seen as verifying that a check was done, or as redoing the + // check with the latest info (if the permissions are updated in between.) Preconditions.checkState( readResourceActions.isEmpty() == druidRel.getDataSourceNames().isEmpty() // The resources found in the plannerContext can be less than the datasources in @@ -366,6 +399,7 @@ private PlannerResult planWithBindableConvention( planner.getEmptyTraitSet().replace(BindableConvention.INSTANCE).plus(root.collation), root.rel ); + stateCapture.captureBindableRel(bindableRel); if (!root.isRefTrivial()) { // Add a projection on top to accommodate root.fields. @@ -469,7 +503,7 @@ private PlannerResult planExplanation( /** * This method doesn't utilize the Calcite's internal {@link RelOptUtil#dumpPlan} since that tends to be verbose * and not indicative of the native Druid Queries which will get executed - * This method assumes that the Planner has converted the RelNodes to DruidRels, and thereby we can implictly cast it + * This method assumes that the Planner has converted the RelNodes to DruidRels, and thereby we can implicitly cast it * * @param rel Instance of the root {@link DruidRel} which is formed by running the planner transformations on it * @return A string representing an array of native queries that correspond to the given SQL query, in JSON format @@ -736,10 +770,7 @@ private void validateColumnsForIngestion(RelRoot rootQueryRel) throws Validation // Check that there are no unnamed columns in the insert. for (Pair field : rootQueryRel.fields) { if (UNNAMED_COLUMN_PATTERN.matcher(field.right).matches()) { - throw new ValidationException("Cannot ingest expressions that do not have an alias " - + "or columns with names like EXPR$[digit]." - + "E.g. 
if you are ingesting \"func(X)\", then you can rewrite it as " - + "\"func(X) as myColumn\""); + throw new ValidationException(UNNAMED_INGESTION_COLUMN_ERROR); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/NoOpCapture.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/NoOpCapture.java new file mode 100644 index 000000000000..2dd0863e6912 --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/NoOpCapture.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.planner; + +import org.apache.calcite.interpreter.BindableRel; +import org.apache.calcite.rel.RelRoot; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.sql.SqlInsert; +import org.apache.calcite.sql.SqlNode; +import org.apache.druid.sql.calcite.rel.DruidRel; + +/** + * Do-nothing planner state capture used in production code. 
+ */ +public class NoOpCapture implements PlannerStateCapture +{ + @Override + public void capturePlannerContext(PlannerContext plannerContext) + { + } + + @Override + public void captureSql(String sql) + { + } + + @Override + public void captureParse(SqlNode root) + { + } + + @Override + public void captureQueryRel(RelRoot rootQueryRel) + { + } + + @Override + public void captureDruidRel(DruidRel druidRel) + { + } + + @Override + public void captureParameterTypes(RelDataType parameterTypes) + { + } + + @Override + public void captureValidationResult(ValidationResult validationResult) + { + } + + @Override + public void captureQuery(SqlNode query) + { + } + + @Override + public void captureInsert(SqlInsert insert) + { + } + + @Override + public void captureBindableRel(BindableRel bindableRel) + { + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerConfig.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerConfig.java index f7ceaf51f6ba..6d9908a68925 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerConfig.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerConfig.java @@ -83,6 +83,8 @@ public class PlannerConfig @JsonProperty private int maxNumericInFilters = NUM_FILTER_NOT_USED; + private boolean serializeComplexValues = true; + public long getMetadataSegmentPollPeriod() { return metadataSegmentPollPeriod; @@ -98,8 +100,6 @@ public boolean isMetadataSegmentCacheEnable() return metadataSegmentCacheEnable; } - private boolean serializeComplexValues = true; - public Period getMetadataRefreshPeriod() { return metadataRefreshPeriod; @@ -174,71 +174,9 @@ public PlannerConfig withOverrides(final QueryContext queryContext) if (queryContext.isEmpty()) { return this; } - - final PlannerConfig newConfig = new PlannerConfig(); - newConfig.metadataRefreshPeriod = getMetadataRefreshPeriod(); - newConfig.maxTopNLimit = getMaxTopNLimit(); - newConfig.useApproximateCountDistinct = 
queryContext.getAsBoolean( - CTX_KEY_USE_APPROXIMATE_COUNT_DISTINCT, - isUseApproximateCountDistinct() - ); - newConfig.useGroupingSetForExactDistinct = queryContext.getAsBoolean( - CTX_KEY_USE_GROUPING_SET_FOR_EXACT_DISTINCT, - isUseGroupingSetForExactDistinct() - ); - newConfig.useApproximateTopN = queryContext.getAsBoolean( - CTX_KEY_USE_APPROXIMATE_TOPN, - isUseApproximateTopN() - ); - newConfig.computeInnerJoinCostAsFilter = queryContext.getAsBoolean( - CTX_COMPUTE_INNER_JOIN_COST_AS_FILTER, - computeInnerJoinCostAsFilter - ); - newConfig.useNativeQueryExplain = queryContext.getAsBoolean( - CTX_KEY_USE_NATIVE_QUERY_EXPLAIN, - isUseNativeQueryExplain() - ); - newConfig.forceExpressionVirtualColumns = queryContext.getAsBoolean( - CTX_KEY_FORCE_EXPRESSION_VIRTUAL_COLUMNS, - isForceExpressionVirtualColumns() - ); - final int systemConfigMaxNumericInFilters = getMaxNumericInFilters(); - final int queryContextMaxNumericInFilters = queryContext.getAsInt( - CTX_MAX_NUMERIC_IN_FILTERS, - getMaxNumericInFilters() - ); - newConfig.maxNumericInFilters = validateMaxNumericInFilters(queryContextMaxNumericInFilters, - systemConfigMaxNumericInFilters); - newConfig.requireTimeCondition = isRequireTimeCondition(); - newConfig.sqlTimeZone = getSqlTimeZone(); - newConfig.awaitInitializationOnStart = isAwaitInitializationOnStart(); - newConfig.metadataSegmentCacheEnable = isMetadataSegmentCacheEnable(); - newConfig.metadataSegmentPollPeriod = getMetadataSegmentPollPeriod(); - newConfig.serializeComplexValues = shouldSerializeComplexValues(); - newConfig.authorizeSystemTablesDirectly = isAuthorizeSystemTablesDirectly(); - return newConfig; - } - - private int validateMaxNumericInFilters(int queryContextMaxNumericInFilters, int systemConfigMaxNumericInFilters) - { - // if maxNumericInFIlters through context == 0 catch exception - // else if query context exceeds system set value throw error - if (queryContextMaxNumericInFilters == 0) { - throw new UOE("[%s] must be greater than 0", 
CTX_MAX_NUMERIC_IN_FILTERS); - } else if (queryContextMaxNumericInFilters > systemConfigMaxNumericInFilters - && systemConfigMaxNumericInFilters != NUM_FILTER_NOT_USED) { - throw new UOE( - "Expected parameter[%s] cannot exceed system set value of [%d]", - CTX_MAX_NUMERIC_IN_FILTERS, - systemConfigMaxNumericInFilters - ); - } - // if system set value is not present, thereby inferring default of -1 - if (systemConfigMaxNumericInFilters == NUM_FILTER_NOT_USED) { - return systemConfigMaxNumericInFilters; - } - // all other cases return the valid query context value - return queryContextMaxNumericInFilters; + return toBuilder() + .withOverrides(queryContext) + .build(); } @Override @@ -302,4 +240,207 @@ public String toString() ", useNativeQueryExplain=" + useNativeQueryExplain + '}'; } + + public static Builder builder() + { + return new PlannerConfig().toBuilder(); + } + + public Builder toBuilder() + { + return new Builder(this); + } + + /** + * Builder for {@link PlannerConfig}, primarily for use in tests to + * allow setting options programmatically rather than from the command + * line or a properties file. Starts with values from an existing + * (typically default) config. 
+ */ + public static class Builder + { + private Period metadataRefreshPeriod; + private int maxTopNLimit; + private boolean useApproximateCountDistinct; + private boolean useApproximateTopN; + private boolean requireTimeCondition; + private boolean awaitInitializationOnStart; + private DateTimeZone sqlTimeZone; + private boolean metadataSegmentCacheEnable; + private long metadataSegmentPollPeriod; + private boolean useGroupingSetForExactDistinct; + private boolean computeInnerJoinCostAsFilter; + private boolean authorizeSystemTablesDirectly; + private boolean useNativeQueryExplain; + private boolean forceExpressionVirtualColumns; + private int maxNumericInFilters; + private boolean serializeComplexValues; + + public Builder(PlannerConfig base) + { + metadataRefreshPeriod = base.metadataRefreshPeriod; + maxTopNLimit = base.maxTopNLimit; + useApproximateCountDistinct = base.useApproximateCountDistinct; + useApproximateTopN = base.useApproximateTopN; + requireTimeCondition = base.requireTimeCondition; + awaitInitializationOnStart = base.awaitInitializationOnStart; + sqlTimeZone = base.sqlTimeZone; + metadataSegmentCacheEnable = base.metadataSegmentCacheEnable; + useGroupingSetForExactDistinct = base.useGroupingSetForExactDistinct; + metadataSegmentCacheEnable = base.metadataSegmentCacheEnable; + metadataSegmentPollPeriod = base.metadataSegmentPollPeriod; + computeInnerJoinCostAsFilter = base.computeInnerJoinCostAsFilter; + authorizeSystemTablesDirectly = base.authorizeSystemTablesDirectly; + useNativeQueryExplain = base.useNativeQueryExplain; + forceExpressionVirtualColumns = base.isForceExpressionVirtualColumns(); + maxNumericInFilters = base.maxNumericInFilters; + serializeComplexValues = base.serializeComplexValues; + } + + public Builder requireTimeCondition(boolean option) + { + this.requireTimeCondition = option; + return this; + } + + public Builder maxTopNLimit(int value) + { + this.maxTopNLimit = value; + return this; + } + + public Builder 
maxNumericInFilters(int value) + { + this.maxNumericInFilters = value; + return this; + } + + public Builder useApproximateCountDistinct(boolean option) + { + this.useApproximateCountDistinct = option; + return this; + } + + public Builder useApproximateTopN(boolean option) + { + this.useApproximateTopN = option; + return this; + } + + public Builder useGroupingSetForExactDistinct(boolean option) + { + this.useGroupingSetForExactDistinct = option; + return this; + } + + public Builder computeInnerJoinCostAsFilter(boolean option) + { + this.computeInnerJoinCostAsFilter = option; + return this; + } + + public Builder sqlTimeZone(DateTimeZone value) + { + this.sqlTimeZone = value; + return this; + } + + public Builder authorizeSystemTablesDirectly(boolean option) + { + this.authorizeSystemTablesDirectly = option; + return this; + } + + public Builder serializeComplexValues(boolean option) + { + this.serializeComplexValues = option; + return this; + } + + public Builder useNativeQueryExplain(boolean option) + { + this.useNativeQueryExplain = option; + return this; + } + + public Builder withOverrides(final QueryContext queryContext) + { + useApproximateCountDistinct = queryContext.getAsBoolean( + CTX_KEY_USE_APPROXIMATE_COUNT_DISTINCT, + useApproximateCountDistinct + ); + useGroupingSetForExactDistinct = queryContext.getAsBoolean( + CTX_KEY_USE_GROUPING_SET_FOR_EXACT_DISTINCT, + useGroupingSetForExactDistinct + ); + useApproximateTopN = queryContext.getAsBoolean( + CTX_KEY_USE_APPROXIMATE_TOPN, + useApproximateTopN + ); + computeInnerJoinCostAsFilter = queryContext.getAsBoolean( + CTX_COMPUTE_INNER_JOIN_COST_AS_FILTER, + computeInnerJoinCostAsFilter + ); + useNativeQueryExplain = queryContext.getAsBoolean( + CTX_KEY_USE_NATIVE_QUERY_EXPLAIN, + useNativeQueryExplain + ); + forceExpressionVirtualColumns = queryContext.getAsBoolean( + CTX_KEY_FORCE_EXPRESSION_VIRTUAL_COLUMNS, + forceExpressionVirtualColumns + ); + final int queryContextMaxNumericInFilters = 
queryContext.getAsInt( + CTX_MAX_NUMERIC_IN_FILTERS, + maxNumericInFilters + ); + maxNumericInFilters = validateMaxNumericInFilters( + queryContextMaxNumericInFilters, + maxNumericInFilters); + return this; + } + + private static int validateMaxNumericInFilters(int queryContextMaxNumericInFilters, int systemConfigMaxNumericInFilters) + { + // if maxNumericInFilters through context == 0, throw an exception + // else if query context exceeds system set value throw error + if (queryContextMaxNumericInFilters == 0) { + throw new UOE("[%s] must be greater than 0", CTX_MAX_NUMERIC_IN_FILTERS); + } else if (queryContextMaxNumericInFilters > systemConfigMaxNumericInFilters + && systemConfigMaxNumericInFilters != NUM_FILTER_NOT_USED) { + throw new UOE( + "Expected parameter[%s] cannot exceed system set value of [%d]", + CTX_MAX_NUMERIC_IN_FILTERS, + systemConfigMaxNumericInFilters + ); + } + // if system set value is not present, thereby inferring default of -1 + if (systemConfigMaxNumericInFilters == NUM_FILTER_NOT_USED) { + return systemConfigMaxNumericInFilters; + } + // all other cases return the valid query context value + return queryContextMaxNumericInFilters; + } + + public PlannerConfig build() + { + PlannerConfig config = new PlannerConfig(); + config.metadataRefreshPeriod = metadataRefreshPeriod; + config.maxTopNLimit = maxTopNLimit; + config.useApproximateCountDistinct = useApproximateCountDistinct; + config.useApproximateTopN = useApproximateTopN; + config.requireTimeCondition = requireTimeCondition; + config.awaitInitializationOnStart = awaitInitializationOnStart; + config.sqlTimeZone = sqlTimeZone; + config.metadataSegmentCacheEnable = metadataSegmentCacheEnable; + config.metadataSegmentPollPeriod = metadataSegmentPollPeriod; + config.useGroupingSetForExactDistinct = useGroupingSetForExactDistinct; + config.computeInnerJoinCostAsFilter = computeInnerJoinCostAsFilter; + config.authorizeSystemTablesDirectly = authorizeSystemTablesDirectly; + 
config.useNativeQueryExplain = useNativeQueryExplain; + config.maxNumericInFilters = maxNumericInFilters; + config.forceExpressionVirtualColumns = forceExpressionVirtualColumns; + config.serializeComplexValues = serializeComplexValues; + return config; + } + } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java index 41b5a6340c53..42afc7054292 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java @@ -54,8 +54,8 @@ import java.util.concurrent.CopyOnWriteArrayList; /** - * Like {@link PlannerConfig}, but that has static configuration and this class contains dynamic, per-query - * configuration. + * Like {@link PlannerConfig}, but that has static configuration and this class + * contains dynamic, per-query configuration. */ public class PlannerContext { @@ -65,8 +65,9 @@ public class PlannerContext public static final String CTX_SQL_TIME_ZONE = "sqlTimeZone"; public static final String CTX_SQL_STRINGIFY_ARRAYS = "sqlStringifyArrays"; - // This context parameter is an undocumented parameter, used internally, to allow the web console to - // apply a limit without having to rewrite the SQL query. + // This context parameter is an undocumented parameter, used internally, + // to allow the web console to apply a limit without having to rewrite + // the SQL query. 
public static final String CTX_SQL_OUTER_LIMIT = "sqlOuterLimit"; // DataContext keys @@ -85,10 +86,12 @@ public class PlannerContext private final CopyOnWriteArrayList nativeQueryIds = new CopyOnWriteArrayList<>(); // bindings for dynamic parameters to bind during planning private List parameters = Collections.emptyList(); - // result of authentication, providing identity to authorize set of resources produced by validation + // result of authentication, providing identity to authorize set of resources + // produced by validation private AuthenticationResult authenticationResult; - // set of datasources and views which must be authorized, initialized to null so we can detect if it has been set. - private Set resourceActions = null; + // set of datasources and views which must be authorized, initialized to null + // so we can detect if it has been set. + private Set resourceActions; // result of authorizing set of resources against authentication identity private Access authorizationResult; // error messages encountered while planning the query diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerStateCapture.java b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerStateCapture.java new file mode 100644 index 000000000000..5b9c25d411ed --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerStateCapture.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.planner; + +import org.apache.calcite.interpreter.BindableRel; +import org.apache.calcite.rel.RelRoot; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.sql.SqlInsert; +import org.apache.calcite.sql.SqlNode; +import org.apache.druid.sql.calcite.rel.DruidRel; + +/** + * Generic mechanism to capture internal planner state for inspection + * in tests. + */ +public interface PlannerStateCapture +{ + void capturePlannerContext(PlannerContext plannerContext); + void captureSql(String sql); + void captureBindableRel(BindableRel bindableRel); + void captureValidationResult(ValidationResult validationResult); + void captureParameterTypes(RelDataType parameterTypes); + void captureDruidRel(DruidRel druidRel); + void captureQueryRel(RelRoot rootQueryRel); + void captureParse(SqlNode root); + void captureQuery(SqlNode query); + void captureInsert(SqlInsert insert); +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidOuterQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidOuterQueryRel.java index d9bd16343ef1..dfccdfc863e3 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidOuterQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidOuterQueryRel.java @@ -102,7 +102,7 @@ public DruidOuterQueryRel withPartialQuery(final PartialDruidQuery newQueryBuild public DruidQuery toDruidQuery(final boolean finalizeAggregations) { // Must finalize aggregations on subqueries. 
- final DruidQuery subQuery = ((DruidRel) sourceRel).toDruidQuery(true); + final DruidQuery subQuery = ((DruidRel) sourceRel).toDruidQuery(true); final RowSignature sourceRowSignature = subQuery.getOutputRowSignature(); return partialQuery.build( new QueryDataSource(subQuery.getQuery()), @@ -150,7 +150,7 @@ public List getInputs() public void replaceInput(int ordinalInParent, RelNode p) { if (ordinalInParent != 0) { - throw new IndexOutOfBoundsException(StringUtils.format("Invalid ordinalInParent[%s]", ordinalInParent)); + throw new IndexOutOfBoundsException(StringUtils.format("Invalid ordinalInParent [%s]", ordinalInParent)); } this.sourceRel = p; } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java index 075401eff102..f670f365667b 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQuery.java @@ -126,7 +126,7 @@ public class DruidQuery @Nullable private final Sorting sorting; - private final Query query; + private final Query query; private final RowSignature outputRowSignature; private final RelDataType outputRowType; private final VirtualColumnRegistry virtualColumnRegistry; @@ -785,7 +785,7 @@ public RowSignature getOutputRowSignature() return outputRowSignature; } - public Query getQuery() + public Query getQuery() { return query; } @@ -796,7 +796,7 @@ public Query getQuery() * * @return Druid query */ - private Query computeQuery(final QueryFeatureInspector queryFeatureInspector) + private Query computeQuery(final QueryFeatureInspector queryFeatureInspector) { if (dataSource instanceof QueryDataSource) { // If there is a subquery, then we prefer the outer query to be a groupBy if possible, since this potentially diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQueryRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQueryRel.java index 
373e3d4abffd..1f18df5960fd 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQueryRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidQueryRel.java @@ -65,7 +65,7 @@ private DruidQueryRel( } /** - * Create a DruidQueryRel representing a full scan of a builtin table or lookup. + * Create a DruidQueryRel representing a full scan of a built-in table or lookup. */ public static DruidQueryRel scanTable( final LogicalTableScan scanRel, @@ -103,7 +103,7 @@ public static DruidQueryRel scanExternal( } /** - * Create a DruidQueryRel representing a full scan of inline, literal values. + * Create a DruidQueryRel representing a full scan of in-line, literal values. */ public static DruidQueryRel scanValues( final LogicalValues valuesRel, diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidRel.java index 6f601ec5aa52..3105673c3987 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidRel.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidRel.java @@ -28,7 +28,7 @@ import javax.annotation.Nullable; import java.util.Set; -public abstract class DruidRel extends AbstractRelNode +public abstract class DruidRel> extends AbstractRelNode { private final PlannerContext plannerContext; @@ -54,6 +54,11 @@ public Sequence runQuery() return getPlannerContext().getQueryMaker().runQuery(toDruidQuery(false)); } + public Object dryRun() + { + return getPlannerContext().getQueryMaker().explain(toDruidQuery(false)); + } + public abstract T withPartialQuery(PartialDruidQuery newQueryBuilder); public boolean isValidDruidQuery() diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionRel.java b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionRel.java index 25e6e9f52326..32062d3086c6 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionRel.java +++
b/sql/src/main/java/org/apache/druid/sql/calcite/rel/DruidUnionRel.java @@ -21,6 +21,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.FluentIterable; +import com.google.common.collect.ImmutableMap; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelOptCost; import org.apache.calcite.plan.RelOptPlanner; @@ -115,6 +116,18 @@ public Sequence runQuery() } } + @Override + public Object dryRun() + { + List plans = new ArrayList<>(); + if (limit > 0) { + for (RelNode rel : rels) { + plans.add(((DruidRel) rel).dryRun()); + } + } + return ImmutableMap.of("type", "union", "inputs", plans); + } + @Override public DruidUnionRel withPartialQuery(final PartialDruidQuery newQueryBuilder) { diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/rule/FilterJoinExcludePushToChildRule.java b/sql/src/main/java/org/apache/druid/sql/calcite/rule/FilterJoinExcludePushToChildRule.java index 42759a8f9b57..23abd6520712 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/rule/FilterJoinExcludePushToChildRule.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/rule/FilterJoinExcludePushToChildRule.java @@ -185,7 +185,7 @@ protected void perform(RelOptRuleCall call, Filter filter, Join join) filterPushed = true; } - // once the filters are pushed to join from top, try to remove redudant 'IS NOT NULL' filters + // once the filters are pushed to join from top, try to remove redundant 'IS NOT NULL' filters removeRedundantIsNotNullFilters(joinFilters, joinType, NullHandling.sqlCompatible()); // if nothing actually got pushed and there is nothing leftover, diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java index 1bd6c8cb62e5..03d5433ad150 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/NativeQueryMaker.java @@ -19,6 
+19,8 @@ package org.apache.druid.sql.calcite.run; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Iterables; @@ -120,11 +122,75 @@ public boolean feature(QueryFeature feature) } } + @Override + public Object explain(DruidQuery druidQuery) + { + final Query query = druidQuery.getQuery(); + return prepare(druidQuery, query); + } + + /** + * Execution plan: primarily for testing. Captures the final form + * of the query used by the execution engine. Jackson-serializable, + * but only for serialization: never deserialized. + */ + @JsonPropertyOrder({"query", "rowOrder", "newFields", "newTypes"}) + public static class ExecutionPlan + { + final Query query; + final List rowOrder; + final List newFields; + final List newTypes; + + public ExecutionPlan( + final Query query, + final List rowOrder, + final List newFields, + final List newTypes) + { + super(); + this.query = query; + this.rowOrder = rowOrder; + this.newFields = newFields; + this.newTypes = newTypes; + } + + @JsonProperty + public Query getQuery() + { + return query; + } + + @JsonProperty + public List getRowOrder() + { + return rowOrder; + } + + @JsonProperty + public List getNewFields() + { + return newFields; + } + + @JsonProperty + public List getNewTypes() + { + return newTypes; + } + } + @Override public Sequence runQuery(final DruidQuery druidQuery) { final Query query = druidQuery.getQuery(); + ExecutionPlan plan = prepare(druidQuery, query); + return execute(query, plan); + } + private ExecutionPlan prepare(final DruidQuery druidQuery, Query query) + { + // TODO: Move this check to plan time, not run time. 
if (plannerContext.getPlannerConfig().isRequireTimeCondition() && !(druidQuery.getDataSource() instanceof InlineDataSource)) { if (Intervals.ONLY_ETERNITY.equals(findBaseDataSourceIntervals(query))) { @@ -140,7 +206,7 @@ public Sequence runQuery(final DruidQuery druidQuery) // a BoundFilter is created internally for each of the values // whereas when Vi s are String the Filters are converted as BoundFilter to SelectorFilter to InFilter // which takes lesser processing for bitmaps - // So in a case where user executes a query with multiple numeric INs, flame graph shows BoundFilter.getBitmapColumnIndex + // So in a case where user executes a query with multiple numeric INs, flame graph shows BoundFilter.getBitmapResult // and BoundFilter.match predicate eating up processing time which stalls a historical for a query with large number // of numeric INs (> 10K). In such cases user should change the query to specify the IN clauses as String // Instead of IN(v1,v2,v3) user should specify IN('v1','v2','v3') @@ -185,8 +251,17 @@ public Sequence runQuery(final DruidQuery druidQuery) .map(f -> f.getType().getSqlTypeName()) .collect(Collectors.toList()); - return execute( + if (query.getId() == null) { + final String queryId = UUID.randomUUID().toString(); + plannerContext.addNativeQueryId(queryId); + query = query.withId(queryId); + } + + query = query.withSqlQueryId(plannerContext.getSqlQueryId()); + + return new ExecutionPlan( query, + rowOrder, mapColumnList(rowOrder, fieldMapping), mapColumnList(columnTypes, fieldMapping) ); @@ -200,17 +275,11 @@ private List findBaseDataSourceIntervals(Query query) .orElseGet(query::getIntervals); } - private Sequence execute(Query query, final List newFields, final List newTypes) + private Sequence execute(final Query originalQuery, final ExecutionPlan plan) { - Hook.QUERY_PLAN.run(query); - - if (query.getId() == null) { - final String queryId = UUID.randomUUID().toString(); - plannerContext.addNativeQueryId(queryId); - query = 
query.withId(queryId); - } - - query = query.withSqlQueryId(plannerContext.getSqlQueryId()); + @SuppressWarnings("unchecked") + Query query = (Query) plan.query; + Hook.QUERY_PLAN.run(originalQuery); final AuthenticationResult authenticationResult = plannerContext.getAuthenticationResult(); final Access authorizationResult = plannerContext.getAuthorizationResult(); @@ -223,11 +292,12 @@ private Sequence execute(Query query, final List newFie final Sequence results = queryLifecycle.runSimple(query, authenticationResult, authorizationResult); //noinspection unchecked + @SuppressWarnings("unchecked") final QueryToolChest> toolChest = queryLifecycle.getToolChest(); final List resultArrayFields = toolChest.resultArraySignature(query).getColumnNames(); final Sequence resultArrays = toolChest.resultsAsArrays(query, results); - return mapResultSequence(resultArrays, resultArrayFields, newFields, newTypes); + return mapResultSequence(resultArrays, resultArrayFields, plan.newFields, plan.newTypes); } private Sequence mapResultSequence( diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMaker.java b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMaker.java index c76504b2bddc..3bbbf75ef40a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMaker.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/run/QueryMaker.java @@ -19,6 +19,7 @@ package org.apache.druid.sql.calcite.run; +import com.google.common.collect.ImmutableMap; import org.apache.calcite.rel.type.RelDataType; import org.apache.druid.java.util.common.guava.Sequence; import org.apache.druid.sql.calcite.rel.DruidQuery; @@ -34,6 +35,17 @@ public interface QueryMaker extends QueryFeatureInspector */ RelDataType getResultType(); + /** + * Do everything that would be done to run a query, don't actually run. + * Instead return what would have been sent to the execution engine. + * The result is a Jackson-serializable query plan. 
+ */ + default Object explain(DruidQuery druidQuery) + { + // Temporary to ensure extensions don't break. + return ImmutableMap.of("type", "unsupported"); + } + /** * Executes a given Druid query, which is expected to correspond to the SQL query that this QueryMaker was originally * created for. The returned arrays match the row type given by {@link #getResultType()}. diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java index 702ad82e3846..4c8acf9eb2e8 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java @@ -193,17 +193,17 @@ public class DruidSchema extends AbstractSchema private final BrokerInternalQueryConfig brokerInternalQueryConfig; @GuardedBy("lock") - private boolean refreshImmediately = false; + private boolean refreshImmediately; @GuardedBy("lock") - private boolean isServerViewInitialized = false; + private boolean isServerViewInitialized; /** * Counts the total number of known segments. This variable is used only for the segments table in the system schema * to initialize a map with a more proper size when it creates a snapshot. As a result, it doesn't have to be exact, * and thus there is no concurrency control for this variable. */ - private int totalSegments = 0; + private int totalSegments; @Inject public DruidSchema( @@ -486,7 +486,7 @@ protected void addSegment(final DruidServerMetadata server, final DataSegment se // If a segment shows up on a replicatable (historical) server at any point, then it must be immutable, // even if it's also available on non-replicatable (realtime) servers. 
unmarkSegmentAsMutable(segment.getId()); - log.debug("Segment[%s] has become immutable.", segment.getId()); + log.debug("Segment [%s] has become immutable.", segment.getId()); } } assert segmentMetadata != null; @@ -511,7 +511,7 @@ void removeSegment(final DataSegment segment) { // Get lock first so that we won't wait in ConcurrentMap.compute(). synchronized (lock) { - log.debug("Segment[%s] is gone.", segment.getId()); + log.debug("Segment [%s] is gone.", segment.getId()); segmentsNeedingRefresh.remove(segment.getId()); unmarkSegmentAsMutable(segment.getId()); @@ -520,17 +520,17 @@ void removeSegment(final DataSegment segment) segment.getDataSource(), (dataSource, segmentsMap) -> { if (segmentsMap == null) { - log.warn("Unknown segment[%s] was removed from the cluster. Ignoring this event.", segment.getId()); + log.warn("Unknown segment [%s] was removed from the cluster. Ignoring this event.", segment.getId()); return null; } else { if (segmentsMap.remove(segment.getId()) == null) { - log.warn("Unknown segment[%s] was removed from the cluster. Ignoring this event.", segment.getId()); + log.warn("Unknown segment [%s] was removed from the cluster. Ignoring this event.", segment.getId()); } else { totalSegments--; } if (segmentsMap.isEmpty()) { tables.remove(segment.getDataSource()); - log.info("dataSource[%s] no longer exists, all metadata removed.", segment.getDataSource()); + log.info("dataSource [%s] no longer exists, all metadata removed.", segment.getDataSource()); return null; } else { markDataSourceAsNeedRebuild(segment.getDataSource()); @@ -549,13 +549,13 @@ void removeServerSegment(final DruidServerMetadata server, final DataSegment seg { // Get lock first so that we won't wait in ConcurrentMap.compute(). 
synchronized (lock) { - log.debug("Segment[%s] is gone from server[%s]", segment.getId(), server.getName()); + log.debug("Segment [%s] is gone from server [%s]", segment.getId(), server.getName()); segmentMetadataInfo.compute( segment.getDataSource(), (datasource, knownSegments) -> { if (knownSegments == null) { log.warn( - "Unknown segment[%s] is removed from server[%s]. Ignoring this event", + "Unknown segment [%s] is removed from server [%s]. Ignoring this event", segment.getId(), server.getHost() ); @@ -575,7 +575,7 @@ void removeServerSegment(final DruidServerMetadata server, final DataSegment seg (segmentId, segmentMetadata) -> { if (segmentMetadata == null) { log.warn( - "Unknown segment[%s] is removed from server[%s]. Ignoring this event", + "Unknown segment [%s] is removed from server [%s]. Ignoring this event", segment.getId(), server.getHost() ); @@ -690,7 +690,7 @@ private Set refreshSegmentsForDataSource(final String dataSource, fin throw new ISE("'segments' must all match 'dataSource'!"); } - log.debug("Refreshing metadata for dataSource[%s].", dataSource); + log.debug("Refreshing metadata for dataSource [%s].", dataSource); final long startTime = System.currentTimeMillis(); @@ -710,17 +710,17 @@ private Set refreshSegmentsForDataSource(final String dataSource, fin final SegmentId segmentId = segmentIdMap.get(analysis.getId()); if (segmentId == null) { - log.warn("Got analysis for segment[%s] we didn't ask for, ignoring.", analysis.getId()); + log.warn("Got analysis for segment [%s] we didn't ask for, ignoring.", analysis.getId()); } else { final RowSignature rowSignature = analysisToRowSignature(analysis); - log.debug("Segment[%s] has signature[%s].", segmentId, rowSignature); + log.debug("Segment [%s] has signature[%s].", segmentId, rowSignature); segmentMetadataInfo.compute( dataSource, (datasourceKey, dataSourceSegments) -> { if (dataSourceSegments == null) { // Datasource may have been removed or become unavailable while this refresh was ongoing. 
log.warn( - "No segment map found with datasource[%s], skipping refresh of segment[%s]", + "No segment map found with datasource [%s], skipping refresh of segment [%s]", datasourceKey, segmentId ); @@ -730,7 +730,7 @@ private Set refreshSegmentsForDataSource(final String dataSource, fin segmentId, (segmentIdKey, segmentMetadata) -> { if (segmentMetadata == null) { - log.warn("No segment[%s] found, skipping refresh", segmentId); + log.warn("No segment [%s] found, skipping refresh", segmentId); return null; } else { final AvailableSegmentMetadata updatedSegmentMetadata = AvailableSegmentMetadata @@ -762,7 +762,7 @@ private Set refreshSegmentsForDataSource(final String dataSource, fin } log.debug( - "Refreshed metadata for dataSource[%s] in %,d ms (%d segments queried, %d segments left).", + "Refreshed metadata for dataSource [%s] in %,d ms (%d segments queried, %d segments left).", dataSource, System.currentTimeMillis() - startTime, retVal.size(), @@ -913,7 +913,7 @@ static RowSignature analysisToRowSignature(final SegmentAnalysis analysis) // flavor of COMPLEX. 
if (valueType == null) { // at some point in the future this can be simplified to the contents of the catch clause here, once the - // likelyhood of upgrading from some version lower than 0.23 is low + // likelihood of upgrading from some version lower than 0.23 is low try { valueType = ColumnType.fromString(entry.getValue().getType()); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchemaCatalog.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchemaCatalog.java index 34f15af4baf0..ee1d1b4b934a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchemaCatalog.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchemaCatalog.java @@ -56,7 +56,7 @@ public DruidSchemaCatalog( } /** - * Root calcite schema, used to plan and execute queries + * Root Calcite schema, used to plan and execute queries */ public SchemaPlus getRootSchema() { diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/RootSchemaProvider.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/RootSchemaProvider.java index 0c6176335880..ae188253406a 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/RootSchemaProvider.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/RootSchemaProvider.java @@ -32,9 +32,9 @@ import java.util.stream.Collectors; /** - * Provides the RootSchema for calcite with + * Provides the RootSchema for Calcite with * - metadata schema disabled because it's not needed - * - caching disabled because druid's caching is better. + * - caching disabled because Druid's caching is better. * * All the provided schema are added to the rootSchema. 
*/ diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java b/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java index 73c2527d6f5f..6fe0c02747c5 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/table/RowSignatures.java @@ -101,7 +101,7 @@ public static RelDataType toRelDataType(final RowSignature rowSignature, final R } else { final ColumnType columnType = rowSignature.getColumnType(columnName) - .orElseThrow(() -> new ISE("Encountered null type for column[%s]", columnName)); + .orElseThrow(() -> new ISE("Encountered null type for column [%s]", columnName)); switch (columnType.getType()) { case STRING: @@ -129,7 +129,7 @@ public static RelDataType toRelDataType(final RowSignature rowSignature, final R type = Calcites.createSqlArrayTypeWithNullability(typeFactory, SqlTypeName.DOUBLE, nullNumeric); break; default: - throw new ISE("valueType[%s] not translatable", columnType); + throw new ISE("valueType [%s] not translatable", columnType); } break; case COMPLEX: @@ -139,7 +139,7 @@ public static RelDataType toRelDataType(final RowSignature rowSignature, final R ); break; default: - throw new ISE("valueType[%s] not translatable", columnType); + throw new ISE("valueType [%s] not translatable", columnType); } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/view/InProcessViewManager.java b/sql/src/main/java/org/apache/druid/sql/calcite/view/InProcessViewManager.java index 763d903e1975..8e3aa03ca141 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/view/InProcessViewManager.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/view/InProcessViewManager.java @@ -30,8 +30,8 @@ import java.util.concurrent.ConcurrentMap; /** - * View manager that stores all views in-process. Not meant for serious usage, since views are not saved nor - * are they shared across processes. 
+ * View manager that stores all views in-process. Not meant for serious usage, + * since views are not saved nor are they shared across processes. */ public class InProcessViewManager implements ViewManager { @@ -51,7 +51,7 @@ public void createView(final PlannerFactory plannerFactory, final String viewNam { final TableMacro oldValue = views.putIfAbsent(viewName, druidViewMacroFactory.create(plannerFactory, viewSql)); if (oldValue != null) { - throw new ISE("View[%s] already exists", viewName); + throw new ISE("View [%s] already exists", viewName); } } @@ -60,7 +60,7 @@ public void alterView(final PlannerFactory plannerFactory, final String viewName { final TableMacro oldValue = views.replace(viewName, druidViewMacroFactory.create(plannerFactory, viewSql)); if (oldValue != null) { - throw new ISE("View[%s] does not exist", viewName); + throw new ISE("View [%s] does not exist", viewName); } } @@ -69,7 +69,7 @@ public void dropView(final String viewName) { final TableMacro oldValue = views.remove(viewName); if (oldValue == null) { - throw new ISE("View[%s] does not exist", viewName); + throw new ISE("View [%s] does not exist", viewName); } } diff --git a/sql/src/main/java/org/apache/druid/sql/http/SqlParameter.java b/sql/src/main/java/org/apache/druid/sql/http/SqlParameter.java index 7e8e190d3efa..87f1cea387d3 100644 --- a/sql/src/main/java/org/apache/druid/sql/http/SqlParameter.java +++ b/sql/src/main/java/org/apache/druid/sql/http/SqlParameter.java @@ -100,7 +100,7 @@ public TypedValue getTypedValue() public String toString() { return "SqlParameter{" + - ", value={" + type.name() + ',' + value + '}' + + "value={" + type.name() + ',' + value + '}' + '}'; } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java index e5b190d90095..81da417bb4f2 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/BaseCalciteQueryTest.java @@ -91,6 +91,8 @@ import org.apache.druid.sql.calcite.planner.PlannerFactory; import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; import org.apache.druid.sql.calcite.schema.NoopDruidSchemaManager; +import org.apache.druid.sql.calcite.tester.CalciteTestCapture; +import org.apache.druid.sql.calcite.tester.CalciteTestRecorder; import org.apache.druid.sql.calcite.util.CalciteTestBase; import org.apache.druid.sql.calcite.util.CalciteTests; import org.apache.druid.sql.calcite.util.QueryLogHook; @@ -144,82 +146,42 @@ public static void setupNullValues() public static final Logger log = new Logger(BaseCalciteQueryTest.class); public static final PlannerConfig PLANNER_CONFIG_DEFAULT = new PlannerConfig(); - public static final PlannerConfig PLANNER_CONFIG_DEFAULT_NO_COMPLEX_SERDE = new PlannerConfig() - { - @Override - public boolean shouldSerializeComplexValues() - { - return false; - } - }; - public static final PlannerConfig PLANNER_CONFIG_REQUIRE_TIME_CONDITION = new PlannerConfig() - { - @Override - public boolean isRequireTimeCondition() - { - return true; - } - }; - public static final PlannerConfig PLANNER_CONFIG_NO_TOPN = new PlannerConfig() - { - @Override - public int getMaxTopNLimit() - { - return 0; - } - }; - public static final PlannerConfig PLANNER_CONFIG_NO_HLL = new PlannerConfig() - { - @Override - public boolean isUseApproximateCountDistinct() - { - return false; - } - }; - public static final PlannerConfig PLANNER_CONFIG_LOS_ANGELES = new PlannerConfig() - { - @Override - public DateTimeZone getSqlTimeZone() - { - return DateTimes.inferTzFromString("America/Los_Angeles"); - } - }; + public static final PlannerConfig PLANNER_CONFIG_DEFAULT_NO_COMPLEX_SERDE = + PlannerConfig.builder().serializeComplexValues(false).build(); - public static final PlannerConfig PLANNER_CONFIG_AUTHORIZE_SYS_TABLES = new PlannerConfig() - { - @Override - public boolean 
isAuthorizeSystemTablesDirectly() - { - return true; - } - }; + public static final PlannerConfig PLANNER_CONFIG_REQUIRE_TIME_CONDITION = + PlannerConfig.builder().requireTimeCondition(true).build(); - public static final PlannerConfig PLANNER_CONFIG_NATIVE_QUERY_EXPLAIN = new PlannerConfig() - { - @Override - public boolean isUseNativeQueryExplain() - { - return true; - } - }; + public static final PlannerConfig PLANNER_CONFIG_NO_TOPN = + PlannerConfig.builder().maxTopNLimit(0).build(); + + public static final PlannerConfig PLANNER_CONFIG_NO_HLL = + PlannerConfig.builder().useApproximateCountDistinct(false).build(); + + public static final String LOS_ANGELES = "America/Los_Angeles"; + public static final PlannerConfig PLANNER_CONFIG_LOS_ANGELES = + PlannerConfig + .builder() + .sqlTimeZone(DateTimes.inferTzFromString(LOS_ANGELES)) + .build(); + + public static final PlannerConfig PLANNER_CONFIG_AUTHORIZE_SYS_TABLES = + PlannerConfig.builder().authorizeSystemTablesDirectly(true).build(); + + public static final PlannerConfig PLANNER_CONFIG_NATIVE_QUERY_EXPLAIN = + PlannerConfig.builder().useNativeQueryExplain(true).build(); public static final int MAX_NUM_IN_FILTERS = 100; - public static final PlannerConfig PLANNER_CONFIG_MAX_NUMERIC_IN_FILTER = new PlannerConfig() - { - @Override - public int getMaxNumericInFilters() - { - return MAX_NUM_IN_FILTERS; - } - }; + public static final PlannerConfig PLANNER_CONFIG_MAX_NUMERIC_IN_FILTER = + PlannerConfig.builder().maxNumericInFilters(MAX_NUM_IN_FILTERS).build(); public static final String DUMMY_SQL_ID = "dummy"; - public static final String LOS_ANGELES = "America/Los_Angeles"; + public static final String PRETEND_CURRENT_TIME = "2000-01-01T00:00:00Z"; private static final ImmutableMap.Builder DEFAULT_QUERY_CONTEXT_BUILDER = ImmutableMap.builder() .put(PlannerContext.CTX_SQL_QUERY_ID, DUMMY_SQL_ID) - .put(PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, "2000-01-01T00:00:00Z") + .put(PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, 
PRETEND_CURRENT_TIME) .put(QueryContexts.DEFAULT_TIMEOUT_KEY, QueryContexts.DEFAULT_TIMEOUT_MILLIS) .put(QueryContexts.MAX_SCATTER_GATHER_BYTES_KEY, Long.MAX_VALUE); public static final Map QUERY_CONTEXT_DEFAULT = DEFAULT_QUERY_CONTEXT_BUILDER.build(); @@ -230,7 +192,7 @@ public int getMaxNumericInFilters() public static final Map QUERY_CONTEXT_DONT_SKIP_EMPTY_BUCKETS = ImmutableMap.of( PlannerContext.CTX_SQL_QUERY_ID, DUMMY_SQL_ID, - PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, "2000-01-01T00:00:00Z", + PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, PRETEND_CURRENT_TIME, TimeseriesQuery.SKIP_EMPTY_BUCKETS, false, QueryContexts.DEFAULT_TIMEOUT_KEY, QueryContexts.DEFAULT_TIMEOUT_MILLIS, QueryContexts.MAX_SCATTER_GATHER_BYTES_KEY, Long.MAX_VALUE @@ -238,7 +200,7 @@ public int getMaxNumericInFilters() public static final Map QUERY_CONTEXT_DO_SKIP_EMPTY_BUCKETS = ImmutableMap.of( PlannerContext.CTX_SQL_QUERY_ID, DUMMY_SQL_ID, - PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, "2000-01-01T00:00:00Z", + PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, PRETEND_CURRENT_TIME, TimeseriesQuery.SKIP_EMPTY_BUCKETS, true, QueryContexts.DEFAULT_TIMEOUT_KEY, QueryContexts.DEFAULT_TIMEOUT_MILLIS, QueryContexts.MAX_SCATTER_GATHER_BYTES_KEY, Long.MAX_VALUE @@ -246,7 +208,7 @@ public int getMaxNumericInFilters() public static final Map QUERY_CONTEXT_NO_TOPN = ImmutableMap.of( PlannerContext.CTX_SQL_QUERY_ID, DUMMY_SQL_ID, - PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, "2000-01-01T00:00:00Z", + PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, PRETEND_CURRENT_TIME, PlannerConfig.CTX_KEY_USE_APPROXIMATE_TOPN, "false", QueryContexts.DEFAULT_TIMEOUT_KEY, QueryContexts.DEFAULT_TIMEOUT_MILLIS, QueryContexts.MAX_SCATTER_GATHER_BYTES_KEY, Long.MAX_VALUE @@ -254,7 +216,7 @@ public int getMaxNumericInFilters() public static final Map QUERY_CONTEXT_LOS_ANGELES = ImmutableMap.of( PlannerContext.CTX_SQL_QUERY_ID, DUMMY_SQL_ID, - PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, "2000-01-01T00:00:00Z", + 
PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, PRETEND_CURRENT_TIME, PlannerContext.CTX_SQL_TIME_ZONE, LOS_ANGELES, QueryContexts.DEFAULT_TIMEOUT_KEY, QueryContexts.DEFAULT_TIMEOUT_MILLIS, QueryContexts.MAX_SCATTER_GATHER_BYTES_KEY, Long.MAX_VALUE @@ -263,7 +225,7 @@ public int getMaxNumericInFilters() // Matches QUERY_CONTEXT_DEFAULT public static final Map TIMESERIES_CONTEXT_BY_GRAN = ImmutableMap.of( PlannerContext.CTX_SQL_QUERY_ID, DUMMY_SQL_ID, - PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, "2000-01-01T00:00:00Z", + PlannerContext.CTX_SQL_CURRENT_TIMESTAMP, PRETEND_CURRENT_TIME, TimeseriesQuery.SKIP_EMPTY_BUCKETS, true, QueryContexts.DEFAULT_TIMEOUT_KEY, QueryContexts.DEFAULT_TIMEOUT_MILLIS, QueryContexts.MAX_SCATTER_GATHER_BYTES_KEY, Long.MAX_VALUE @@ -300,11 +262,12 @@ public static Map getTimeseriesContextWithFloorTime( public TemporaryFolder temporaryFolder = new TemporaryFolder(); - public boolean cannotVectorize = false; - public boolean skipVectorize = false; + public boolean cannotVectorize; + public boolean skipVectorize; + public int numMergeBuffers; public ObjectMapper queryJsonMapper; - public SpecificSegmentsQuerySegmentWalker walker = null; + public SpecificSegmentsQuerySegmentWalker walker; public QueryLogHook queryLogHook; static { @@ -318,6 +281,14 @@ public static Map getTimeseriesContextWithFloorTime( OUTER_LIMIT_CONTEXT.put(PlannerContext.CTX_SQL_OUTER_LIMIT, 2); } + /** + * Allows recording of the planning details of a test. Enable it to convert a Java test + * into the planner test framework ".case" file format. Change OFF to PLAN_AND_RUN to + * enable capture. 
+ */ + protected static CalciteTestRecorder recorder = + CalciteTestRecorder.create(CalciteTestRecorder.Option.OFF); + // Generate timestamps for expected results public static long timestamp(final String timeString) { @@ -520,6 +491,7 @@ public void setUp() throws Exception @After public void tearDown() throws Exception { + recorder.emit(); walker.close(); walker = null; } @@ -558,13 +530,21 @@ public Map getJacksonInjectables() } public final void setMapperInjectableValues(ObjectMapper mapper, Map injectables) + { + setMapperInjectableValues(mapper, injectables, createMacroTable()); + } + + public static void setMapperInjectableValues( + ObjectMapper mapper, + Map injectables, + ExprMacroTable macroTable) { // duplicate the injectable values from CalciteTests.INJECTOR initialization, mainly to update the injectable // macro table, or whatever else you feel like injecting to a mapper LookupExtractorFactoryContainerProvider lookupProvider = CalciteTests.INJECTOR.getInstance(LookupExtractorFactoryContainerProvider.class); mapper.setInjectableValues(new InjectableValues.Std(injectables) - .addValue(ExprMacroTable.class.getName(), createMacroTable()) + .addValue(ExprMacroTable.class.getName(), macroTable) .addValue(ObjectMapper.class.getName(), mapper) .addValue( DataSegment.PruneSpecsHolder.class, @@ -636,10 +616,9 @@ public void assertQueryIsForbidden( } } - public void testQuery( final String sql, - final List expectedQueries, + final List> expectedQueries, final List expectedResults ) throws Exception { @@ -657,7 +636,7 @@ public void testQuery( public void testQuery( final String sql, final Map context, - final List expectedQueries, + final List> expectedQueries, final List expectedResults ) throws Exception { @@ -674,7 +653,7 @@ public void testQuery( public void testQuery( final String sql, - final List expectedQueries, + final List> expectedQueries, final List expectedResults, final List parameters ) throws Exception @@ -694,7 +673,7 @@ public void testQuery( 
final PlannerConfig plannerConfig, final String sql, final AuthenticationResult authenticationResult, - final List expectedQueries, + final List> expectedQueries, final List expectedResults ) throws Exception { @@ -712,7 +691,7 @@ public void testQuery( public void testQuery( final String sql, final Map context, - final List expectedQueries, + final List> expectedQueries, final ResultsVerifier expectedResultsVerifier ) throws Exception { @@ -733,15 +712,56 @@ public void testQuery( final Map queryContext, final String sql, final AuthenticationResult authenticationResult, - final List expectedQueries, + final List> expectedQueries, + final List expectedResults + ) throws Exception + { + testQuery( + plannerConfig, + QueryDefn.builder(sql) + .context(queryContext) + .authResult(authenticationResult) + .build(), + expectedQueries, + expectedResults + ); + } + + public void testQuery( + final PlannerConfig plannerConfig, + QueryDefn queryDefn, + final List> expectedQueries, final List expectedResults ) throws Exception { - log.info("SQL: %s", sql); + log.info("SQL: %s", queryDefn.sql()); + final CalciteTestCapture capture; + if (recorder.isLive()) { + capture = new CalciteTestCapture( + plannerConfig, + queryDefn, + expectedQueries, + null, + null + ); + capture.options(cannotVectorize, numMergeBuffers); + capture.results(expectedResults); + recorder.record(capture); + } else { + capture = null; + } queryLogHook.clearRecordedQueries(); - final List plannerResults = - getResults(plannerConfig, queryContext, DEFAULT_PARAMETERS, sql, authenticationResult); - verifyResults(sql, expectedQueries, expectedResults, plannerResults); + try { + final List plannerResults = + getResults(plannerConfig, queryDefn, null); + verifyResults(queryDefn.sql(), expectedQueries, expectedResults, plannerResults); + } + catch (Exception e) { + if (capture != null) { + capture.exception(e); + } + throw e; + } } public void testQuery( @@ -750,7 +770,7 @@ public void testQuery( final List 
parameters, final String sql, final AuthenticationResult authenticationResult, - final List expectedQueries, + final List> expectedQueries, final List expectedResults ) throws Exception { @@ -772,12 +792,33 @@ public void testQuery( final List parameters, final String sql, final AuthenticationResult authenticationResult, - final List expectedQueries, + final List> expectedQueries, final ResultsVerifier expectedResultsVerifier, @Nullable final Consumer expectedExceptionInitializer ) throws Exception { - log.info("SQL: %s", sql); + testQuery( + plannerConfig, + QueryDefn.builder(sql) + .context(queryContext) + .parameters(parameters) + .authResult(authenticationResult) + .build(), + expectedQueries, + expectedResultsVerifier, + expectedExceptionInitializer + ); + } + + public void testQuery( + final PlannerConfig plannerConfig, + QueryDefn queryDefn, + final List> expectedQueries, + final ResultsVerifier expectedResultsVerifier, + @Nullable final Consumer expectedExceptionInitializer + ) throws Exception + { + log.info("SQL: %s", queryDefn.sql()); final List vectorizeValues = new ArrayList<>(); @@ -787,31 +828,66 @@ public void testQuery( vectorizeValues.add("force"); } + boolean captured = false; for (final String vectorize : vectorizeValues) { queryLogHook.clearRecordedQueries(); - final Map theQueryContext = new HashMap<>(queryContext); + final Map theQueryContext = new HashMap<>(); theQueryContext.put(QueryContexts.VECTORIZE_KEY, vectorize); theQueryContext.put(QueryContexts.VECTORIZE_VIRTUAL_COLUMNS_KEY, vectorize); if (!"false".equals(vectorize)) { theQueryContext.put(QueryContexts.VECTOR_SIZE_KEY, 2); // Small vector size to ensure we use more than one. 
} + QueryDefn specificDefn = queryDefn.withOverrides(theQueryContext); - final List theQueries = new ArrayList<>(); - for (Query query : expectedQueries) { - theQueries.add(recursivelyOverrideContext(query, theQueryContext)); + final List> theQueries = new ArrayList<>(); + for (Query query : expectedQueries) { + theQueries.add(recursivelyOverrideContext(query, specificDefn.context())); } + boolean skipCapture = !recorder.isLive(); if (cannotVectorize && "force".equals(vectorize)) { expectedException.expect(RuntimeException.class); expectedException.expectMessage("Cannot vectorize"); + // Don't bother capturing vectorization failures. + skipCapture = true; } else if (expectedExceptionInitializer != null) { expectedExceptionInitializer.accept(expectedException); } - final List plannerResults = getResults(plannerConfig, theQueryContext, parameters, sql, authenticationResult); - verifyResults(sql, theQueries, plannerResults, expectedResultsVerifier); + final CalciteTestCapture capture; + if (skipCapture || captured) { + capture = null; + } else { + captured = true; + capture = new CalciteTestCapture( + plannerConfig, + queryDefn, + expectedQueries, + expectedResultsVerifier, + expectedExceptionInitializer + ); + capture.options(cannotVectorize, numMergeBuffers); + recorder.record(capture); + } + + try { + final List plannerResults = getResults( + plannerConfig, + specificDefn, + capture); + if (capture != null) { + capture.results(plannerResults); + } + verifyResults(queryDefn.sql(), theQueries, plannerResults, expectedResultsVerifier); + } + catch (Exception e) { + if (capture != null) { + capture.exception(e); + } + throw e; + } } } @@ -820,32 +896,45 @@ public List getResults( final Map queryContext, final List parameters, final String sql, - final AuthenticationResult authenticationResult + final AuthenticationResult authenticationResult, + final CalciteTestCapture capture ) throws Exception { return getResults( plannerConfig, - queryContext, - parameters, - sql, - 
authenticationResult, + new QueryDefn( + sql, + queryContext, + parameters, + authenticationResult), + capture); + } + + public List getResults( + final PlannerConfig plannerConfig, + final QueryDefn queryDefn, + final CalciteTestCapture capture + ) throws Exception + { + return getResults( + plannerConfig, + queryDefn, createOperatorTable(), createMacroTable(), CalciteTests.TEST_AUTHORIZER_MAPPER, - queryJsonMapper + queryJsonMapper, + capture ); } public List getResults( final PlannerConfig plannerConfig, - final Map queryContext, - final List parameters, - final String sql, - final AuthenticationResult authenticationResult, + final QueryDefn queryDefn, final DruidOperatorTable operatorTable, final ExprMacroTable macroTable, final AuthorizerMapper authorizerMapper, - final ObjectMapper objectMapper + final ObjectMapper objectMapper, + final CalciteTestCapture capture ) throws Exception { final SqlLifecycleFactory sqlLifecycleFactory = getSqlLifecycleFactory( @@ -857,12 +946,21 @@ public List getResults( objectMapper ); - return sqlLifecycleFactory.factorize().runSimple(sql, queryContext, parameters, authenticationResult).toList(); + SqlLifecycle lifecycle = sqlLifecycleFactory.factorize(); + List results = lifecycle.runSimple( + queryDefn.sql(), + queryDefn.context(), + queryDefn.parameters(), + queryDefn.authResult()).toList(); + if (capture != null) { + capture.plannerResult(lifecycle.plannerResult()); + } + return results; } public void verifyResults( final String sql, - final List expectedQueries, + final List> expectedQueries, final List expectedResults, final List results ) @@ -872,7 +970,7 @@ public void verifyResults( public void verifyResults( final String sql, - final List expectedQueries, + final List> expectedQueries, final List results, final ResultsVerifier expectedResultsVerifier ) @@ -888,11 +986,11 @@ public void verifyResults( private void verifyQueries( final String sql, - @Nullable final List expectedQueries + @Nullable final List> 
expectedQueries ) { if (expectedQueries != null) { - final List recordedQueries = queryLogHook.getRecordedQueries(); + final List> recordedQueries = queryLogHook.getRecordedQueries(); Assert.assertEquals( StringUtils.format("query count: %s", sql), @@ -943,7 +1041,7 @@ public void testQueryThrows(final String sql, Consumer expect public void testQueryThrows( final String sql, final Map queryContext, - final List expectedQueries, + final List> expectedQueries, final Consumer expectedExceptionInitializer ) throws Exception { @@ -1080,7 +1178,7 @@ protected void skipVectorize() skipVectorize = true; } - protected static boolean isRewriteJoinToFilter(final Map queryContext) + public static boolean isRewriteJoinToFilter(final Map queryContext) { return (boolean) queryContext.getOrDefault( QueryContexts.REWRITE_JOIN_TO_FILTER_ENABLE_KEY, @@ -1103,7 +1201,7 @@ public static Query recursivelyOverrideContext(final Query query, fina private static DataSource recursivelyOverrideContext(final DataSource dataSource, final Map context) { if (dataSource instanceof QueryDataSource) { - final Query subquery = ((QueryDataSource) dataSource).getQuery(); + final Query subquery = ((QueryDataSource) dataSource).getQuery(); return new QueryDataSource(recursivelyOverrideContext(subquery, context)); } else { return dataSource.withChildren( @@ -1195,6 +1293,7 @@ protected Map withLeftDirectAccessEnabled(Map co */ protected void requireMergeBuffers(int numMergeBuffers) throws IOException { + this.numMergeBuffers = numMergeBuffers; conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate( resourceCloser, QueryStackTests.getProcessingConfig(true, numMergeBuffers) @@ -1237,5 +1336,10 @@ public void verify(String sql, List results) Assert.assertEquals(StringUtils.format("result count: %s", sql), expectedResults.size(), results.size()); assertResultsEquals(sql, expectedResults, results); } + + public List expectedResults() + { + return expectedResults; + } } } diff --git 
a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteArraysQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteArraysQueryTest.java index fd9915d67d43..8c70593b6987 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteArraysQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteArraysQueryTest.java @@ -67,8 +67,8 @@ */ public class CalciteArraysQueryTest extends BaseCalciteQueryTest { - // test some query stuffs, sort of limited since no native array column types so either need to use constructor or - // array aggregator + // test some query stuff, sort of limited since no native array column + // types so either need to use constructor or array aggregator @Test public void testSelectConstantArrayExpressionFromTable() throws Exception { @@ -361,7 +361,7 @@ public void testSomeArrayFunctionsWithScanQueryNoStringify() throws Exception // when not stringifying arrays, some things are still stringified, because they are inferred to be typed as strings // the planner context which controls stringification of arrays does not apply to multi-valued string columns, // which will still always be stringified to ultimately adhere to the varchar type - // as array support increases in the engine this will likely change since using explict array functions should + // as array support increases in the engine this will likely change since using explicit array functions should // probably kick it into an array List expectedResults; if (useDefault) { @@ -938,7 +938,7 @@ public void testArrayOffset() throws Exception @Test public void testArrayGroupAsLongArray() throws Exception { - // Cannot vectorize as we donot have support in native query subsytem for grouping on arrays + // Cannot vectorize as we do not have support in native query subsystem for grouping on arrays cannotVectorize(); testQuery( "SELECT ARRAY[l1], SUM(cnt) FROM druid.numfoo GROUP BY 1 ORDER BY 2 DESC", @@ -987,7 +987,7 @@ public void 
testArrayGroupAsLongArray() throws Exception @Test public void testArrayGroupAsDoubleArray() throws Exception { - // Cannot vectorize as we donot have support in native query subsytem for grouping on arrays as keys + // Cannot vectorize as we do not have support in native query subsystem for grouping on arrays as keys cannotVectorize(); testQuery( "SELECT ARRAY[d1], SUM(cnt) FROM druid.numfoo GROUP BY 1 ORDER BY 2 DESC", @@ -1036,7 +1036,7 @@ public void testArrayGroupAsDoubleArray() throws Exception @Test public void testArrayGroupAsFloatArray() throws Exception { - // Cannot vectorize as we donot have support in native query subsytem for grouping on arrays as keys + // Cannot vectorize as we do not have support in native query subsystem for grouping on arrays as keys cannotVectorize(); testQuery( "SELECT ARRAY[f1], SUM(cnt) FROM druid.numfoo GROUP BY 1 ORDER BY 2 DESC", @@ -2407,7 +2407,7 @@ public void testArrayAggArrayContainsSubquery() throws Exception ); } testQuery( - "SELECT dim1,dim2 FROM foo WHERE ARRAY_CONTAINS((SELECT ARRAY_AGG(DISTINCT dim1) FROM foo WHERE dim1 is not null), dim1)", + "SELECT dim1, dim2 FROM foo WHERE ARRAY_CONTAINS((SELECT ARRAY_AGG(DISTINCT dim1) FROM foo WHERE dim1 is not null), dim1)", ImmutableList.of( Druids.newScanQueryBuilder() .dataSource( @@ -2560,8 +2560,8 @@ public static void assertResultsDeepEquals(String sql, List expected, public static void assertDeepEquals(String path, Object expected, Object actual) { if (expected instanceof List && actual instanceof List) { - List expectedList = (List) expected; - List actualList = (List) actual; + List expectedList = (List) expected; + List actualList = (List) actual; Assert.assertEquals(path + " arrays length mismatch", expectedList.size(), actualList.size()); for (int i = 0; i < expectedList.size(); i++) { assertDeepEquals(path + "[" + i + "]", expectedList.get(i), actualList.get(i)); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java index 9a2dca42f493..480562e3f2ec 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteCorrelatedQueryTest.java @@ -55,7 +55,6 @@ @RunWith(JUnitParamsRunner.class) public class CalciteCorrelatedQueryTest extends BaseCalciteQueryTest { - @Test @Parameters(source = QueryContextForJoinProvider.class) public void testCorrelatedSubquery(Map queryContext) throws Exception diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java index 7040e7fc5394..adcbb0fbdfdd 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteIngestionDmlTest.java @@ -300,7 +300,7 @@ private void verifySuccess() throws Exception throw new ISE("Test must have expectedResources"); } - final List expectedQueries = + final List> expectedQueries = expectedQuery == null ? 
Collections.emptyList() : Collections.singletonList(recursivelyOverrideContext(expectedQuery, queryContext)); @@ -311,7 +311,7 @@ private void verifySuccess() throws Exception ); final List results = - getResults(plannerConfig, queryContext, Collections.emptyList(), sql, authenticationResult); + getResults(plannerConfig, queryContext, Collections.emptyList(), sql, authenticationResult, null); verifyResults( sql, diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java index aa7a8eb8856f..1c41feb8d9cb 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java @@ -38,6 +38,7 @@ import org.apache.druid.sql.calcite.external.ExternalOperatorConversion; import org.apache.druid.sql.calcite.filtration.Filtration; import org.apache.druid.sql.calcite.parser.DruidSqlInsert; +import org.apache.druid.sql.calcite.planner.DruidPlanner; import org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.util.CalciteTests; import org.hamcrest.CoreMatchers; @@ -699,10 +700,7 @@ public void testInsertWithUnnamedColumnInSelectStatement() .sql("INSERT INTO t SELECT dim1, dim2 || '-lol' FROM foo PARTITIONED BY ALL") .expectValidationError( SqlPlanningException.class, - "Cannot ingest expressions that do not have an alias " - + "or columns with names like EXPR$[digit]." - + "E.g. if you are ingesting \"func(X)\", then you can rewrite it as " - + "\"func(X) as myColumn\"" + DruidPlanner.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); } @@ -714,10 +712,7 @@ public void testInsertWithInvalidColumnNameInIngest() .sql("INSERT INTO t SELECT __time, dim1 AS EXPR$0 FROM foo PARTITIONED BY ALL") .expectValidationError( SqlPlanningException.class, - "Cannot ingest expressions that do not have an alias " - + "or columns with names like EXPR$[digit]." - + "E.g. 
if you are ingesting \"func(X)\", then you can rewrite it as " - + "\"func(X) as myColumn\"" + DruidPlanner.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); } @@ -731,10 +726,7 @@ public void testInsertWithUnnamedColumnInNestedSelectStatement() + "(SELECT __time, LOWER(dim1) FROM foo) PARTITIONED BY ALL TIME") .expectValidationError( SqlPlanningException.class, - "Cannot ingest expressions that do not have an alias " - + "or columns with names like EXPR$[digit]." - + "E.g. if you are ingesting \"func(X)\", then you can rewrite it as " - + "\"func(X) as myColumn\"" + DruidPlanner.UNNAMED_INGESTION_COLUMN_ERROR ) .verify(); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java index aad48cff19cc..8f320e064205 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteJoinQueryTest.java @@ -162,7 +162,6 @@ public void testInnerJoinWithLimitAndAlias() throws Exception ); } - @Test public void testExactTopNOnInnerJoinWithLimit() throws Exception { @@ -592,7 +591,7 @@ public void testJoinOnGroupByInsteadOfTimeseriesWithFloorOnTime() throws Excepti @Test @Parameters(source = QueryContextForJoinProvider.class) - public void testFilterAndGroupByLookupUsingJoinOperatorWithValueFilterPushdownMatchesNothig(Map queryContext) + public void testFilterAndGroupByLookupUsingJoinOperatorWithValueFilterPushdownMatchesNothing(Map queryContext) throws Exception { // Cannot vectorize JOIN operator. @@ -716,7 +715,7 @@ public void testFilterAndGroupByLookupUsingJoinOperatorBackwards(Map queryContext) + public void testFilterAndGroupByLookupUsingJoinOperatorWithoutFilter(Map queryContext) throws Exception { // Cannot vectorize JOIN operator. 
@@ -1000,8 +999,6 @@ public void testLeftJoinTwoLookupsUsingJoinOperator(Map queryCon ); } - - @Test @Parameters(source = QueryContextForJoinProvider.class) public void testInnerJoinTableLookupLookupWithFilterWithOuterLimit(Map queryContext) throws Exception @@ -2524,7 +2521,7 @@ public void testUsingSubqueryWithExtractionFns(Map queryContext) testQuery( "SELECT dim2, COUNT(*) FROM druid.foo " - + "WHERE substring(dim2, 1, 1) IN (SELECT substring(dim1, 1, 1) FROM druid.foo WHERE dim1 <> '')" + + "WHERE substring(dim2, 1, 1) IN (SELECT substring(dim1, 1, 1) FROM druid.foo WHERE dim1 <> '')\n" + "group by dim2", queryContext, ImmutableList.of( diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java index 28fd4d463f6a..af6a23914413 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteMultiValueStringQueryTest.java @@ -1568,7 +1568,7 @@ public void testFilterOnMultiValueListFilterMatchLike() throws Exception @Test public void testMultiValueToArrayGroupAsArrayWithMultiValueDimension() throws Exception { - // Cannot vectorize as we donot have support in native query subsytem for grouping on arrays as keys + // Cannot vectorize as we do not have support in native query subsystem for grouping on arrays as keys cannotVectorize(); testQuery( "SELECT MV_TO_ARRAY(dim3), SUM(cnt) FROM druid.numfoo GROUP BY 1 ORDER BY 2 DESC", diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java index 27500323875a..e7d123ec8634 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteParameterQueryTest.java @@ -369,7 +369,6 @@ public void 
testParametersInCases() throws Exception ); } - @Test public void testTimestamp() throws Exception { @@ -504,7 +503,6 @@ public void testDoubles() throws Exception ) ); - testQuery( "SELECT COUNT(*) FROM druid.foo WHERE cnt = ? or cnt = ?", ImmutableList.of( @@ -668,8 +666,9 @@ public void testWrongTypeParameter() throws Exception public void testNullParameter() throws Exception { cannotVectorize(); - // contrived example of using null as an sql parameter to at least test the codepath because lots of things dont - // actually work as null and things like 'IS NULL' fail to parse in calcite if expressed as 'IS ?' + // contrived example of using null as an SQL parameter to at least test the code path + // because lots of things don't actually work as null and things like 'IS NULL' fail to + // parse in Calcite if expressed as 'IS ?' // this will optimize out the 3rd argument because 2nd argument will be constant and not null testQuery( diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 15680504c6db..ed8c6bb79e38 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -134,6 +134,25 @@ import java.util.Map; import java.util.stream.Collectors; +/** + * Massive set of query tests. + *

+ The tests use a system property, + {@code druid.generic.useDefaultValueForNull} set externally to + the tests. They run with "replace with default" by default. Set + this flag in your IDE to run the tests manually with a + different value. + *

+ * Note that these tests may not run individually as the tests here + * are not stable. Some tests set the merge buffer count, which is + * a static. Later tests use that count: it is not reset. Test order + * is undefined: it is whatever order that the compiler happens to + * list the methods. As a result, random later methods benefit from + * the merge buffer count set in earlier tests. The result is that + * if you try to run some tests from the debugger, they'll fail + * because of not enough buffers, even though they run fine when + * run as part of the entire class. + */ public class CalciteQueryTest extends BaseCalciteQueryTest { @Test diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/QueryDefn.java b/sql/src/test/java/org/apache/druid/sql/calcite/QueryDefn.java new file mode 100644 index 000000000000..16dec324b9f1 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/QueryDefn.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.sql.calcite; + +import org.apache.calcite.avatica.remote.TypedValue; +import org.apache.druid.server.security.AuthenticationResult; +import org.apache.druid.sql.calcite.tester.QueryRunner.Builder; +import org.apache.druid.sql.http.SqlParameter; +import org.apache.druid.sql.http.SqlQuery; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Defines the per-query inputs to the planner: the inputs normally + * obtained from a SQL query (SQL, context, parameters) along with + * the authorization result. + */ +public class QueryDefn +{ + private final String sql; + private final Map context; + private final List parameters; + private final AuthenticationResult authenticationResult; + + public QueryDefn( + String sql, + Map context, + List parameters, + AuthenticationResult authenticationResult + ) + { + super(); + this.sql = sql; + this.context = context; + this.parameters = parameters; + this.authenticationResult = authenticationResult; + } + + public QueryDefn( + SqlQuery query, + AuthenticationResult authenticationResult + ) + { + this.sql = query.getQuery(); + this.context = query.getContext(); + this.parameters = query.getParameters(); + this.authenticationResult = authenticationResult; + } + + public static Builder builder(String sql) + { + return new Builder(sql); + } + + public String sql() + { + return sql; + } + + public Map context() + { + return context; + } + + public List parameters() + { + return parameters; + } + + public List typedParameters() + { + return SqlQuery.getParameterList(parameters); + } + + public AuthenticationResult authResult() + { + return authenticationResult; + } + + public QueryDefn withOverrides(Map overrides) + { + if (overrides == null || overrides.isEmpty()) { + return this; + } + Map newContext; + if (context.isEmpty()) { + newContext = overrides; + } else { + newContext = new HashMap<>(); + newContext.putAll(context); + newContext.putAll(overrides); + } + return 
new QueryDefn(sql, newContext, parameters, authenticationResult); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/TestInsertQueryMaker.java b/sql/src/test/java/org/apache/druid/sql/calcite/TestInsertQueryMaker.java index 31b9c3163184..ec6599f1af13 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/TestInsertQueryMaker.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/TestInsertQueryMaker.java @@ -98,4 +98,10 @@ public Sequence runQuery(final DruidQuery druidQuery) // 2) Return the dataSource and signature of the insert operation, so tests can confirm they are correct. return Sequences.simple(ImmutableList.of(new Object[]{targetDataSource, signature})); } + + @Override + public Object explain(DruidQuery druidQuery) + { + return druidQuery.getQuery(); + } } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/TestQueryMakerFactory.java b/sql/src/test/java/org/apache/druid/sql/calcite/TestQueryMakerFactory.java index c2fbe5aeeefd..b5e50236fde6 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/TestQueryMakerFactory.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/TestQueryMakerFactory.java @@ -34,7 +34,7 @@ public class TestQueryMakerFactory implements QueryMakerFactory private final QueryLifecycleFactory queryLifecycleFactory; private final ObjectMapper jsonMapper; - TestQueryMakerFactory( + public TestQueryMakerFactory( final QueryLifecycleFactory queryLifecycleFactory, final ObjectMapper jsonMapper ) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/planner/DruidPlannerTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/planner/DruidPlannerTest.java new file mode 100644 index 000000000000..e4918fbc0594 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/planner/DruidPlannerTest.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.planner; + +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.logger.Logger; +import org.apache.druid.sql.calcite.BaseCalciteQueryTest; +import org.apache.druid.sql.calcite.tester.PlannerFixture; +import org.apache.druid.sql.calcite.tester.QueryTestSet; +import org.apache.druid.sql.calcite.util.CalciteTests; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; + +import static org.junit.Assert.assertTrue; + +/** + * Test runner for query planner tests defined in ".case" files. + * If the test fails, the test itself won't report many details. + * Instead, look in target/actual for the "actual" files for failed + * test. Diff them with the cases in test/resources/calcite/cases + * to determine what changed. + *

+ Planner setup is mostly handled by the {@code PlannerFixture} + class, with some additional test-specific configuration + applied for each group of tests (each case file or set of case + files). +

+ * All tests use the set of hard-coded, in-memory segments defined + * by {@code CalciteTests}. Tests can optionally include lookups + * and views, if required for those tests. + */ +public class DruidPlannerTest +{ + public static final Logger log = new Logger(DruidPlannerTest.class); + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + // Converted from CalciteInsertDmlTest + @Test + public void testInsertDml() throws IOException + { + assertTrue( + QueryTestSet + .fromResource("/calcite/cases/insertDML.case") + .run( + standardBuilder() + // To allow access to external tables + .withAuthResult(CalciteTests.SUPER_USER_AUTH_RESULT) + .withView( + "aview", + "SELECT SUBSTRING(dim1, 1, 1) AS dim1_firstchar FROM foo WHERE dim2 = 'a'"))); + } + + private PlannerFixture.Builder standardBuilder() throws IOException + { + return new PlannerFixture + .Builder(temporaryFolder.newFolder()) + .defaultQueryOptions(BaseCalciteQueryTest.QUERY_CONTEXT_DEFAULT) + .withLookups(); + } + + // Converted from CalciteArraysQueryTest + @Test + public void testArrayQuery() throws IOException + { + assertTrue( + QueryTestSet + .fromResource("/calcite/cases/arrayQuery.case") + .run(standardBuilder())); + } + + // Converted from CalciteCorrelatedQueryTest + @Test + public void testCorrelatedQuery() throws IOException + { + assertTrue( + QueryTestSet + .fromResource("/calcite/cases/correlatedQuery.case") + .run(standardBuilder())); + } + + // Converted from CalciteJoinQueryTest + @Test + public void testJoinQuery() throws IOException + { + assertTrue( + runMultiple( + standardBuilder(), + "joinQuery", + 7)); + } + + private boolean runMultiple( + PlannerFixture.Builder builder, + String base, + int count + ) + { + boolean ok = true; + for (int i = 1; i <= count; i++) { + String testCase = StringUtils.format("%s%02d.case", base, i); + QueryTestSet testSet = QueryTestSet.fromResource("/calcite/cases/" + testCase); + if (!testSet.run(builder)) { + log.warn("Test 
failed: " + testCase); + ok = false; + } + } + return ok; + } + + // Converted from CalciteMultiValueStringQueryTest + @Test + public void testMultiValueStringQuery() throws IOException + { + assertTrue( + QueryTestSet + .fromResource("/calcite/cases/multiValueStringQuery.case") + .run(standardBuilder())); + } + + // Converted from CalciteParameterQueryTest + @Test + public void testParameterQuery() throws IOException + { + assertTrue( + QueryTestSet + .fromResource("/calcite/cases/parameterQuery.case") + .run(standardBuilder())); + } + + // Converted from CalciteQueryTest + // The original file is huge. The tests are split into multiple files, + // in groups of around 25, in the same order as they appear in the + // Java file. + @Test + public void testQuery() throws IOException + { + PlannerFixture fixture = new PlannerFixture + .Builder(temporaryFolder.newFolder()) + .defaultQueryOptions(BaseCalciteQueryTest.QUERY_CONTEXT_DEFAULT) + .withLookups() + .withMergeBufferCount(4) + .withView( + "aview", + "SELECT SUBSTRING(dim1, 1, 1) AS dim1_firstchar FROM foo WHERE dim2 = 'a'") + .withView( + "bview", + "SELECT COUNT(*) FROM druid.foo\n" + + "WHERE __time >= CURRENT_TIMESTAMP + INTERVAL '1' DAY AND __time < TIMESTAMP '2002-01-01 00:00:00'") + .withView( + "cview", + "SELECT SUBSTRING(bar.dim1, 1, 1) AS dim1_firstchar, bar.dim2 as dim2, dnf.l2 as l2\n" + + "FROM (SELECT * from foo WHERE dim2 = 'a') as bar INNER JOIN druid.numfoo dnf ON bar.dim2 = dnf.dim2") + .withView( + "dview", + "SELECT SUBSTRING(dim1, 1, 1) AS numfoo FROM foo WHERE dim2 = 'a'") + .withView( + "forbiddenView", + "SELECT __time, SUBSTRING(dim1, 1, 1) AS dim1_firstchar, dim2 FROM foo WHERE dim2 = 'a'") + .withView( + "restrictedView", + "SELECT __time, dim1, dim2, m1 FROM druid.forbiddenDatasource WHERE dim2 = 'a'") + .withView( + "invalidView", + "SELECT __time, dim1, dim2, m1 FROM druid.invalidDatasource WHERE dim2 = 'a'") + .build(); + boolean ok = true; + for (int i = 1; i <= 15; i++) { + 
String testCase = StringUtils.format("query%02d.case", i); + QueryTestSet testSet = QueryTestSet.fromResource("/calcite/cases/" + testCase); + if (!testSet.run(fixture)) { + log.warn("Test failed: " + testCase); + ok = false; + } + } + assertTrue(ok); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/rel/DruidQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/rel/DruidQueryTest.java index 68edf3f1f284..ea5d7db502b5 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/rel/DruidQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/rel/DruidQueryTest.java @@ -43,7 +43,6 @@ public class DruidQueryTest { - static { NullHandling.initializeForTests(); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidSchemaTest.java index 2a36d08c47e4..9554d1b0f1ef 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidSchemaTest.java @@ -212,7 +212,6 @@ void markDataSourceAsNeedRebuild(String datasource) null ) { - boolean throwException = true; @Override @@ -274,7 +273,6 @@ public void testSchemaInit() throws InterruptedException schema2.stop(); } - @Test public void testGetTableMapFoo() { diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/ActualResults.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ActualResults.java new file mode 100644 index 000000000000..eb2b3063b788 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ActualResults.java @@ -0,0 +1,697 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.common.config.NullHandling; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.server.security.ResourceAction; +import org.apache.druid.sql.calcite.tester.LinesSection.ResultsSection; +import org.apache.druid.sql.calcite.tester.TestSection.Section; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +/** + * Records the actual results of running a planner test so that + * the results can be compared with expected, or, on an error, + * emitted to an "actuals" file. + */ +public class ActualResults +{ + /** + * Gathers errors found when verifying a test case against actual + * results. The prefix allows a section to declare itself, then + * invoke a generic verifier that doesn't know about the specific + * section. 
+ */ + public static class ErrorCollector + { + private final List errors = new ArrayList<>(); + private String prefix; + + public void setSection(String section) + { + prefix = section; + } + + public void add(String error) + { + if (prefix != null) { + error = prefix + ": " + error; + } + errors.add(error); + } + + public boolean ok() + { + return errors.isEmpty(); + } + + public List errors() + { + return errors; + } + } + + /** + * Equivalent of a {@link TestSection}, but for actual results. + * Holds a specific, labeled kind of actual results and tracks + * if those actuals match the expected results. + */ + public abstract static class ActualResultsSection + { + boolean ok; + + public abstract void verify(ErrorCollector errors); + public abstract void write(TestCaseWriter writer) throws IOException; + } + + /** + * Simple string results, such as for an exception. + */ + public static class StringResults extends ActualResultsSection + { + final PatternSection expected; + final String actual; + + public StringResults(PatternSection expected, String actual) + { + this.expected = expected; + this.actual = actual; + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + if (ok) { + expected.write(writer); + } else { + writer.emitSection(expected.name(), actual); + } + } + + @Override + public void verify(ErrorCollector errors) + { + ok = expected.verify(actual, errors); + } + } + + /** + * Results represented as a string array, such as when breaking a + * block of text into lines, for matching line-by-line. 
+ */ + public static class StringArrayResults extends ActualResultsSection + { + final PatternSection expected; + final String[] actual; + + public StringArrayResults(PatternSection expected, String[] actual) + { + this.expected = expected; + this.actual = actual; + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + if (ok) { + expected.write(writer); + } else { + writer.emitSection(expected.name(), actual); + } + } + + @Override + public void verify(ErrorCollector errors) + { + ok = expected.verify(actual, errors); + } + } + + /** + * Query run results, when comparing as strings. + */ + public static class RowResults extends ActualResultsSection + { + final ResultsSection expected; + final List actual; + + public RowResults(ResultsSection expected, List actual) + { + this.expected = expected; + this.actual = actual; + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + if (ok) { + expected.write(writer); + } else { + writer.emitSection(expected.name(), actual); + } + } + + @Override + public void verify(ErrorCollector errors) + { + ok = expected.verify(actual, errors); + } + } + + /** + * Actual query output when compared as Java objects. Handles the + * case where a string compare is unstable (such as when results + * contain float or double values.) 
+ */ + public static class JsonResults extends ActualResultsSection + { + final ResultsSection expected; + final List actual; + final ObjectMapper mapper; + + public JsonResults( + ResultsSection expected, + List actual, + ObjectMapper mapper) + { + this.expected = expected; + this.actual = actual; + this.mapper = mapper; + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + if (ok) { + expected.write(writer); + } else { + List lines = QueryTestCases.resultsToJson(actual, mapper); + writer.emitSection(expected.name(), lines); + } + } + + @Override + public void verify(ErrorCollector errors) + { + ok = expected.verify(actual, mapper, errors); + } + } + + /** + * Results for an exception. + * + */ + public static class ExceptionResults extends ActualResultsSection + { + final TextSection.ExceptionSection expected; + final Exception actual; + + public ExceptionResults(TextSection.ExceptionSection expected, Exception actual) + { + this.expected = expected; + this.actual = actual; + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + if (ok) { + expected.write(writer); + } else { + writer.emitException(actual); + } + } + + @Override + public void verify(ErrorCollector errors) + { + ok = expected.verify(actual, errors); + } + } + + /** + * Actual resource action results. 
+ */ + public static class ResourceResults extends ActualResultsSection + { + final ResourcesSection expected; + final Set actual; + + public ResourceResults(ResourcesSection expected, Set actual) + { + this.expected = expected; + this.actual = actual; + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + if (ok) { + expected.write(writer); + } else { + writer.emitResources(actual); + } + } + + @Override + public void verify(ErrorCollector errors) + { + ok = expected.verify(actual, errors); + } + } + + public static class ActualRun + { + final QueryRun run; + final ActualResultsSection rows; + final Exception actualException; + final StringResults error; + final ExceptionResults exception; + final Map actualContext; + private final boolean actualSqlCompatibleNulls; + private boolean ok = true; + + public ActualRun( + QueryRun run, + Map actualContext, + List rows, + ObjectMapper mapper) + { + this.run = run; + ResultsSection results = run.resultsSection(); + boolean typedCompare = run.booleanOption(OptionsSection.TYPED_COMPARE); + if (typedCompare) { + this.rows = new JsonResults(results, rows, mapper); + } else { + this.rows = new RowResults(results, QueryTestCases.resultsToJson(rows, mapper)); + } + this.exception = null; + this.error = null; + this.actualException = null; + this.actualContext = actualContext; + this.actualSqlCompatibleNulls = NullHandling.sqlCompatible(); + } + + public ActualRun(QueryRun run, Map actualContext, Exception e) + { + this.run = run; + this.rows = null; + this.actualException = e; + QueryTestCase testCase = run.testCase(); + TextSection.ExceptionSection exSection = testCase.exception(); + if (exSection == null) { + this.exception = null; + } else { + this.exception = new ExceptionResults(exSection, e); + } + PatternSection errorSection = testCase.error(); + if (errorSection == null) { + this.error = null; + } else { + this.error = new StringResults(errorSection, e.getMessage()); + } + 
this.actualContext = actualContext; + this.actualSqlCompatibleNulls = NullHandling.sqlCompatible(); + } + + public void verify(ErrorCollector errors) + { + errors.setSection(run.displayLabel()); + boolean shouldFail = run.shouldFail(); + if (shouldFail && actualException == null) { + errors.add("Expected failure but run succeeded"); + ok = false; + return; + } else if (!shouldFail && actualException != null) { + errors.add("Expected success but run failed"); + ok = false; + return; + } + if (actualException != null) { + if (exception != null) { + exception.verify(errors); + ok = exception.ok; + } + if (error != null) { + error.verify(errors); + ok &= error.ok; + } + } else if (rows != null) { + rows.verify(errors); + ok = rows.ok; + } + } + + public void write(TestCaseWriter writer) throws IOException + { + if (ok) { + run.write(writer); + return; + } + StringBuilder buf = new StringBuilder() + .append("sqlCompatibleNulls=") + .append(actualSqlCompatibleNulls) + .append("\nContext:\n"); + for (Entry entry : actualContext.entrySet()) { + buf.append(entry.getKey()) + .append("=") + .append(entry.getValue()) + .append("\n"); + } + writer.emitComment(buf.toString()); + writer.emitSection("run", run.label); + + if (actualException != null) { + if (exception != null) { + exception.write(writer); + } + if (error != null) { + error.write(writer); + } + return; + } + for (TestSection section : run.fileOrder) { + if (section.section() == Section.RESULTS) { + rows.write(writer); + } else { + section.write(writer); + } + } + } + } + + private final QueryTestCase testCase; + protected StringResults ast; + protected StringResults unparsed; + protected StringResults plan; + protected StringResults execPlan; + protected ExceptionResults exception; + protected StringResults error; + protected StringResults explain; + protected StringArrayResults schema; + protected StringArrayResults targetSchema; + protected StringResults nativeQuery; + protected ResourceResults resourceActions; + protected 
Exception actualException; + protected List runs = new ArrayList<>(); + private ActualResults.ErrorCollector errors = new ActualResults.ErrorCollector(); + + public ActualResults(QueryTestCase testCase) + { + this.testCase = testCase; + } + + public void exception(Exception e) + { + this.actualException = e; + TextSection.ExceptionSection exSection = testCase.exception(); + if (exSection != null) { + this.exception = new ExceptionResults(exSection, e); + } + PatternSection errorSection = testCase.error(); + if (errorSection != null) { + this.error = new StringResults(errorSection, e.getMessage()); + } + } + + public void unparsed(PatternSection section, String text) + { + this.unparsed = new StringResults(section, text); + } + + public void ast(PatternSection section, String text) + { + this.ast = new StringResults(section, text); + } + + public void plan(PatternSection section, String text) + { + this.plan = new StringResults(section, text); + } + + public void execPlan(PatternSection section, String text) + { + this.execPlan = new StringResults(section, text); + } + + public void schema(PatternSection section, String[] schema) + { + this.schema = new StringArrayResults(section, schema); + } + + public void targetSchema(PatternSection section, String[] schema) + { + this.targetSchema = new StringArrayResults(section, schema); + } + + public void nativeQuery(PatternSection section, String text) + { + this.nativeQuery = new StringResults(section, text); + } + + public void resourceActions(ResourcesSection section, Set resourceActions) + { + this.resourceActions = new ResourceResults(section, resourceActions); + } + + public void explain(PatternSection section, String text) + { + this.explain = new StringResults(section, text); + } + + public void run( + QueryRun run, + Map actualContext, + List rows, + ObjectMapper mapper) + { + runs.add(new ActualRun(run, actualContext, rows, mapper)); + } + + public void runFailed(QueryRun run, Map actualContext, Exception e) + { 
+ runs.add(new ActualRun(run, actualContext, e)); + } + + public ActualResults.ErrorCollector errors() + { + return errors; + } + + public boolean ok() + { + return errors.ok(); + } + + public void verify() + { + verifyException(); + if (testCase.shouldFail() || !ok()) { + return; + } + verify(ast); + verify(unparsed); + verify(plan); + verify(execPlan); + verify(schema); + verify(targetSchema); + verify(explain); + verify(nativeQuery); + verify(resourceActions); + verifyRuns(); + } + + private void verify(ActualResultsSection section) + { + if (section != null) { + section.verify(errors); + } + } + + public void verifyException() + { + boolean shouldFail = testCase.shouldFail(); + if (!shouldFail) { + if (actualException != null) { + errors.add(StringUtils.format( + "Failed with exception %s: [%s]", + actualException.getClass().getSimpleName(), + actualException.getMessage())); + } + return; + } + if (shouldFail && actualException == null) { + errors.add("Expected failure but got success"); + return; + } + verify(exception); + verify(error); + } + + public void verifyRuns() + { + for (ActualRun run : runs) { + run.verify(errors); + } + } + + public void write(TestCaseWriter writer) throws IOException + { + writeSetup(writer); + + if (actualException == null) { + writeResults(writer); + writeRuns(writer); + } else { + writeFailure(writer); + } + } + + private void writeSetup(TestCaseWriter writer) throws IOException + { + for (TestSection section : testCase.sections()) { + switch (section.section()) { + case COMMENTS: + section.write(writer); + writer.emitErrors(errors.errors); + break; + case CASE: + case SQL: + case CONTEXT: + case OPTIONS: + case PARAMETERS: + section.write(writer); + break; + default: + break; + } + } + } + + private void writeFailure(TestCaseWriter writer) throws IOException + { + if (testCase.shouldFail()) { + for (TestSection section : testCase.sections()) { + switch (section.section()) { + case EXCEPTION: + if (actualException == null) { + 
section.write(writer); + } else { + exception.write(writer); + } + break; + case ERROR: + if (actualException == null) { + section.write(writer); + } else { + error.write(writer); + } + break; + default: + break; + } + } + } else { + writer.emitException(actualException); + writer.emitError(actualException); + } + } + + private void writeResults(TestCaseWriter writer) throws IOException + { + for (TestSection section : testCase.sections()) { + switch (section.section()) { + case AST: + writeSection(section, ast, writer); + break; + case UNPARSED: + writeSection(section, unparsed, writer); + break; + case EXPLAIN: + writeSection(section, explain, writer); + break; + case PLAN: + writeSection(section, plan, writer); + break; + case EXEC_PLAN: + writeSection(section, execPlan, writer); + break; + case SCHEMA: + writeSection(section, schema, writer); + break; + case TARGET_SCHEMA: + writeSection(section, targetSchema, writer); + break; + case NATIVE: + writeSection(section, nativeQuery, writer); + break; + case RESOURCES: + writeSection(section, resourceActions, writer); + break; + case RESULTS: + writeSection(section, null, writer); + break; + default: + break; + } + } + } + + private void writeRuns(TestCaseWriter writer) throws IOException + { + List copy = new ArrayList<>(runs); + for (QueryRun expectedRun : testCase.runs()) { + ActualRun actualRun = null; + for (int i = 0; i < copy.size(); i++) { + if (copy.get(i).run == expectedRun) { + ActualRun run = copy.remove(i); + if (actualRun == null && !run.ok) { + actualRun = run; + } + } + } + if (actualRun == null) { + expectedRun.write(writer); + } else { + actualRun.write(writer); + } + } + } + + private void writeSection( + TestSection testSection, + ActualResultsSection resultsSection, + TestCaseWriter writer + ) throws IOException + { + if (resultsSection != null) { + resultsSection.write(writer); + } else if (actualException == null) { + testSection.write(writer); + } + } +} diff --git 
a/sql/src/test/java/org/apache/druid/sql/calcite/tester/CalciteTestCapture.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/CalciteTestCapture.java new file mode 100644 index 000000000000..ec5a56b0580f --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/CalciteTestCapture.java @@ -0,0 +1,369 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.sql.calcite.tester; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import junitparams.Parameters; +import org.apache.druid.common.config.NullHandling; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.math.expr.ExpressionProcessing; +import org.apache.druid.query.Query; +import org.apache.druid.sql.calcite.BaseCalciteQueryTest; +import org.apache.druid.sql.calcite.BaseCalciteQueryTest.DefaultResultsVerifier; +import org.apache.druid.sql.calcite.BaseCalciteQueryTest.ResultsVerifier; +import org.apache.druid.sql.calcite.QueryDefn; +import org.apache.druid.sql.calcite.planner.PlannerConfig; +import org.apache.druid.sql.calcite.planner.PlannerResult; +import org.apache.druid.sql.calcite.util.CalciteTests; +import org.junit.rules.ExpectedException; + +import javax.annotation.Nullable; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; + +/** + * Captures a single CalciteTestCase test in a form that + * can be converted to a Planner test case. + */ +public class CalciteTestCapture +{ + private final PlannerConfig plannerConfig; + private final QueryDefn queryDefn; + private final List> expectedQueries; + private final ResultsVerifier expectedResultsVerifier; + @Nullable private final Consumer expectedExceptionInitializer; + private PlannerResult plannerResult; + private List results; + protected String methodName; + private Exception exception; + private boolean cannotVectorize; + private int mergeBufCount; + private final boolean sqlCompatibleNulls; + private final boolean allowNestedArrays; + protected final String provider; + private String user; + + /** + * Grab information available in {code testQuery()}. 
+ */ + public CalciteTestCapture( + final PlannerConfig plannerConfig, + final QueryDefn queryDefn, + final List> expectedQueries, + final ResultsVerifier expectedResultsVerifier, + @Nullable final Consumer expectedExceptionInitializer + ) + { + this.plannerConfig = plannerConfig; + this.queryDefn = queryDefn; + this.expectedQueries = expectedQueries; + this.expectedResultsVerifier = expectedResultsVerifier; + this.expectedExceptionInitializer = expectedExceptionInitializer; + Pair testMethod = captureMethod(); + this.methodName = testMethod.rhs; + if (testMethod.lhs == null) { + this.provider = null; + } else { + this.provider = getProvider(testMethod.lhs, testMethod.rhs); + } + if (queryDefn.authResult() == CalciteTests.SUPER_USER_AUTH_RESULT) { + user(CalciteTests.TEST_SUPERUSER_NAME); + } + this.sqlCompatibleNulls = NullHandling.sqlCompatible(); + this.allowNestedArrays = ExpressionProcessing.allowNestedArrays(); + } + + /** + * Find the last Druid method before we hit the first JUnit method. + * That is usually the test name. The stack trace is listed lowest + * method first. 
+ */ + private static Pair captureMethod() + { + String testClass = null; + String methodName = "unknown"; + StackTraceElement[] trace = Thread.currentThread().getStackTrace(); + for (StackTraceElement element : trace) { + String className = element.getClassName(); + if (className.startsWith("org.apache.druid")) { + testClass = className; + methodName = element.getMethodName(); + } else if (className.startsWith("org.junit")) { + break; + } + } + return Pair.of(testClass, methodName); + } + + private String getProvider(String className, String methodName) + { + Class testClass; + try { + testClass = getClass().getClassLoader().loadClass(className); + } + catch (ClassNotFoundException e) { + return null; + } + if (testClass == null) { + return null; + } + for (Method method : testClass.getDeclaredMethods()) { + if (method.getName().equals(methodName)) { + Parameters params = method.getAnnotation(Parameters.class); + if (params == null) { + return null; + } + return params.source().getSimpleName(); + } + } + return null; + } + + /** + * Capture the results from the planner. + */ + public void plannerResult(PlannerResult plannerResult) + { + this.plannerResult = plannerResult; + } + + /** + * Capture results as a list of objects. + */ + public void results(List runResults) + { + this.results = runResults; + } + + /** + * Capture the options specified via various methods and held in + * variables. + */ + public void options( + boolean cannotVectorize, + int mergeBufCount + ) + { + this.cannotVectorize = cannotVectorize; + this.mergeBufCount = mergeBufCount; + } + + /** + * Write the gathered information in test case format. 
+ */ + protected void write( + boolean includeRun, + TestCaseWriter writer, + ObjectMapper jsonMapper + ) throws IOException + { + writeCase(includeRun, writer); + if (exception != null) { + writeException(writer); + } else { + writeSchema(writer); + writer.emitPlan("unavailable\n"); + writeNative(writer, jsonMapper); + if (includeRun) { + writeResults(writer, jsonMapper); + } + } + } + + private void writeException(TestCaseWriter writer) throws IOException + { + writer.emitException(exception); + writer.emitError(exception); + } + + private void writeCase(boolean includeRun, TestCaseWriter writer) throws IOException + { + String comment = "Converted from " + methodName + "()"; + writer.emitComment(Collections.singletonList(comment)); + String label = decodeMethod(methodName); + writer.emitCase(label); + + writer.emitSql(queryDefn.sql()); + writer.emitContext(QueryTestCases.rewriteContext(queryDefn.context())); + if (!queryDefn.parameters().isEmpty()) { + writer.emitParameters(queryDefn.parameters()); + } + Map options = new HashMap<>(); + if (allowNestedArrays) { + options.put(OptionsSection.ALLOW_NESTED_ARRAYS, allowNestedArrays); + } + if (provider != null) { + options.put(OptionsSection.PROVIDER_CLASS, provider); + } + if (user != null) { + options.put(OptionsSection.USER_OPTION, user); + } + options.put(OptionsSection.VECTORIZE_OPTION, !cannotVectorize); + savePlannerConfig(options); + if (includeRun) { + if (mergeBufCount != 0) { + options.put(OptionsSection.MERGE_BUFFER_COUNT, mergeBufCount); + } + } + writer.emitOptions(options); + } + + /** + * Convert the method name into an English-like test label. 
+ */ + private String decodeMethod(String methodName) + { + if (methodName.startsWith("test")) { + methodName = methodName.substring(4); + } + StringBuilder buf = new StringBuilder(); + for (int i = 0; i < methodName.length(); i++) { + char c = methodName.charAt(i); + if (i == 0) { + buf.append(Character.toUpperCase(c)); + } else if (Character.isUpperCase(c)) { + buf.append(' ').append(Character.toLowerCase(c)); + } else { + buf.append(c); + } + } + return buf.toString(); + } + + /** + * Convert selected planner config options into test case options + * that will then be used to recreate the planner config. + */ + private void savePlannerConfig(Map options) + { + PlannerConfig base = BaseCalciteQueryTest.PLANNER_CONFIG_DEFAULT; + if (base.getMaxTopNLimit() != plannerConfig.getMaxTopNLimit()) { + options.put( + OptionsSection.PLANNER_MAX_TOP_N, + plannerConfig.getMaxTopNLimit()); + } + if (base.isUseApproximateCountDistinct() != plannerConfig.isUseApproximateCountDistinct()) { + options.put( + OptionsSection.PLANNER_APPROX_COUNT_DISTINCT, + plannerConfig.isUseApproximateCountDistinct()); + } + if (base.isUseApproximateTopN() != plannerConfig.isUseApproximateTopN()) { + options.put( + OptionsSection.PLANNER_APPROX_TOP_N, + plannerConfig.isUseApproximateTopN()); + } + if (base.isRequireTimeCondition() != plannerConfig.isRequireTimeCondition()) { + options.put( + OptionsSection.PLANNER_REQUIRE_TIME_CONDITION, + plannerConfig.isRequireTimeCondition()); + } + if (base.getSqlTimeZone() != plannerConfig.getSqlTimeZone()) { + options.put( + OptionsSection.PLANNER_SQL_TIME_ZONE, + plannerConfig.getSqlTimeZone()); + } + if (base.isUseGroupingSetForExactDistinct() != plannerConfig.isUseGroupingSetForExactDistinct()) { + options.put( + OptionsSection.PLANNER_USE_GROUPING_SET_FOR_EXACT_DISTINCT, + plannerConfig.isUseGroupingSetForExactDistinct()); + } + if (base.isComputeInnerJoinCostAsFilter() != plannerConfig.isComputeInnerJoinCostAsFilter()) { + options.put( + 
OptionsSection.PLANNER_COMPUTE_INNER_JOIN_COST_AS_FILTER, + plannerConfig.isComputeInnerJoinCostAsFilter()); + } + if (base.isUseNativeQueryExplain() != plannerConfig.isUseNativeQueryExplain()) { + options.put( + OptionsSection.PLANNER_NATIVE_QUERY_EXPLAIN, + plannerConfig.isUseNativeQueryExplain()); + } + if (base.getMaxNumericInFilters() != plannerConfig.getMaxNumericInFilters()) { + options.put( + OptionsSection.PLANNER_MAX_NUMERIC_IN_FILTERS, + plannerConfig.getMaxNumericInFilters()); + } + } + + private void writeSchema(TestCaseWriter writer) throws IOException + { + if (plannerResult != null) { + writer.emitSchema(QueryTestCases.formatSchema(plannerResult)); + } + } + + private void writeNative(TestCaseWriter writer, ObjectMapper mapper) throws IOException + { + if (expectedQueries.isEmpty()) { + return; + } + if (expectedQueries.size() == 1) { + writer.emitNative(QueryTestCases.serializeQuery(mapper, expectedQueries.get(0))); + return; + } + // Create a fake "union query" to hold the expected queries. 
+ writer.emitNative( + QueryTestCases.serializeQuery( + mapper, + ImmutableMap.of( + "artificialQueryType", + "union", + "inputs", + expectedQueries))); + } + + private void writeResults(TestCaseWriter writer, ObjectMapper mapper) throws IOException + { + if (results == null && expectedResultsVerifier == null) { + return; + } + writer.emitSection("run"); + writer.emitOptions(ImmutableMap.of(OptionsSection.SQL_COMPATIBLE_NULLS, sqlCompatibleNulls)); + if (results != null) { + writer.emitResults(QueryTestCases.resultsToJson(results, mapper)); + return; + } + if (!(expectedResultsVerifier instanceof DefaultResultsVerifier)) { + CalciteTestRecorder.log.warn( + "%s(): Results verifier is of type %s - cannot record.", + methodName, + expectedResultsVerifier.getClass().getSimpleName()); + return; + } + DefaultResultsVerifier verifier = (DefaultResultsVerifier) expectedResultsVerifier; + writer.emitResults(QueryTestCases.resultsToJson(verifier.expectedResults(), mapper)); + } + + public void exception(Exception e) + { + this.exception = e; + } + + public void user(String user) + { + this.user = user; + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/CalciteTestRecorder.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/CalciteTestRecorder.java new file mode 100644 index 000000000000..14168cea006d --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/CalciteTestRecorder.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.java.util.common.FileUtils; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.logger.Logger; +import org.apache.druid.sql.calcite.util.CalciteTests; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +/** + * Hooks into the various CalciteQueryTest classes to record + * the tests. Generates a "starter" test case file from the recorded + * values. + *

+ * Note that JUnit runs the tests in the order that Java reports methods, + * which is generally not the order in which they appear in the + * source file. Use {@link TestCaseMerger} to reorder the tests to match + * the source file order. + */ +public interface CalciteTestRecorder +{ + Logger log = new Logger(CalciteTestRecorder.class); + + class DummyRecorder implements CalciteTestRecorder + { + @Override + public void record(CalciteTestCapture test) + { + } + + @Override + public void emit() + { + } + + @Override + public boolean isLive() + { + return false; + } + } + + class Recorder implements CalciteTestRecorder + { + private final boolean captureRun; + private final List tests = new ArrayList<>(); + private final ObjectMapper jsonMapper = CalciteTests.getJsonMapper(); + + public Recorder(boolean captureRun) + { + this.captureRun = captureRun; + } + + @Override + public boolean isLive() + { + return true; + } + + @Override + public void record(CalciteTestCapture test) + { + if (test == null) { + return; + } + if (tests.isEmpty()) { + tests.add(test); + return; + } + CalciteTestCapture prev = tests.get(tests.size() - 1); + if (!prev.methodName.equals(test.methodName) || + !Objects.equals(prev.provider, test.provider)) { + tests.add(test); + } + } + + @Override + public void emit() + { + File dest = new File("target/actual/recorded.case"); + try { + FileUtils.mkdirp(dest.getParentFile()); + } + catch (IOException e) { + throw new ISE("Cannot create directory: " + dest.getParent()); + } + emit(dest); + } + + private void emit(File dest) + { + try (Writer writer = new OutputStreamWriter(new FileOutputStream(dest), StandardCharsets.UTF_8)) { + TestCaseWriter testWriter = new TestCaseWriter(writer); + for (CalciteTestCapture test : tests) { + test.write(captureRun, testWriter, jsonMapper); + } + } + catch (IOException e) { + log.warn(e, "Failed to emit recorded tests"); + } + } + } + + boolean isLive(); + void record(CalciteTestCapture test); + void emit(); + + 
enum Option + { + OFF, + PLAN_ONLY, + PLAN_AND_RUN + } + + static CalciteTestRecorder create(Option option) + { + switch (option) { + case PLAN_ONLY: + return new Recorder(false); + case PLAN_AND_RUN: + return new Recorder(true); + default: + return new DummyRecorder(); + } + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/ContextSection.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ContextSection.java new file mode 100644 index 000000000000..01b5beffc1c4 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ContextSection.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * The (query) context test case section. 
+ */ +public class ContextSection extends TestSection +{ + protected final Map context; + + protected ContextSection(Map context) + { + this(context, false); + } + + protected ContextSection(Map context, boolean copy) + { + super(Section.CONTEXT.sectionName(), copy); + this.context = context; + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.CONTEXT; + } + + @Override + public TestSection copy() + { + return new ContextSection(context, true); + } + + public Map context() + { + return context; + } + + public List sorted() + { + List keys = new ArrayList<>(context.keySet()); + Collections.sort(keys); + List sorted = new ArrayList<>(); + for (String key : keys) { + sorted.add(key + "=" + context.get(key)); + } + return sorted; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + ContextSection other = (ContextSection) o; + return context.equals(other.context); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. + */ + @Override + public int hashCode() + { + return Objects.hash(context); + } + + @Override + public void writeSection(TestCaseWriter writer) throws IOException + { + writer.emitContext(context); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/LinesSection.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/LinesSection.java new file mode 100644 index 000000000000..245cf40fb896 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/LinesSection.java @@ -0,0 +1,304 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.UOE; +import org.apache.druid.sql.calcite.tester.ActualResults.ErrorCollector; +import org.junit.Assert; +import org.junit.internal.ComparisonCriteria; +import org.junit.internal.InexactComparisonCriteria; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A test case section that consists of a set of literal lines. + */ +public abstract class LinesSection extends TestSection +{ + /** + * The case test case section. Contents is a single line + * that gives the test case label. + */ + public static class CaseSection extends LinesSection + { + protected CaseSection(List lines) + { + super(Section.CASE.sectionName(), lines, false); + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.CASE; + } + + @Override + public TestSection copy() + { + throw new UOE("CaseSection.copy()"); + } + } + + /** + * The (expected) results test case section. 
+ */ + public static class ResultsSection extends LinesSection + { + protected ResultsSection(List lines) + { + this(lines, false); + } + + protected ResultsSection(List lines, boolean copy) + { + super(Section.RESULTS.sectionName(), lines, copy); + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.RESULTS; + } + + @Override + public TestSection copy() + { + return new ResultsSection(lines, true); + } + + /** + * Verify results using a simple string compare. Works fine for all but + * double and float types. + */ + public boolean verify(List actual, ErrorCollector errors) + { + if (!verifySize(actual.size(), errors)) { + return false; + } + boolean ok = true; + for (int i = 0; i < lines.size(); i++) { + if (!actual.get(i).equals(lines.get(i))) { + errors.add( + StringUtils.format( + "Results differ at line %d", + i + 1)); + ok = false; + } + } + return ok; + } + + private static final TypeReference OBJECT_ARRAY_REFERENCE = new TypeReference() + { + }; + + /** + * JUnit-style comparison criteria for the case of an object deserialized + * from JSON. JSON does not know the original types (the types used by + * the query engine). Instead, it infers equivalent types from the data. + * Thus, longs may be integers, floats may be doubles, etc. This class + * works out the equivalences, and also compares doubles using an approximate + * comparison. + * + * The result is generally useful, but a bit slow: use it only when there + * are actual ambiguities. + */ + public static class JsonComparsionCriteria extends InexactComparisonCriteria + { + public JsonComparsionCriteria(double delta) + { + super(delta); + } + + @Override + protected void assertElementsEqual(Object expected, Object actual) + { + // If both elements are a floating point type, convert both to double + // and do an inexact compare. 
+ if (expected instanceof Float || expected instanceof Double && + actual instanceof Float || actual instanceof Double) { + double eDouble = (expected instanceof Float) ? (Float) expected : (Double) expected; + double aDouble = (actual instanceof Float) ? (Float) actual : (Double) actual; + Assert.assertEquals(eDouble, aDouble, (double) fDelta); + return; + + // If both types are integral, convert both to longs and do an exact + // compare. + } else if (expected instanceof Integer || expected instanceof Long && + actual instanceof Integer || actual instanceof Long) { + long eLong = (expected instanceof Integer) ? (Integer) expected : (Long) expected; + long aLong = (actual instanceof Integer) ? (Integer) actual : (Long) actual; + Assert.assertEquals(eLong, aLong); + return; + + // Lists of objects? Lists are equivalent if they are of the same length, + // all items in both sets are null (regardless of type, which JSON won't + // know), or if the elements are equivalent as defined here. + } else if (expected instanceof List && actual instanceof List) { + List eList = (List) expected; + List aList = (List) actual; + if (eList.size() == aList.size()) { + for (int i = 0; i < eList.size(); i++) { + Object eItem = eList.get(i); + Object aItem = aList.get(i); + + // Nulls of any type are equal. + if (eItem == null && aItem == null) { + continue; + } + assertElementsEqual(eItem, aItem); + } + return; + } + } + + // Not a special case, use a generic compare. This compare uses exact + // semantics, so if it turns out that there are, say, embedded arrays, + // we'd have to extend the above to handle that case. + Assert.assertEquals(expected, actual); + } + } + + /** + * Compare actual results, as Java objects, with the expected results, + * parsed as JSON from string lines. Uses an inexact comparison that provides + * a delta of 1% for float and double values. 
+ */ + public boolean verify(List actual, ObjectMapper mapper, ErrorCollector errors) + { + if (!verifySize(actual.size(), errors)) { + return false; + } + ComparisonCriteria compare = new JsonComparsionCriteria(0.01); + boolean ok = true; + for (int i = 0; i < lines.size(); i++) { + Object expectedRow; + try { + expectedRow = mapper.readValue(lines.get(i), OBJECT_ARRAY_REFERENCE); + } + catch (IOException e) { + errors.add( + StringUtils.format( + "Invalid JSON row object: on line %d: %s", + i + 1, + e.getMessage())); + ok = false; + continue; + } + try { + compare.arrayEquals("", expectedRow, actual.get(i)); + } + catch (Exception e) { + errors.add( + StringUtils.format( + "Results differ at line %d: %s", + i + 1, + e.getMessage())); + ok = false; + } + } + return ok; + } + + private boolean verifySize(int actualSize, ErrorCollector errors) + { + if (actualSize != lines.size()) { + errors.add( + StringUtils.format( + "Expected %d rows but got %d", + lines.size(), + actualSize)); + return false; + } + return true; + } + } + + /** + * The comments test case section which precedes the + * start of the test case. 
+ */ + public static class CommentsSection extends LinesSection + { + protected CommentsSection(List lines) + { + super(Section.COMMENTS.sectionName(), lines, false); + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.COMMENTS; + } + + @Override + public TestSection copy() + { + throw new UOE("CommentsSection.copy()"); + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + writer.emitComment(lines); + } + } + + protected final List lines; + + protected LinesSection(String name, List lines, boolean copy) + { + super(name, copy); + this.lines = lines; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + LinesSection other = (LinesSection) o; + return lines.equals(other.lines); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. + */ + @Override + public int hashCode() + { + return Objects.hash(lines); + } + + @Override + public void writeSection(TestCaseWriter writer) throws IOException + { + writer.emitSection(name, lines); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/OptionsSection.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/OptionsSection.java new file mode 100644 index 000000000000..6ce6c1448e5e --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/OptionsSection.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * The options test case section. + */ +public class OptionsSection extends TestSection +{ + /** + * Specifies the "user" (actually, authentication result) to use. + * The user is a regular user by default. Set to "super" to run as + * the super user. + */ + public static final String USER_OPTION = "user"; + public static final String MERGE_BUFFER_COUNT = "mergeBufferCount"; + + /** + * Filter on a test case or run section that says whether the test or + * run should be done for each of the SQL-compatible null modes. + * "true" means use SQL-compatible nulls, "false" means use "replace nulls + * with default" and "both" means the expected results are the same in + * both cases. + */ + public static final String SQL_COMPATIBLE_NULLS = "sqlCompatibleNulls"; + public static final String NULL_HANDLING_BOTH = "both"; + + /** + * Indicates that results should be compared as Java objects, with a + * delta used for float and double values. + */ + public static final String TYPED_COMPARE = "typedCompare"; + + // Planner variations. Corresponds to the various settings + // in BaseCalciteTest. Since each of those configs alters only + // one value from the default, these are also the name of the + // PlannerConfig options which are changed. 
+  public static final String PLANNER_MAX_TOP_N = "planner.maxTopNLimit";
+  public static final String PLANNER_APPROX_COUNT_DISTINCT = "planner.useApproximateCountDistinct";
+  public static final String PLANNER_APPROX_TOP_N = "planner.useApproximateTopN";
+  public static final String PLANNER_REQUIRE_TIME_CONDITION = "planner.requireTimeCondition";
+  public static final String PLANNER_USE_GROUPING_SET_FOR_EXACT_DISTINCT = "planner.useGroupingSetForExactDistinct";
+  public static final String PLANNER_COMPUTE_INNER_JOIN_COST_AS_FILTER = "planner.computeInnerJoinCostAsFilter";
+  public static final String PLANNER_NATIVE_QUERY_EXPLAIN = "planner.useNativeQueryExplain";
+  public static final String PLANNER_MAX_NUMERIC_IN_FILTERS = "planner.maxNumericInFilters";
+  public static final String PLANNER_SQL_TIME_ZONE = "planner.sqlTimeZone";
+
+  /**
+   * Vectorization option. This option represents a bundle of context
+   * options. It is represented as an option to avoid copy/paste of the
+   * details. Also, if those details change, only the code that handles this
+   * option changes: we don't have to also change all the test cases.
+   */
+  public static final String VECTORIZE_OPTION = "vectorize";
+
+  public static final String FAILURE_OPTION = "failure";
+  public static final String FAIL_AT_RUN = "run";
+  public static final String FAIL_AT_PLAN = "plan";
+
+  /**
+   * Causes the test code to unescape Java-encoded Unicode characters
+   * in the SQL string. Used for one test case:
+   * {@code CalciteQueryTest.testUnicodeFilterAndGroupBy}, which
+   * uses a Hebrew character that is difficult to paste into the
+   * test {@code .case} file. It uses a Java-encoded Unicode sequence
+   * instead.
+   */
+  public static final String UNICODE_ESCAPE_OPTION = "unicodeEscapes";
+
+  /**
+   * Causes {@code ExpressionProcessingConfig} to allow nested arrays
+   * by calling {@code initializeForTests(true)}.
+ */ + public static final String ALLOW_NESTED_ARRAYS = "allowNestedArrays"; + public static final String PROVIDER_CLASS = "provider"; + + /** + * Set ExpressionProcessing.initializeForHomogenizeNullMultiValueStrings() + * Used in only one multi-value string test case. + */ + public static final String HOMOGENIZE_NULL_MULTI_VALUE_STRINGS = "homogenizeNullMultiValueStrings"; + + protected final Map options; + + protected OptionsSection(Map options) + { + this(options, false); + } + + protected OptionsSection(Map options, boolean copy) + { + super(Section.OPTIONS.sectionName(), copy); + this.options = options; + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.OPTIONS; + } + + @Override + public TestSection copy() + { + return new OptionsSection(options, true); + } + + public Map options() + { + return options; + } + + public String get(String key) + { + return options.get(key); + } + + public List sorted() + { + List keys = new ArrayList<>(options.keySet()); + Collections.sort(keys); + List sorted = new ArrayList<>(); + for (String key : keys) { + sorted.add(key + "=" + options.get(key)); + } + return sorted; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + OptionsSection other = (OptionsSection) o; + return options.equals(other.options); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. 
+ */ + @Override + public int hashCode() + { + return Objects.hash(options); + } + + @Override + public void writeSection(TestCaseWriter writer) throws IOException + { + writer.emitOptions(options); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParametersSection.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParametersSection.java new file mode 100644 index 000000000000..2bffb08453a8 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParametersSection.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.druid.sql.http.SqlParameter; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * The parameters test case section. 
+ */ +public class ParametersSection extends TestSection +{ + protected final List parameters; + + protected ParametersSection(List parameters) + { + this(parameters, false); + } + + protected ParametersSection(List parameters, boolean copy) + { + super(Section.PARAMETERS.sectionName(), copy); + this.parameters = parameters; + } + + public List parameters() + { + return parameters; + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.PARAMETERS; + } + + @Override + public TestSection copy() + { + return new ParametersSection(parameters, true); + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + ParametersSection other = (ParametersSection) o; + return parameters.equals(other.parameters); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. + */ + @Override + public int hashCode() + { + return Objects.hash(parameters); + } + + @Override + public void writeSection(TestCaseWriter writer) throws IOException + { + writer.emitParameters(parameters); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParseTreeSerializer.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParseTreeSerializer.java new file mode 100644 index 000000000000..014be7cbc0f3 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParseTreeSerializer.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.google.api.client.util.Strings; +import org.apache.calcite.sql.SqlNode; + +/** + * Serializes the Calcite parse tree into form handy for the + * test cases. Puts each node on a separate line. + */ +public class ParseTreeSerializer +{ + private int level; + private String prefix; + private final StringBuilder buf = new StringBuilder(); + + public void indent() + { + for (int i = 0; i < level; i++) { + buf.append(" "); + } + } + + public void prefix(String prefix) + { + this.prefix = prefix; + } + + public void node(SqlNode node, String details) + { + indent(); + emitPrefix(); + String name = node.getClass().getSimpleName(); + if (name.startsWith("Sql")) { + name = name.substring(3); + } + String kind = node.getKind().toString(); + buf.append(kind); + if (!kind.equalsIgnoreCase(name)) { + buf.append(" - "); + buf.append(name); + } + if (!Strings.isNullOrEmpty(details)) { + buf.append(" ("); + buf.append(details); + buf.append(")"); + } + buf.append("\n"); + } + + public void text(String text) + { + indent(); + emitPrefix(); + buf.append(text); + buf.append("\n"); + } + + private void emitPrefix() + { + if (prefix != null) { + buf.append(prefix); + buf.append(": "); + prefix = null; + } + } + + public void push() + { + level++; + } + + public void pop() + { + level--; + } + + public String result() + { + return buf.toString(); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParseTreeVisualizer.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParseTreeVisualizer.java new file mode 100644 index 000000000000..eca3d765c70c --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ParseTreeVisualizer.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.calcite.sql.SqlCall; +import org.apache.calcite.sql.SqlDataTypeSpec; +import org.apache.calcite.sql.SqlDynamicParam; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlIntervalQualifier; +import org.apache.calcite.sql.SqlLiteral; +import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.sql.SqlNodeList; +import org.apache.calcite.sql.SqlSelect; +import org.apache.calcite.sql.util.SqlVisitor; + +import java.util.List; + +/** + * Calcite SqlVisitor to visualize a parse tree for use in + * a test case. 
+ */ +public class ParseTreeVisualizer implements SqlVisitor +{ + private final ParseTreeSerializer out = new ParseTreeSerializer(); + + @Override + public Void visit(SqlLiteral literal) + { + out.node(literal, literal.toString()); + return null; + } + + @Override + public Void visit(SqlCall call) + { + switch (call.getKind()) { + case SELECT: + expandSelect((SqlSelect) call); + break; + default: + out.node(call, null); + if (call.getOperandList() != null) { + visit(call.getOperandList()); + } + } + return null; + } + + private void expandSelect(SqlSelect node) + { + // Node and keywords + out.node(node, node.getOperandList().get(0).toString()); + out.push(); + prefixed("SELECT", node.getSelectList()); + prefixed("FROM", node.getFrom()); + prefixed("WHERE", node.getWhere()); + prefixed("GROUP BY", node.getGroup()); + prefixed("HAVING", node.getHaving()); + prefixed("WINDOW", node.getWindowList()); + prefixed("ORDER BY", node.getOrderList()); + prefixed("OFFSET", node.getOffset()); + prefixed("FETCH", node.getFetch()); + out.pop(); + } + + private void prefixed(String prefix, SqlNode node) + { + if (node == null) { + return; + } + out.prefix(prefix); + node.accept(this); + } + + @Override + public Void visit(SqlNodeList nodeList) + { + if (nodeList.getList().isEmpty()) { + out.prefix(null); + } else { + out.text("("); + visit(nodeList.getList()); + out.text(")"); + } + return null; + } + + public void visit(List nodeList) + { + out.push(); + for (SqlNode node : nodeList) { + if (node == null) { + out.text(""); + } else { + node.accept(this); + } + } + out.pop(); + } + + @Override + public Void visit(SqlIdentifier id) + { + out.node(id, id.toString()); + return null; + } + + @Override + public Void visit(SqlDataTypeSpec type) + { + out.node(type, type.toString()); + return null; + } + + @Override + public Void visit(SqlDynamicParam param) + { + out.node(param, param.toString()); + return null; + } + + @Override + public Void visit(SqlIntervalQualifier 
intervalQualifier) + { + out.node(intervalQualifier, intervalQualifier.toString()); + return null; + } + + public String result() + { + return out.result(); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/PatternSection.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/PatternSection.java new file mode 100644 index 000000000000..358997e0198e --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/PatternSection.java @@ -0,0 +1,373 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.druid.java.util.common.StringUtils; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Generic regex-based test case section. + */ +public class PatternSection extends TestSection +{ + public interface ExpectedLine + { + boolean matches(String line); + void write(TestCaseWriter writer) throws IOException; + } + + /** + * A single line of expected input. The line must match + * exactly (ignoring leading and trailing whitespace.) 
+ */ + public static class ExpectedLiteral implements ExpectedLine + { + protected final String line; + + public ExpectedLiteral(String line) + { + this.line = line; + } + + @Override + public boolean matches(String actual) + { + return line.trim().equals(actual.trim()); + } + + @Override + public String toString() + { + return line; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + ExpectedLiteral other = (ExpectedLiteral) o; + return Objects.equals(line, other.line); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. + */ + @Override + public int hashCode() + { + return line.hashCode(); + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + writer.emitLiteral(line); + } + } + + /** + * Expected value for a single line when using regular expressions + * to match the line. Normal Java regular expression rules apply. + * Matches the expected and actual lines after stripping leading + * and trailing whitespace. + * + */ + public static class ExpectedRegex implements ExpectedLine + { + protected final String line; + + public ExpectedRegex(String line) + { + this.line = line; + } + + @Override + public String toString() + { + return line; + } + + @Override + public boolean matches(String actual) + { + // Each line is used only once or twice: no advantage to caching. + Pattern p = Pattern.compile(line.trim()); + Matcher m = p.matcher(actual.trim()); + return m.matches(); + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + ExpectedRegex other = (ExpectedRegex) o; + return Objects.equals(line, other.line); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. 
+ */ + @Override + public int hashCode() + { + return line.hashCode(); + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + writer.emitPattern(line); + } + } + + /** + * Matches any number of lines up to the first match of + * the following pattern. + */ + public static class SkipAny implements ExpectedLine + { + @Override + public String toString() + { + return ""; + } + + @Override + public boolean matches(String actual) + { + return true; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + return o != null && o.getClass() == getClass(); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. + */ + @Override + public int hashCode() + { + return 1; + } + + @Override + public void write(TestCaseWriter writer) throws IOException + { + writer.emitOptionalLine("**"); + } + } + + /** + * Represents a block of expected lines: literals, regular + * expressions or wild cards. + */ + public static class ExpectedText + { + protected final List lines; + + public ExpectedText(List lines) + { + this.lines = lines; + } + + public void verify(String actual, ActualResults.ErrorCollector errors) + { + if (actual == null) { + errors.add("Actual value is null"); + } else { + verify(actual.split("\n"), errors); + } + } + + public boolean verify(String[] lines, ActualResults.ErrorCollector errors) + { + int aPosn = 0; + int ePosn = 0; + while (aPosn < lines.length && ePosn < this.lines.size()) { + ExpectedLine expected = this.lines.get(ePosn++); + if (expected instanceof SkipAny) { + if (ePosn == this.lines.size()) { + return true; + } + expected = this.lines.get(ePosn); + while (aPosn < lines.length) { + if (expected.matches(lines[aPosn])) { + aPosn++; + ePosn++; + break; + } + aPosn++; + } + } else { + if (!expected.matches(lines[aPosn])) { + errors.add( + StringUtils.format("line %d: expected [%s], actual [%s]", + aPosn + 1, + expected, + lines[aPosn])); + return false; + 
} + aPosn++; + } + } + if (ePosn < this.lines.size()) { + errors.add("Missing lines from actual result"); + return false; + } + // Ignore trailing newlines + while (aPosn < lines.length && lines[aPosn].trim().length() == 0) { + aPosn++; + } + if (aPosn < lines.length) { + errors.add("Unexpected lines at line " + (aPosn + 1)); + return false; + } + return true; + } + + public void write(TestCaseWriter writer) throws IOException + { + for (ExpectedLine line : lines) { + line.write(writer); + } + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + ExpectedText other = (ExpectedText) o; + return lines.equals(other.lines); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. + */ + @Override + public int hashCode() + { + return lines.hashCode(); + } + } + + protected final TestSection.Section section; + protected final PatternSection.ExpectedText expected; + + protected PatternSection(Section section, String name, ExpectedText expected) + { + this(section, name, expected, false); + } + + protected PatternSection(Section section, String name, ExpectedText expected, boolean copy) + { + super(name, copy); + this.section = section; + this.expected = expected; + } + + public PatternSection.ExpectedText expected() + { + return expected; + } + + @Override + public TestSection.Section section() + { + return section; + } + + @Override + public TestSection copy() + { + return new PatternSection(section, name, expected, true); + } + + public boolean verify(String actual, ActualResults.ErrorCollector errors) + { + String[] lines = actual == null ? 
null : actual.split("\n"); + return verify(lines, errors); + } + + public boolean verify(String[] actual, ActualResults.ErrorCollector errors) + { + errors.setSection(section().sectionName()); + if (actual == null) { + errors.add("Section " + section + " actual results are missing."); + return false; + } else { + return expected.verify(actual, errors); + } + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + PatternSection other = (PatternSection) o; + return expected.equals(other.expected); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. + */ + @Override + public int hashCode() + { + return java.util.Objects.hash(section, name, expected); + } + + @Override + public void writeSection(TestCaseWriter writer) throws IOException + { + writer.emitSection(name); + expected.write(writer); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/PlannerFixture.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/PlannerFixture.java new file mode 100644 index 000000000000..a8e12f346afa --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/PlannerFixture.java @@ -0,0 +1,396 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.module.SimpleModule; +import org.apache.calcite.tools.RelConversionException; +import org.apache.druid.jackson.DefaultObjectMapper; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.UOE; +import org.apache.druid.java.util.common.io.Closer; +import org.apache.druid.math.expr.ExprMacroTable; +import org.apache.druid.math.expr.ExpressionProcessing; +import org.apache.druid.query.QueryContexts; +import org.apache.druid.query.QueryRunnerFactoryConglomerate; +import org.apache.druid.query.lookup.LookupSerdeModule; +import org.apache.druid.query.topn.TopNQueryConfig; +import org.apache.druid.server.QueryStackTests; +import org.apache.druid.server.security.AuthConfig; +import org.apache.druid.server.security.AuthenticationResult; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.sql.SqlLifecycleFactory; +import org.apache.druid.sql.calcite.BaseCalciteQueryTest; +import org.apache.druid.sql.calcite.external.ExternalDataSource; +import org.apache.druid.sql.calcite.planner.Calcites; +import org.apache.druid.sql.calcite.planner.DruidOperatorTable; +import org.apache.druid.sql.calcite.planner.PlannerConfig; +import org.apache.druid.sql.calcite.planner.PlannerFactory; +import org.apache.druid.sql.calcite.run.QueryMakerFactory; +import org.apache.druid.sql.calcite.schema.DruidSchemaCatalog; +import org.apache.druid.sql.calcite.util.CalciteTests; +import org.apache.druid.sql.calcite.util.RootSchemaBuilder; +import org.apache.druid.sql.calcite.util.RootSchemaBuilder.CatalogResult; +import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker; +import 
org.apache.druid.sql.calcite.view.InProcessViewManager; +import org.apache.druid.sql.calcite.view.ViewManager; +import org.apache.druid.sql.http.SqlParameter; + +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Configures and holds the Druid planner and its associated + * helper classes. By default, sets up the planner to mimic the + * {@code BaseCalciteTest} class, but all bits are configurable for + * special cases. (To do that, extend the {@link Builder} class + * with the required methods.) + */ +public class PlannerFixture +{ + /** + * Builds the planner fixture by allowing the test case to customize + * parts of the build process without copy/pasting the entire messy + * setup process. The builder is also a "rebuilder" to build a second + * planner factory when the planner settings change. Since the planner + * settings holds more than just planner settings, it also + * is used in code that supports the planner. The structure works fine + * when Druid is run normally, but is awkward in tests. This builder + * hides all that cruft. + */ + public static class Builder + { + static { + Calcites.setSystemProperties(); + ExpressionProcessing.initializeForTests(null); + } + + final File temporaryFolder; + List jacksonModules; + Map jacksonInjectables = new HashMap<>(); + + // Planner config contains values use by the planner, but also + // by the Druid schema to control the refresh interval. The + // value here is used by the mock schema objects. It is also + // used when planning unless a case provides its own config. + // Test-specific configs do not contain values that influence + // the schema usage of the config. Rather confusing. 
+ PlannerConfig plannerConfig = new PlannerConfig(); + DruidSchemaCatalog rootSchema; + AuthConfig authConfig = new AuthConfig(); + DruidOperatorTable operatorTable = CalciteTests.createOperatorTable(); + ExprMacroTable macroTable = CalciteTests.createExprMacroTable(); + AuthorizerMapper authorizerMapper = CalciteTests.TEST_AUTHORIZER_MAPPER; + ObjectMapper objectMapper; + String druidSchemaName = CalciteTests.DRUID_SCHEMA_NAME; + QueryMakerFactory queryMakerFactory; + Closer resourceCloser = Closer.create(); + int minTopNThreshold = TopNQueryConfig.DEFAULT_MIN_TOPN_THRESHOLD; + ViewManager viewManager; + File resultsDir = new File("target/actual"); + List> views = new ArrayList<>(); + Map defaultQueryOptions; + AuthenticationResult defaultAuthResult = CalciteTests.REGULAR_USER_AUTH_RESULT; + QueryRunnerFactoryConglomerate conglomerate; + SpecificSegmentsQuerySegmentWalker walker; + boolean includeLookups; + int mergeBufferCount = -1; + + public Builder(File temporaryFolder) + { + this.temporaryFolder = temporaryFolder; + + // See BaseCalciteQueryTest.getJacksonModules() + jacksonModules = new ArrayList<>(new LookupSerdeModule().getJacksonModules()); + jacksonModules.add(new SimpleModule().registerSubtypes(ExternalDataSource.class)); + + // See BaseCalciteQueryTest.createQueryJsonMapper() + objectMapper = new DefaultObjectMapper().registerModules(jacksonModules); + BaseCalciteQueryTest.setMapperInjectableValues(objectMapper, jacksonInjectables, macroTable); + } + + public Builder withView(String viewName, String stmt) + { + views.add(Pair.of(viewName, stmt)); + return this; + } + + public Builder withPlannerConfig(PlannerConfig plannerConfig) + { + this.plannerConfig = plannerConfig; + return this; + } + + public Builder withQueryMaker(QueryMakerFactory queryMakerFactory) + { + this.queryMakerFactory = queryMakerFactory; + return this; + } + + public Builder defaultQueryOptions(Map defaultQueryOptions) + { + this.defaultQueryOptions = defaultQueryOptions; + return 
this; + } + + public Builder withLookups() + { + this.includeLookups = true; + return this; + } + + public Builder withMergeBufferCount(int count) + { + this.mergeBufferCount = count; + return this; + } + + public Builder withAuthResult(AuthenticationResult authResult) + { + this.defaultAuthResult = authResult; + return this; + } + + public ObjectMapper jsonMapper() + { + return this.objectMapper; + } + + public Builder copy() + { + Builder copy = new Builder(temporaryFolder); + copy.jacksonModules = jacksonModules; + copy.jacksonInjectables = jacksonInjectables; + copy.plannerConfig = plannerConfig; + copy.authConfig = authConfig; + copy.operatorTable = operatorTable; + copy.macroTable = macroTable; + copy.authorizerMapper = authorizerMapper; + copy.objectMapper = objectMapper; + copy.druidSchemaName = druidSchemaName; + copy.queryMakerFactory = queryMakerFactory; + copy.minTopNThreshold = minTopNThreshold; + copy.viewManager = viewManager; + copy.resultsDir = resultsDir; + copy.defaultQueryOptions = defaultQueryOptions; + copy.defaultAuthResult = defaultAuthResult; + copy.includeLookups = includeLookups; + // Don't copy the conglomerate or walker: one of them + // caches the null handling setting and causes tests to + // fail if they are reused. + // Don't copy the views: they are already in the view manager. 
+ copy.views = new ArrayList<>(); + return copy; + } + + public PlannerFixture build() + { + return new PlannerFixture(this); + } + } + + public static class ExplainFixture + { + final PlannerFixture plannerFixture; + final String sql; + final Map context; + final List parameters; + final AuthenticationResult authenticationResult; + private List results; + + public ExplainFixture( + PlannerFixture plannerFixture, + String sql, + Map context, + List parameters, + AuthenticationResult authenticationResult) + { + this.plannerFixture = plannerFixture; + this.sql = sql; + this.context = context; + this.parameters = parameters; + this.authenticationResult = authenticationResult; + } + + public ExplainFixture(PlannerFixture plannerFixture, String sql, Map context) + { + this( + plannerFixture, + sql, context, + Collections.emptyList(), + CalciteTests.REGULAR_USER_AUTH_RESULT); + } + + public void explain() throws RelConversionException + { + results = plannerFixture.sqlLifecycleFactory + .factorize() + .runSimple(sql, context, parameters, authenticationResult) + .toList(); + } + + public Pair results() + { + Object[] row = results.get(0); + return Pair.of((String) row[0], (String) row[1]); + } + } + + final Builder builder; + final QueryRunnerFactoryConglomerate conglomerate; + final SpecificSegmentsQuerySegmentWalker walker; + final SqlLifecycleFactory sqlLifecycleFactory; + final ObjectMapper jsonMapper; + final File resultsDir; + final ViewManager viewManager; + final Map defaultQueryOptions; + final AuthenticationResult defaultAuthResult; + final QueryRunner queryRunner; + + public PlannerFixture(Builder builder) + { + this.builder = builder; + + // Must rebuild the schema (and its mock data) each time since + // a change to global options will change the generated mock segments. 
+ if (builder.conglomerate == null) { + if (builder.mergeBufferCount > -1) { + conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate( + builder.resourceCloser, + QueryStackTests.getProcessingConfig(true, builder.mergeBufferCount), + () -> builder.minTopNThreshold); + } else { + conglomerate = QueryStackTests.createQueryRunnerFactoryConglomerate( + builder.resourceCloser, + () -> builder.minTopNThreshold); + } + } else { + conglomerate = builder.conglomerate; + } + if (builder.walker == null) { + walker = CalciteTests.createMockWalker( + conglomerate, + builder.temporaryFolder + ); + } else { + walker = builder.walker; + } + RootSchemaBuilder rootSchemaBuilder = new RootSchemaBuilder( + builder.plannerConfig, + builder.authorizerMapper) + .congolomerate(conglomerate) + .walker(walker) + .withLookupSchema(builder.includeLookups); + if (builder.viewManager == null) { + viewManager = new InProcessViewManager(CalciteTests.DRUID_VIEW_MACRO_FACTORY); + } else { + viewManager = builder.viewManager; + } + if (viewManager != null) { + rootSchemaBuilder.viewManager(viewManager); + } + CatalogResult result = rootSchemaBuilder.build(); + PlannerFactory plannerFactory = new PlannerFactory( + result.catalog, + result.createQueryMakerFactory(builder.objectMapper), + builder.operatorTable, + builder.macroTable, + builder.plannerConfig, + builder.authorizerMapper, + builder.objectMapper, + builder.druidSchemaName + ); + this.queryRunner = new QueryRunner(plannerFactory, builder.authorizerMapper); + for (Pair view : builder.views) { + viewManager.createView(plannerFactory, view.lhs, view.rhs); + } + this.sqlLifecycleFactory = CalciteTests.createSqlLifecycleFactory( + plannerFactory, + builder.authConfig); + this.resultsDir = builder.resultsDir; + this.jsonMapper = builder.objectMapper; + this.defaultQueryOptions = builder.defaultQueryOptions; + this.defaultAuthResult = builder.defaultAuthResult; + } + + public static Builder builder(File tempDir) + { + return new 
Builder(tempDir); + } + + /** + * Create a copy of the builder to change planner options. + * Leaves the conglomerate and walker, as they depend on + * null handling which must not change in the copy. + */ + public Builder toBuilder() + { + Builder newBuilder = builder.copy(); + newBuilder.conglomerate = conglomerate; + newBuilder.walker = walker; + return newBuilder; + } + + public File resultsDir() + { + return resultsDir; + } + + public File tempDir() + { + return builder.temporaryFolder; + } + + public PlannerConfig plannerConfig() + { + return builder.plannerConfig; + } + + public AuthenticationResult authResultFor(String user) + { + if (user == null) { + return defaultAuthResult; + } + throw new UOE("Not yet"); + } + + public QueryRunner queryRunner() + { + return queryRunner; + } + + public Map applyDefaultContext(Map context) + { + if (defaultQueryOptions != null) { + context = QueryContexts.override(defaultQueryOptions, context); + } + return context; + } + + public ActualResults runTestCase(QueryTestCase testCase) + { + return new QueryTestCaseRunner(this, testCase).run(); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryRun.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryRun.java new file mode 100644 index 000000000000..9f90db011042 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryRun.java @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableMap; +import org.apache.druid.query.QueryContexts; +import org.apache.druid.sql.calcite.tester.LinesSection.ResultsSection; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * A query run consists of a set of results or an execution/error, along + * with an optional set of query context values and execution options. + *

+ * If a test case has more than one run, then there should be an options + * or query context section to identify what changes between runs. The + * typical case is that one run covers "classic" null handling, while another + * covers SQL-compatible null handling. Options and query context "inherit" + * values from the query test case, overridden by any values set in the run. + */ +public class QueryRun extends SectionContainer +{ + /** + * Builder for a test case. Allows the test case itself to be + * immutable. + */ + public static class Builder + { + private final String label; + protected boolean isExplicit; + protected List sections = new ArrayList<>(); + protected String exception; + + public Builder(String label) + { + this.label = label; + } + + public Builder explicit(boolean isExplicit) + { + this.isExplicit = isExplicit; + return this; + } + + public void add(TestSection section) + { + if (section != null) { + sections.add(section); + } + } + + public QueryRun build(QueryTestCase testCase) + { + return new QueryRun(testCase, this); + } + } + + private final QueryTestCase testCase; + /** + * Whether the run section was explicitly included or was implied. + * Used when writing cases to recreate the original format. + */ + private final boolean isExplicit; + /** + * Order of the run within the test case. Used for generating a label + * for a case when no label is provided in the source file. 
+ */ + private final int ordinal; + + public QueryRun(QueryTestCase testCase, Builder builder) + { + super(builder.label, builder.sections); + this.testCase = testCase; + this.ordinal = testCase.runs().size() + 1; + this.isExplicit = builder.isExplicit; + } + + public QueryRun( + QueryTestCase testCase, + String label, + List sections, + boolean isExplicit) + { + super(label, sections); + this.testCase = testCase; + this.ordinal = testCase.runs().size() + 1; + this.isExplicit = isExplicit; + } + + public QueryTestCase testCase() + { + return testCase; + } + + public boolean isExplicit() + { + return isExplicit; + } + + public String displayLabel() + { + String value = label(); + if (Strings.isNullOrEmpty(value)) { + return "Run " + ordinal; + } else { + return value; + } + } + + public ResultsSection resultsSection() + { + return (LinesSection.ResultsSection) section(TestSection.Section.RESULTS); + } + + public List results() + { + ResultsSection resultsSection = resultsSection(); + return resultsSection == null ? Collections.emptyList() : resultsSection.lines; + } + + @Override + public Map context() + { + ContextSection section = contextSection(); + ContextSection querySection = testCase.contextSection(); + if (querySection == null) { + return section == null ? ImmutableMap.of() : section.context; + } + if (section == null) { + return querySection == null ? 
ImmutableMap.of() : querySection.context; + } + Map merged = new HashMap<>(); + merged.putAll(querySection.context); + merged.putAll(section.context); + return merged; + } + + public boolean shouldRunFail() + { + return failOnRun(); + } + + public boolean failOnRun() + { + return OptionsSection.FAIL_AT_RUN.equalsIgnoreCase(option(OptionsSection.FAILURE_OPTION)); + } + + @Override + public Map options() + { + Map caseOptions = testCase.options(); + Map options = super.options(); + if (caseOptions.isEmpty()) { + return options; + } + if (options.isEmpty()) { + return caseOptions; + } + Map merged = new HashMap<>(caseOptions); + merged.putAll(options); + return merged; + } + + @Override + public boolean booleanOption(String key) + { + return QueryContexts.getAsBoolean(key, option(key), false); + } + + @Override + public String option(String key) + { + String value = super.option(key); + if (value == null) { + value = testCase.option(key); + } + return value; + } + + public QueryRun copy(QueryTestCase testCase, boolean isExplicit) + { + return new QueryRun(testCase, label, fileOrder, isExplicit); + } + + public QueryRun copy(QueryTestCase testCase) + { + return copy(testCase, isExplicit); + } + + public void write(TestCaseWriter writer) throws IOException + { + if (isExplicit) { + writer.emitSection("run"); + if (!Strings.isNullOrEmpty(label)) { + writer.emitOptionalLine(label); + } + } + for (TestSection section : fileOrder) { + section.write(writer); + } + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryRunner.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryRunner.java new file mode 100644 index 000000000000..1f0528b09966 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryRunner.java @@ -0,0 +1,227 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.google.common.collect.ImmutableMap; +import org.apache.calcite.sql.parser.SqlParseException; +import org.apache.calcite.tools.RelConversionException; +import org.apache.calcite.tools.ValidationException; +import org.apache.druid.java.util.common.guava.Sequence; +import org.apache.druid.query.QueryContext; +import org.apache.druid.server.security.Access; +import org.apache.druid.server.security.AuthenticationResult; +import org.apache.druid.server.security.AuthorizationUtils; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.sql.calcite.QueryDefn; +import org.apache.druid.sql.calcite.planner.CapturedState; +import org.apache.druid.sql.calcite.planner.DruidPlanner; +import org.apache.druid.sql.calcite.planner.PlannerContext; +import org.apache.druid.sql.calcite.planner.PlannerFactory; +import org.apache.druid.sql.calcite.planner.PlannerResult; +import org.apache.druid.sql.calcite.planner.PlannerStateCapture; +import org.apache.druid.sql.calcite.planner.ValidationResult; +import org.apache.druid.sql.http.SqlParameter; +import org.apache.druid.sql.http.SqlQuery; + +import java.util.Collections; +import java.util.List; 
+import java.util.Map; + +/** + * Druid SQL query runner. Encapsulates the planner functionality needed + * to plan and run a query. Provides the ability to introspect the + * planner details when testing. + *

+ * This class wraps functionality which was previously spread widely + * in the code, or tightly coupled to a particular representation. This + * for is usable for both "production" and test code. + */ +public class QueryRunner +{ + /** + * Builder for the query definition. + */ + public static class Builder + { + private final String sql; + private Map context; + private List parameters; + private AuthenticationResult authenticationResult; + + public Builder(String sql) + { + this.sql = sql; + } + + public Builder(SqlQuery query) + { + this.sql = query.getQuery(); + this.context = query.getContext(); + this.parameters = query.getParameters(); + } + + public Builder context(Map context) + { + this.context = context; + return this; + } + + public Builder parameters(List parameters) + { + this.parameters = parameters; + return this; + } + + public Builder authResult(AuthenticationResult authenticationResult) + { + this.authenticationResult = authenticationResult; + return this; + } + + public QueryDefn build() + { + return new QueryDefn( + sql, + context == null ? ImmutableMap.of() : context, + parameters == null ? Collections.emptyList() : parameters, + authenticationResult); + } + } + + /** + * Introspected planner details, typically for testing. 
+ */ + public static class PlanDetails + { + private final QueryDefn queryDefn; + private final PlannerResult plannerResult; + private final CapturedState validateState; + private final CapturedState planState; + + public PlanDetails( + QueryDefn queryDefn, + CapturedState validateState, + CapturedState planState, + PlannerResult plannerResult) + { + this.queryDefn = queryDefn; + this.validateState = validateState; + this.planState = planState; + this.plannerResult = plannerResult; + } + + public QueryDefn queryDefn() + { + return queryDefn; + } + + public PlannerResult plannerResult() + { + return plannerResult; + } + + public CapturedState planState() + { + return planState; + } + + public ValidationResult validationResult() + { + return validateState.validationResult; + } + } + + private final PlannerFactory plannerFactory; + private final AuthorizerMapper authorizerMapper; + + public QueryRunner( + PlannerFactory plannerFactory, + AuthorizerMapper authorizerMapper + ) + { + this.plannerFactory = plannerFactory; + this.authorizerMapper = authorizerMapper; + } + + /** + * Run a query and provide the result set. + */ + public Sequence run(QueryDefn defn) throws SqlParseException, ValidationException, RelConversionException + { + return plan(defn).run(); + } + + /** + * Plan the query and provide the planner details for testing. + */ + public PlanDetails introspectPlan(QueryDefn defn) throws Exception + { + CapturedState validateState = new CapturedState(); + CapturedState planState = new CapturedState(); + PlannerResult plannerResult = plan(defn, validateState, planState); + return new PlanDetails(defn, validateState, planState, plannerResult); + } + + /** + * Plan a query. 
+ */ + public PlannerResult plan(QueryDefn defn) throws SqlParseException, ValidationException, RelConversionException + { + return plan(defn, null, null); + } + + public PlannerResult plan( + QueryDefn defn, + PlannerStateCapture validationCapture, + PlannerStateCapture planCapture + ) throws SqlParseException, ValidationException, RelConversionException + { + // Oddly, Druid runs the whole parser and conversion twice per query... + PlannerContext plannerContext; + try (DruidPlanner planner = plannerFactory.createPlanner( + defn.sql(), + new QueryContext(defn.context()))) { + if (validationCapture != null) { + planner.captureState(validationCapture); + } + plannerContext = planner.getPlannerContext(); + plannerContext.setParameters(defn.typedParameters()); + plannerContext.setAuthenticationResult(defn.authResult()); + ValidationResult validationResult = planner.validate(false); + Access access = + AuthorizationUtils.authorizeAllResourceActions( + defn.authResult(), + validationResult.getResourceActions(), + authorizerMapper + ); + plannerContext.setAuthorizationResult(access); + if (!access.isAllowed()) { + throw new ForbiddenException(access.toString()); + } + } + try (DruidPlanner planner = plannerFactory.createPlannerWithContext(plannerContext)) { + if (planCapture != null) { + planner.captureState(planCapture); + } + return planner.plan(); + } + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCase.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCase.java new file mode 100644 index 000000000000..30a954138660 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCase.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.google.common.collect.ImmutableMap; +import org.apache.commons.text.StringEscapeUtils; +import org.apache.druid.sql.calcite.tester.LinesSection.CommentsSection; +import org.apache.druid.sql.calcite.tester.TestSection.Section; +import org.apache.druid.sql.http.SqlParameter; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Represents one test case to exercise within the planner test + * framework. A test must have a SQL statement and may have any number + * of expected results sections. A case may include additional inputs + * such as query context settings, parameters and test options. + *

 * A test case consists of a SQL statement, optional planner, optional
 * options and optional planner results. It also includes zero or more
 * runs of the query, each with optional results. Typically there are one
 * or two runs: one for each kind of null handling.
 */
public class QueryTestCase extends SectionContainer
{
  /**
   * Builder for a test case. Allows the test case itself to be
   * immutable.
   */
  public static class Builder
  {
    private final String label;
    // Sections gathered while parsing the test case file.
    protected List sections = new ArrayList<>();
    // Expected exception, if the test case declares one.
    protected String exception;
    // One builder per declared run of the query.
    protected List runBuilders = new ArrayList<>();

    public Builder(String label)
    {
      this.label = label;
    }

    /**
     * Adds a section to the test case. A null section is silently
     * ignored so callers need not null-check optional sections.
     */
    public void add(TestSection section)
    {
      if (section != null) {
        sections.add(section);
      }
    }

    /**
     * Starts a new run of this query.
     *
     * @param label      display label for the run
     * @param isExplicit whether the run was explicitly declared in the
     *                   test file rather than generated implicitly
     */
    public QueryRun.Builder addRun(String label, boolean isExplicit)
    {
      QueryRun.Builder runBuilder = new QueryRun.Builder(label);
      runBuilder.explicit(isExplicit);
      runBuilders.add(runBuilder);
      return runBuilder;
    }

    /**
     * Builds the immutable test case, then materializes each pending
     * run against it.
     */
    public QueryTestCase build()
    {
      QueryTestCase testCase = new QueryTestCase(this);
      for (QueryRun.Builder runBuilder : runBuilders) {
        testCase.addRun(runBuilder.build(testCase));
      }
      return testCase;
    }
  }

  // Runs of this query; populated from the builder at build time.
  private List runs = new ArrayList<>();

  public QueryTestCase(Builder builder)
  {
    super(builder.label, builder.sections);
  }

  protected void addRun(QueryRun run)
  {
    runs.add(run);
  }

  public void addRuns(List runs)
  {
    this.runs.addAll(runs);
  }

  public TextSection.SqlSection sqlSection()
  {
    return (TextSection.SqlSection) section(TestSection.Section.SQL);
  }

  /**
   * Returns the SQL statement text, applying Java-style unescaping when
   * the test case sets the unicode-escape option.
   */
  public String sql()
  {
    String sql = sqlSection().text();
    if (booleanOption(OptionsSection.UNICODE_ESCAPE_OPTION)) {
      sql = StringEscapeUtils.unescapeJava(sql);
    }
    return sql;
  }

  /**
   * Returns the comment section joined into one string, or null when
   * the test case has no comment.
   */
  public String comment()
  {
    CommentsSection comments = (CommentsSection) section(TestSection.Section.COMMENTS);
    if (comments == null || comments.lines.isEmpty()) {
      return null;
    }
    if (comments.lines.size() == 1) {
      return comments.lines.get(0);
    }
    return String.join("\n", comments.lines);
  }

  /**
   * Returns the user declared in the options section, or null for the
   * default user.
   */
  public String user()
  {
    OptionsSection options = optionsSection();
    return options == null ? null : options.get(OptionsSection.USER_OPTION);
  }

  public PatternSection ast()
  {
    return (PatternSection) section(TestSection.Section.AST);
  }

  public PatternSection plan()
  {
    return (PatternSection) section(TestSection.Section.PLAN);
  }

  public PatternSection execPlan()
  {
    return (PatternSection) section(TestSection.Section.EXEC_PLAN);
  }

  @Override
  public Map context()
  {
    // An absent context section is treated as an empty context.
    ContextSection section = contextSection();
    return section == null ? ImmutableMap.of() : section.context;
  }

  public PatternSection explain()
  {
    return (PatternSection) section(TestSection.Section.EXPLAIN);
  }

  public PatternSection unparsed()
  {
    return (PatternSection) section(TestSection.Section.UNPARSED);
  }

  public PatternSection schema()
  {
    return (PatternSection) section(TestSection.Section.SCHEMA);
  }

  public PatternSection targetSchema()
  {
    return (PatternSection) section(TestSection.Section.TARGET_SCHEMA);
  }

  public PatternSection nativeQuery()
  {
    return (PatternSection) section(TestSection.Section.NATIVE);
  }

  public ResourcesSection resourceActions()
  {
    return (ResourcesSection) section(TestSection.Section.RESOURCES);
  }

  public ParametersSection parametersSection()
  {
    return (ParametersSection) section(TestSection.Section.PARAMETERS);
  }

  /**
   * Returns the declared query parameters, or an empty list when the
   * test case has no parameters section.
   */
  public List parameters()
  {
    ParametersSection params = parametersSection();
    return params == null ? Collections.emptyList() : params.parameters();
  }

  /**
   * The Druid planner is designed to be configured once per run,
   * but tests want to be more flexible. If the test wants to change a
   * planner setting, we must reset the whole planner stack. Less than
   * ideal, but it is what it is.
   */
  public boolean requiresCustomPlanner()
  {
    OptionsSection options = optionsSection();
    if (options == null) {
      return false;
    }
    // Any "planner."-prefixed option forces a rebuilt planner stack.
    for (String key : options.options.keySet()) {
      if (key.startsWith("planner.")) {
        return true;
      }
    }
    return false;
  }

  public List runs()
  {
    return runs;
  }

  public boolean hasRuns()
  {
    return runs != null && !runs.isEmpty();
  }

  /** Returns a copy of the given section, or null if it is absent. */
  protected TestSection copySection(Section section)
  {
    TestSection thisSection = section(section);
    return thisSection == null ? null : thisSection.copy();
  }

  /**
   * Writes this test case: sections in original file order, then the
   * runs in declaration order.
   */
  public void write(TestCaseWriter writer) throws IOException
  {
    for (TestSection section : fileOrder) {
      section.write(writer);
    }
    for (QueryRun run : runs) {
      run.write(writer);
    }
  }

  /**
   * Two test cases match when they share the same options and the
   * same query context.
   */
  public boolean matches(QueryTestCase testCase)
  {
    return Objects.equals(options(), testCase.options())
        && Objects.equals(context(), testCase.context());
  }
}
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCaseRunner.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCaseRunner.java
new file mode 100644
index 000000000000..2c4226e4fdd9
--- /dev/null
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCaseRunner.java
@@ -0,0 +1,661 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.
See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite.tester;

import com.google.common.collect.ImmutableMap;
import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.sql.SqlExplainFormat;
import org.apache.calcite.sql.SqlExplainLevel;
import org.apache.calcite.sql.SqlInsert;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Pair;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.math.expr.ExpressionProcessing;
import org.apache.druid.math.expr.ExpressionProcessingConfig;
import org.apache.druid.query.QueryContexts;
import org.apache.druid.query.QueryContexts.Vectorize;
import org.apache.druid.server.security.AuthenticationResult;
import org.apache.druid.sql.calcite.BaseCalciteQueryTest;
import org.apache.druid.sql.calcite.BaseCalciteQueryTest.QueryContextForJoinProvider;
import org.apache.druid.sql.calcite.QueryDefn;
import org.apache.druid.sql.calcite.parser.DruidSqlInsert;
import org.apache.druid.sql.calcite.parser.DruidSqlReplace;
import org.apache.druid.sql.calcite.planner.PlannerConfig;
import org.apache.druid.sql.calcite.rel.DruidRel;
import org.apache.druid.sql.calcite.tester.PlannerFixture.ExplainFixture;
import org.apache.druid.sql.calcite.tester.QueryRunner.PlanDetails;
import org.apache.druid.sql.calcite.util.CalciteTests;

import java.util.Collections;
import java.util.List;
import java.util.Map;

/**
 * Runs a test case and captures the planning-related aspects
 * of the query that the test case says to verify.
 * <p>
 * Druid is irritating in that several options are global, yet tests
 * want to test variations. This appears to normally be done by running
 * tests with different command-line settings, which is clunky. We want
 * to set those options in-line. Further, some of the global options
 * are cached in the planner, forcing us to rebuild the entire planner
 * when the options change. This is clearly an opportunity for improvement.
 */
public class QueryTestCaseRunner
{
  public static final Logger log = new Logger(QueryTestCaseRunner.class);

  private static final Map ENABLE_VECTORIZE_CONTEXT =
      ImmutableMap.of(
          QueryContexts.VECTORIZE_KEY,
          Vectorize.FORCE.name(),
          QueryContexts.VECTORIZE_VIRTUAL_COLUMNS_KEY,
          Vectorize.FORCE.name(),
          QueryContexts.VECTOR_SIZE_KEY,
          2); // Small vector size to ensure we use more than one.
  private static final Map DISABLE_VECTORIZE_CONTEXT =
      ImmutableMap.of(
          QueryContexts.VECTORIZE_KEY,
          Vectorize.FALSE.name(),
          QueryContexts.VECTORIZE_VIRTUAL_COLUMNS_KEY,
          Vectorize.FALSE.name());

  // Fixture shared across test cases; never modified here.
  private final PlannerFixture baseFixture;
  private final QueryTestCase testCase;
  private final ActualResults results;
  // Fixture actually used by this case; replaced by a custom one when
  // the test case overrides planner or global settings.
  private PlannerFixture plannerFixture;
  // Lazily-computed plan for the test query; see preparePlan().
  private PlanDetails planDetails;
  private ExplainFixture explainFixture;

  public QueryTestCaseRunner(PlannerFixture plannerFixture, QueryTestCase testCase)
  {
    this.baseFixture = plannerFixture;
    this.plannerFixture = plannerFixture;
    this.testCase = testCase;
    this.results = new ActualResults(testCase);
  }

  /**
   * Runs the test case and returns the captured actual results, or
   * null when the case does not apply in the current global
   * null-handling mode.
   */
  public ActualResults run()
  {
    return runWithExpressionOptions();
  }

  private ActualResults runWithExpressionOptions()
  {
    // Horrible, hacky way to change the way Druid handles
    // expressions. The config is meant to be global, initialized
    // on startup. This is a cheap workaround.
    // Only works for single-threaded tests.
    boolean allowNestedArrays = testCase.booleanOption(OptionsSection.ALLOW_NESTED_ARRAYS);
    boolean homogenizeNullMultiValueStrings = testCase.booleanOption(OptionsSection.HOMOGENIZE_NULL_MULTI_VALUE_STRINGS);
    if (allowNestedArrays == ExpressionProcessing.allowNestedArrays() &&
        homogenizeNullMultiValueStrings == ExpressionProcessing.isHomogenizeNullMultiValueStringArrays()) {
      return runWithNullHandingOptions();
    }
    ExpressionProcessingConfig prevExprConfig = ExpressionProcessing.currentConfig();
    try {
      ExpressionProcessing.initializeForTests(allowNestedArrays);
      if (homogenizeNullMultiValueStrings) {
        ExpressionProcessing.initializeForHomogenizeNullMultiValueStrings();
      }
      // Global expression config changed: force runWithCustomPlanner()
      // to rebuild the planner fixture from the base fixture.
      plannerFixture = null;
      return runWithNullHandingOptions();
    }
    finally {
      ExpressionProcessing.restoreConfig(prevExprConfig);
    }
  }

  // NOTE(review): method name has a typo ("Handing" vs. "Handling");
  // it is private, so it can safely be renamed in a follow-up.
  private ActualResults runWithNullHandingOptions()
  {
    // Horrible, hacky way to change the way Druid handles
    // nulls. The config is meant to be global, initialized
    // on startup. This is a cheap workaround.
    // Only works for single-threaded tests.
    String sqlNullHandling = testCase.option(OptionsSection.SQL_COMPATIBLE_NULLS);
    if (sqlNullHandling == null) {
      return runWithCustomPlanner();
    }
    boolean useSqlNulls = QueryContexts.getAsBoolean(
        OptionsSection.SQL_COMPATIBLE_NULLS,
        sqlNullHandling,
        true);
    if (useSqlNulls != NullHandling.sqlCompatible()) {
      // Case does not apply in the current null-handling mode: skip it.
      return null;
    }
    return runWithCustomPlanner();
  }

  /**
   * The planner factory and surrounding objects are designed to be created once
   * at the start of a Druid run. Test cases, however, want to try variations.
   * If the test case has planner settings, create a new planner fixture
   * (and all its associated knick-knacks), just for that one test. The custom
   * planner starts with the configuration for the "global" planner.
   * <p>
   * To do: since we want to test the planner, restructure the code to allow
   * changing just the planner config without needing to rebuild everything
   * else.
   * <p>
   * The planner fixture (and its associated mock segments) also must be
   * recreated if the global options change, such as null handling. Again, ugly,
   * but the best we can do.
   * <p>
   * The planner fixture is not global, so we can create a new one just for
   * this test, leaving the original one unchanged.
   */
  private ActualResults runWithCustomPlanner()
  {
    boolean hasCustomConfig = testCase.requiresCustomPlanner();
    if (plannerFixture != null && !hasCustomConfig) {
      return doRun();
    }
    PlannerFixture.Builder builder = baseFixture.toBuilder();
    if (hasCustomConfig) {
      // NOTE(review): plannerFixture may be null here when
      // runWithExpressionOptions() cleared it; this looks like it should
      // read baseFixture.plannerConfig() — confirm and fix separately.
      PlannerConfig customConfig = QueryTestCases.applyOptions(
          plannerFixture.plannerConfig(),
          testCase.optionsSection().options());
      builder.withPlannerConfig(customConfig);
    }
    plannerFixture = builder.build();
    return doRun();
  }

  /** Gathers the requested artifacts, verifies them, and returns the results. */
  private ActualResults doRun()
  {
    gatherResults();
    results.verify();
    return results;
  }

  // Lazy planning evaluation in case the test only wants to EXPLAIN,
  // but not capture detail plan results.
  private void preparePlan() throws Exception
  {
    if (planDetails != null) {
      return;
    }
    QueryDefn queryDefn = QueryDefn
        .builder(testCase.sql())
        // Plan with only the context in the test case. Ensures that the
        // case with no extra context works. Makes native queries smaller.
        .context(testCase.context())
        .parameters(testCase.parameters())
        .authResult(plannerFixture.authResultFor(testCase.user()))
        .build();
    planDetails = plannerFixture.queryRunner().introspectPlan(queryDefn);
  }

  private void gatherResults()
  {
    try {
      // Planning is done on demand. If we should fail in planning,
      // go ahead and try now. If the query succeeds, no need to try
      // the other items as success and failure are mutually exclusive.
      if (testCase.shouldFail()) {
        preparePlan();
        return;
      }

      // Gather actual plan results to compare against expected values.
      gatherParseTree();
      gatherUnparse();
      gatherSchema();
      gatherPlan();
      gatherNativeQuery();
      gatherResources();
      gatherTargetSchema();
      gatherExplain();
      gatherExecPlan();
    }
    catch (Exception e) {
      results.exception(e);
      return;
    }

    // Run the query with the requested options
    for (QueryRun run : testCase.runs()) {
      runQuery(run);
    }
  }

  /** Captures the parse tree (AST) when the test case asks to verify it. */
  private void gatherParseTree() throws Exception
  {
    PatternSection ast = testCase.ast();
    if (ast == null) {
      return;
    }
    preparePlan();
    ParseTreeVisualizer visitor = new ParseTreeVisualizer();
    planDetails.planState().sqlNode.accept(visitor);
    String output = visitor.result();
    results.ast(ast, output);
  }

  /** Captures the unparsed (round-tripped) SQL when requested. */
  private void gatherUnparse() throws Exception
  {
    PatternSection testSection = testCase.unparsed();
    if (testSection == null) {
      return;
    }
    preparePlan();
    String unparsed = planDetails.planState().sqlNode.toString();
    results.unparsed(testSection, unparsed);
  }

  /** Captures the logical plan (Druid or bindable) when requested. */
  private void gatherPlan() throws Exception
  {
    PatternSection testSection = testCase.plan();
    if (testSection == null) {
      return;
    }
    preparePlan();
    if (planDetails.planState().bindableRel != null) {
      gatherBindablePlan(testSection);
    } else if (planDetails.planState().relRoot != null) {
      gatherDruidPlan(testSection);
    } else {
      throw new ISE(
          StringUtils.format(
              "Test case [%s] has a plan but the planner did not produce one.",
              testCase.label()));
    }
  }

  private void gatherDruidPlan(PatternSection testSection)
  {
    // Do-it-ourselves plan since the actual plan omits insert.
    String queryPlan = RelOptUtil.dumpPlan(
        "",
        planDetails.planState().relRoot.rel,
        SqlExplainFormat.TEXT,
        SqlExplainLevel.DIGEST_ATTRIBUTES);
    String plan;
    SqlInsert insertNode = planDetails.planState().insertNode;
    if (insertNode == null) {
      plan = queryPlan;
    } else if (insertNode instanceof DruidSqlInsert) {
      DruidSqlInsert druidInsertNode = (DruidSqlInsert) insertNode;
      // The target is a SQLIdentifier literal, pre-resolution, so does
      // not include the schema.
      plan = StringUtils.format(
          "LogicalInsert(target=[%s], granularity=[%s])\n",
          druidInsertNode.getTargetTable(),
          druidInsertNode.getPartitionedBy() == null ? "" : druidInsertNode.getPartitionedBy());
      if (druidInsertNode.getClusteredBy() != null) {
        plan += "  Clustered By: " + druidInsertNode.getClusteredBy();
      }
      plan +=
          "  " + StringUtils.replace(queryPlan, "\n ", "\n   ");
    } else if (insertNode instanceof DruidSqlReplace) {
      // NOTE(review): the REPLACE branch renders the same "LogicalInsert"
      // label as INSERT — confirm that is intended for expected-plan files.
      DruidSqlReplace druidInsertNode = (DruidSqlReplace) insertNode;
      // The target is a SQLIdentifier literal, pre-resolution, so does
      // not include the schema.
      plan = StringUtils.format(
          "LogicalInsert(target=[%s], granularity=[%s])\n",
          druidInsertNode.getTargetTable(),
          druidInsertNode.getPartitionedBy() == null ?
              "" : druidInsertNode.getPartitionedBy());
      if (druidInsertNode.getClusteredBy() != null) {
        plan += "  Clustered By: " + druidInsertNode.getClusteredBy();
      }
      plan +=
          "  " + StringUtils.replace(queryPlan, "\n ", "\n   ");
    } else {
      plan = queryPlan;
    }
    results.plan(testSection, plan);
  }

  private void gatherBindablePlan(PatternSection testSection)
  {
    String queryPlan = RelOptUtil.dumpPlan(
        "",
        planDetails.planState().bindableRel,
        SqlExplainFormat.TEXT,
        SqlExplainLevel.DIGEST_ATTRIBUTES);
    results.plan(testSection, queryPlan);
  }

  // NOTE(review): unlike the other gather methods, this one never calls
  // preparePlan(); planDetails may be null if no earlier section forced
  // planning — confirm whether a preparePlan() call is missing here.
  private void gatherExecPlan()
  {
    PatternSection testSection = testCase.execPlan();
    if (testSection == null) {
      return;
    }
    results.execPlan(testSection,
        QueryTestCases.formatJson(
            plannerFixture.jsonMapper,
            planDetails.planState().execPlan));
  }

  /** Captures the serialized native query when requested. */
  private void gatherNativeQuery() throws Exception
  {
    PatternSection testSection = testCase.nativeQuery();
    if (testSection == null) {
      return;
    }
    preparePlan();
    DruidRel druidRel = planDetails.planState().druidRel;
    if (druidRel == null) {
      throw new ISE(
          StringUtils.format(
              "Test case [%s] has a native query but the planner did not produce one.",
              testCase.label()));
    }
    results.nativeQuery(
        testSection,
        QueryTestCases.serializeDruidRel(plannerFixture.jsonMapper, druidRel));
  }

  /** Captures the result-row schema when requested. */
  private void gatherSchema() throws Exception
  {
    PatternSection section = testCase.schema();
    if (section == null) {
      return;
    }
    preparePlan();
    results.schema(
        section,
        QueryTestCases.formatSchema(planDetails.plannerResult()));
  }

  /** Captures the resource actions (authorization targets) when requested. */
  private void gatherResources() throws Exception
  {
    ResourcesSection section = testCase.resourceActions();
    if (section == null) {
      return;
    }
    preparePlan();
    results.resourceActions(
        section,
        planDetails.validationResult().getResourceActions());
  }

  /** Captures the INSERT target schema when requested; errors for non-INSERT. */
  private void gatherTargetSchema() throws Exception
  {
    PatternSection section = testCase.targetSchema();
    if (section == null) {
      return;
    }
    preparePlan();
    if (planDetails.planState().insertNode == null) {
      results.errors().add(
          StringUtils.format(
              "Query [%s] expects a target schema, but the SQL is not an INSERT statement.",
              testCase.label()));
      return;
    }

    List fields = planDetails.planState().relRoot.validatedRowType.getFieldList();
    String[] actual = new String[fields.size()];
    for (int i = 0; i < actual.length; i++) {
      RelDataTypeField field = fields.get(i);
      actual[i] = field.getName() + " " + field.getType();
    }
    results.targetSchema(section, actual);
  }

  /** Runs EXPLAIN for the query and captures the formatted output. */
  private void gatherExplain() throws Exception
  {
    PatternSection testSection = testCase.explain();
    if (testSection == null) {
      return;
    }
    // User mapping is a bit lame: there are only two: the regular user (default)
    // or the super user. The super user is required for tests with an extern data
    // source as the regular user test setup doesn't provide access.
    AuthenticationResult authenticationResult;
    String user = testCase.user();
    if (user != null && user.equals(CalciteTests.TEST_SUPERUSER_NAME)) {
      authenticationResult = CalciteTests.SUPER_USER_AUTH_RESULT;
    } else {
      authenticationResult = CalciteTests.REGULAR_USER_AUTH_RESULT;
    }
    explainFixture = new ExplainFixture(
        plannerFixture,
        testCase.sql(),
        testCase.context(),
        Collections.emptyList(),
        authenticationResult);
    explainFixture.explain();
    Pair explained = explainFixture.results();
    results.explain(
        testSection,
        QueryTestCases.formatExplain(
            plannerFixture.jsonMapper,
            explained.lhs,
            explained.rhs));
  }

  /**
   * One step in the query-execution decorator chain: each implementation
   * either runs the query or rewrites/filters it and delegates.
   */
  private interface QueryExec
  {
    void run(QueryDefn queryDefn, Map options);
  }

  /** Terminal step: actually runs the query and records results or failure. */
  private class ConcreteExec implements QueryExec
  {
    private QueryRun queryRun;

    private ConcreteExec(QueryRun queryRun)
    {
      this.queryRun = queryRun;
    }

    @Override
    public void run(QueryDefn queryDefn, Map options)
    {
      try {
        List rows = plannerFixture.queryRunner.run(queryDefn).toList();
        results.run(queryRun,
            queryDefn.context(), rows, plannerFixture.jsonMapper);
      }
      catch (Exception e) {
        results.runFailed(queryRun, queryDefn.context(), e);
      }
    }
  }

  /**
   * Runs the query once with vectorization disabled and, unless the test
   * case opts out, once more with vectorization forced.
   */
  private static class VectorizeExec implements QueryExec
  {
    private final QueryExec child;

    public VectorizeExec(QueryExec child)
    {
      this.child = child;
    }

    @Override
    public void run(QueryDefn queryDefn, Map options)
    {
      child.run(queryDefn.withOverrides(DISABLE_VECTORIZE_CONTEXT), options);
      boolean canVectorize = QueryTestCases.booleanOption(
          options,
          OptionsSection.VECTORIZE_OPTION,
          true);
      if (!canVectorize) {
        return;
      }
      child.run(queryDefn.withOverrides(ENABLE_VECTORIZE_CONTEXT), options);
    }
  }

  /**
   * Filter to only pass along runs that match the current "replace with
   * null" setting initialized externally. It matches if no options is given
   * for the run, the option is "both", or the option Boolean value matches
   * the current setting.
   */
  private static class NullStrategyFilter implements QueryExec
  {
    private final boolean sqlCompatible = NullHandling.sqlCompatible();
    private final QueryExec child;

    public NullStrategyFilter(QueryExec child)
    {
      this.child = child;
    }

    @Override
    public void run(QueryDefn queryDefn, Map options)
    {
      String sqlNullOption = options.get(OptionsSection.SQL_COMPATIBLE_NULLS);
      if (sqlNullOption == null ||
          OptionsSection.NULL_HANDLING_BOTH.equals(sqlNullOption) ||
          QueryContexts.getAsBoolean(
              OptionsSection.SQL_COMPATIBLE_NULLS,
              sqlNullOption,
              false) == sqlCompatible) {
        child.run(queryDefn, options);
      }
    }
  }

  /**
   * Iterates over the contexts provided by QueryContextForJoinProvider,
   * which is a provider class used in JUnit, but adapted for use here.
   * The class provides not just the join options, but also a set of
   * "default" options which are the same as the defaults used in the
   * JUnit tests, so no harm in applying them.
   */
  private static class JoinContextProvider implements QueryExec
  {
    private final QueryExec child;

    public JoinContextProvider(QueryExec child)
    {
      this.child = child;
    }

    @Override
    public void run(QueryDefn queryDefn, Map options)
    {
      for (Object obj : QueryContextForJoinProvider.provideQueryContexts()) {
        @SuppressWarnings("unchecked")
        Map context = (Map) obj;
        QueryDefn rewritten = queryDefn.withOverrides(context);
        child.run(rewritten, options);
      }
    }
  }

  /**
   * Special version of {@link JoinContextProvider} that filters on
   * {@code enableJoinFilterRewrite} to handle bugs in
   * {@code testLeftJoinSubqueryWithNullKeyFilter}.
   */
  private static class JoinContextProviderFilterRewriteFilter implements QueryExec
  {
    private final QueryExec child;
    private final boolean value;

    public JoinContextProviderFilterRewriteFilter(QueryExec child, boolean value)
    {
      this.child = child;
      this.value = value;
    }

    @Override
    public void run(QueryDefn queryDefn, Map options)
    {
      for (Object obj : QueryContextForJoinProvider.provideQueryContexts()) {
        @SuppressWarnings("unchecked")
        Map context = (Map) obj;
        // Per testLeftJoinSubqueryWithNullKeyFilter(), the default value is true.
        if (QueryTestCases.booleanOption(context, QueryContexts.JOIN_FILTER_REWRITE_ENABLE_KEY, true) == value) {
          QueryDefn rewritten = queryDefn.withOverrides(context);
          child.run(rewritten, options);
        }
      }
    }
  }


  /**
   * Special version of {@link JoinContextProvider} that filters on
   * {@code enableJoinFilterRewrite} to handle bugs in
   * {@code testLeftJoinSubqueryWithNullKeyFilter}.
   */
  private static class JoinContextProviderJoinToFilterRewriteFilter implements QueryExec
  {
    private final QueryExec child;
    private final boolean value;

    public JoinContextProviderJoinToFilterRewriteFilter(QueryExec child, boolean value)
    {
      this.child = child;
      this.value = value;
    }

    @Override
    public void run(QueryDefn queryDefn, Map options)
    {
      for (Object obj : QueryContextForJoinProvider.provideQueryContexts()) {
        @SuppressWarnings("unchecked")
        Map context = (Map) obj;
        // Per testLeftJoinSubqueryWithNullKeyFilter(), the default value is true.
        if (BaseCalciteQueryTest.isRewriteJoinToFilter(context) == value) {
          QueryDefn rewritten = queryDefn.withOverrides(context);
          child.run(rewritten, options);
        }
      }
    }
  }

  /**
   * Executes one declared run of the query, building the decorator
   * chain (provider filters, null-strategy filter, vectorization) around
   * the concrete execution.
   */
  private void runQuery(QueryRun run)
  {
    QueryDefn queryDefn = QueryDefn
        .builder(run.testCase().sql())
        // Run with the same defaults as used in the original JUnit-based
        // tests to ensure results are consistent.
        .context(plannerFixture.applyDefaultContext(run.context()))
        .parameters(run.testCase().parameters())
        .authResult(plannerFixture.authResultFor(run.testCase().user()))
        .build();
    QueryExec exec = new VectorizeExec(
        new ConcreteExec(run));
    // Hard-coded support for the known providers.
    String provider = run.option(OptionsSection.PROVIDER_CLASS);
    if (provider != null) {
      switch (provider) {
        case "QueryContextForJoinProvider":
          exec = new JoinContextProvider(exec);
          break;
        case "QueryContextForJoinProviderNoFilterRewrite":
          exec = new JoinContextProviderFilterRewriteFilter(exec, false);
          break;
        case "QueryContextForJoinProviderWithFilterRewrite":
          exec = new JoinContextProviderFilterRewriteFilter(exec, true);
          break;
        case "QueryContextForJoinProviderNoRewriteJoinToFilter":
          exec = new JoinContextProviderJoinToFilterRewriteFilter(exec, false);
          break;
        case "QueryContextForJoinProviderWithRewriteJoinToFilter":
          exec = new JoinContextProviderJoinToFilterRewriteFilter(exec, true);
          break;
        default:
          log.warn("Undefined provider: %s", provider);
      }
    }
    exec = new NullStrategyFilter(exec);
    exec.run(queryDefn, run.options());
  }
}
diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCases.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCases.java
new file mode 100644
index 000000000000..ac0eb46fc3f9
--- /dev/null
+++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestCases.java
@@ -0,0 +1,255 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.druid.java.util.common.DateTimes; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.query.QueryContexts; +import org.apache.druid.sql.calcite.planner.PlannerConfig; +import org.apache.druid.sql.calcite.planner.PlannerResult; +import org.apache.druid.sql.calcite.rel.DruidRel; +import org.apache.druid.sql.calcite.rel.DruidUnionRel; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Collection of utilities for working with test cases. + */ +public class QueryTestCases +{ + public static String serializeQuery(ObjectMapper mapper, Object query) + { + return formatJson(mapper, query); + } + + public static String serializeDruidRel(ObjectMapper mapper, DruidRel druidRel) + { + // Note: must pass false to toDruidQuery (that is, don't finalize + // aggregations) to match the native queries expected by the + // various CalciteXQueryTest classes. + return serializeQuery(mapper, createQuery(druidRel)); + } + + /** + * Creates a native query to serialize. The union query is not a + * native query: it is instead handled as a list of such queries. + * We simulate that here by creating an "artificial" union query. 
+ */ + public static Object createQuery(DruidRel druidRel) + { + if (druidRel instanceof DruidUnionRel) { + List inputs = new ArrayList<>(); + for (RelNode input : druidRel.getInputs()) { + inputs.add(createQuery((DruidRel) input)); + } + return ImmutableMap.of( + "artificialQueryType", + "union", + "inputs", + inputs); + } else { + return druidRel.toDruidQuery(false).getQuery(); + } + } + + /** + * Reformat the plan. It includes a big wad of JSON all on one line + * which is hard to read. This reformats into a mixture of formatted + * JSON and the Calcite formatting. Ugly code, but the result is less + * ugly than the single long line. + */ + public static String formatExplain(ObjectMapper mapper, String plan, String signature) + { + StringBuilder buf = new StringBuilder(); + Pattern p = Pattern.compile("DruidQueryRel\\(query=\\[(.*)], signature=\\[(.*)]\\)"); + Matcher m = p.matcher(plan.trim()); + if (m.matches()) { + buf.append("DruidQueryRel(query=[(\n") + .append(reformatJson(mapper, m.group(1))) + .append(",\nsignature=[(\n ") + // The signature only looks like JSON: it does not have proper quoting. + .append(m.group(2)) + .append("\n])\n"); + } else { + buf.append(plan.trim()).append("\n"); + } + // Separate the signature from the above part. 
+ buf.append("---\n") + .append(reformatJson(mapper, signature)) + .append("\n"); + return buf.toString(); + } + + public static String[] formatSchema(PlannerResult plannerResult) + { + List fields = plannerResult.rowType().getFieldList(); + String[] actual = new String[fields.size()]; + for (int i = 0; i < actual.length; i++) { + RelDataTypeField field = fields.get(i); + actual[i] = field.getName() + " " + field.getType(); + } + return actual; + } + + public static String formatJson(ObjectMapper mapper, Object obj) + { + try { + return mapper + .writerWithDefaultPrettyPrinter() + .writeValueAsString(obj); + } + catch (JsonProcessingException e) { + throw new RuntimeException("JSON conversion failed", e); + } + } + + public static String reformatJson(ObjectMapper mapper, String json) + { + try { + Object obj = mapper.readValue(json, Object.class); + return formatJson(mapper, obj); + } + catch (JsonProcessingException e) { + throw new RuntimeException("JSON parse failed", e); + } + } + + public static List resultsToJson(List results, ObjectMapper mapper) + { + try { + List jsonLines = new ArrayList<>(); + for (Object[] row : results) { + jsonLines.add(mapper.writeValueAsString(row)); + } + return jsonLines; + } + catch (Exception e) { + throw new IAE(e, "Results conversion to JSON failed"); + } + } + + public static String valueToString(Object value) + { + if (value == null) { + return "\\N"; + } else if (value instanceof String) { + return "\"" + StringUtils.replace((String) value, "\"", "\\\"") + "\""; + } else { + return value.toString(); + } + } + + public static Map rewriteContext(Map context) + { + Map copy = new HashMap<>(context); + copy.remove(QueryContexts.DEFAULT_TIMEOUT_KEY); + copy.remove(QueryContexts.MAX_SCATTER_GATHER_BYTES_KEY); + copy.remove("sqlCurrentTimestamp"); + copy.remove("sqlQueryId"); + copy.remove("vectorize"); + copy.remove("vectorizeVirtualColumns"); + copy.remove("vectorSize"); + return copy; + } + + public static String unquote(String 
value) + { + if (value.length() < 2) { + return value; + } + char first = value.charAt(0); + if (first != '\'' && first != '"') { + return value; + } + char last = value.charAt(value.length() - 1); + if (last != first) { + return value; + } + return value.substring(1, value.length() - 1); + } + + public static boolean booleanOption(Map options, String key, boolean defaultValue) + { + return QueryContexts.getAsBoolean(key, options.get(key), defaultValue); + } + + public static PlannerConfig applyOptions(PlannerConfig base, Map options) + { + PlannerConfig.Builder builder = base + .toBuilder() + .maxTopNLimit( + QueryContexts.getAsInt( + OptionsSection.PLANNER_MAX_TOP_N, + options.get(OptionsSection.PLANNER_MAX_TOP_N), + base.getMaxTopNLimit())) + .useApproximateCountDistinct( + booleanOption( + options, + OptionsSection.PLANNER_APPROX_COUNT_DISTINCT, + base.isUseApproximateCountDistinct())) + .useApproximateTopN( + booleanOption( + options, + OptionsSection.PLANNER_APPROX_TOP_N, + base.isUseApproximateTopN())) + .requireTimeCondition( + booleanOption( + options, + OptionsSection.PLANNER_REQUIRE_TIME_CONDITION, + base.isRequireTimeCondition())) + .useGroupingSetForExactDistinct( + booleanOption( + options, + OptionsSection.PLANNER_USE_GROUPING_SET_FOR_EXACT_DISTINCT, + base.isUseGroupingSetForExactDistinct())) + .computeInnerJoinCostAsFilter( + booleanOption( + options, + OptionsSection.PLANNER_COMPUTE_INNER_JOIN_COST_AS_FILTER, + base.isComputeInnerJoinCostAsFilter())) + .useNativeQueryExplain( + booleanOption( + options, + OptionsSection.PLANNER_NATIVE_QUERY_EXPLAIN, + base.isUseNativeQueryExplain())) + .maxNumericInFilters( + QueryContexts.getAsInt( + OptionsSection.PLANNER_MAX_NUMERIC_IN_FILTERS, + options.get(OptionsSection.PLANNER_MAX_NUMERIC_IN_FILTERS), + base.getMaxNumericInFilters())); + + String timeZone = options.get(OptionsSection.PLANNER_SQL_TIME_ZONE); + if (timeZone != null) { + builder.sqlTimeZone(DateTimes.inferTzFromString(timeZone)); + } + return 
builder.build(); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestSet.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestSet.java new file mode 100644 index 000000000000..215700f17443 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/QueryTestSet.java @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.druid.common.config.NullHandling; +import org.apache.druid.common.config.NullValueHandlingConfig; +import org.apache.druid.java.util.common.FileUtils; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.logger.Logger; +import org.apache.druid.sql.calcite.util.CalciteTests; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +/** + * Holds a set of test cases to execute, runs the test, and applies + * filters to select which tests to run. 
+ */ +public class QueryTestSet +{ + public static final Logger log = new Logger(QueryTestSet.class); + + /** + * Tests are run in multiple passes: one with SQL compatible null + * handling, another without. This class correlates the actual results + * for the two passes. Then, within each pass, the query is planned, + * run zero or more times, and verified. This class correlates those + * multiple passes. + * + * If a test run fails, then we emit the full set of tests. If this + * test was OK, we just repeat the whole expected test case. Else, + * we emit the merged expected/actuals test case for each variation + * which failed. + */ + private static class TestResults + { + final QueryTestCase testCase; + final List results = new ArrayList<>(); + boolean ok = true; + + private TestResults(QueryTestCase testCase) + { + this.testCase = testCase; + } + + public void add(ActualResults caseResults) + { + if (caseResults == null) { + return; + } + results.add(caseResults); + ok = ok && caseResults.ok(); + } + + public void write(TestCaseWriter testWriter) throws IOException + { + if (ok) { + testCase.write(testWriter); + } else { + for (ActualResults actual : results) { + if (!actual.ok()) { + actual.write(testWriter); + break; + } + } + } + } + } + + private final String label; + private final List results = new ArrayList<>(); + + public QueryTestSet(String label, List testCases) + { + this.label = label; + for (QueryTestCase testCase : testCases) { + results.add(new TestResults(testCase)); + } + } + + public static QueryTestSet fromFile(File file) + { + return new QueryTestSet( + file.getName(), + TestCaseLoader.loadFile(file) + ); + } + + public static QueryTestSet fromResource(String resource) + { + int posn = resource.lastIndexOf('/'); + return new QueryTestSet( + posn == -1 ? 
resource : resource.substring(posn + 1), + TestCaseLoader.loadResource(resource) + ); + } + + public static QueryTestSet fromString(String label, String body) + { + return new QueryTestSet( + label, + TestCaseLoader.loadString(body) + ); + } + + public boolean run(PlannerFixture.Builder builder) + { + if (isDebugMode()) { + setSqlCompatibleNulls(true); + boolean sqlCompatOK = run(builder.build()); + setSqlCompatibleNulls(false); + boolean classicOK = run(builder.build()); + return sqlCompatOK && classicOK; + } else { + return run(builder.build()); + } + } + + /** + * Check if "debug mode" is set. If the system property + * {@code -Ddruid.debug=true} is set, then the test will run + * both the SQL-compatible nulls, and legacy "replace nulls with + * defaults" modes. This saves you from having to twiddle those + * values when running tests in your IDE. When run from Maven, + * tests will run in whatever mode is current, as set by the + * Travis job. + */ + private boolean isDebugMode() + { + return Boolean.parseBoolean( + System.getProperty("druid.debug", Boolean.FALSE.toString())); + } + + private void setSqlCompatibleNulls(boolean option) + { + System.setProperty( + NullValueHandlingConfig.NULL_HANDLING_CONFIG_STRING, + Boolean.toString(!option)); + NullHandling.initializeForTests(); + CalciteTests.reset(); + } + + public boolean run(PlannerFixture fixture) + { + boolean ok = true; + for (TestResults testCase : results) { + ActualResults caseResults = fixture.runTestCase(testCase.testCase); + testCase.add(caseResults); + ok = ok && testCase.ok; + } + File dest = new File(fixture.resultsDir(), label); + if (ok) { + // This run is clean. Remove any output files from previous + // runs to prevent confusion.
+ dest.delete(); + } else { + reportResults(); + try { + FileUtils.mkdirp(fixture.resultsDir()); + } + catch (IOException e) { + throw new ISE(e, "Could not make results dir: " + fixture.resultsDir()); + } + writeResults(dest); + } + return ok; + } + + private void reportResults() + { + log.error("Test case failed: %s", label); + for (TestResults testCase : results) { + if (testCase.ok) { + continue; + } + log.error("=== " + testCase.testCase.label() + " ==="); + for (ActualResults caseResults : testCase.results) { + for (String error : caseResults.errors().errors()) { + log.error(error); + } + } + } + } + + private void writeResults(File dest) + { + try { + try (Writer writer = new OutputStreamWriter(new FileOutputStream(dest), StandardCharsets.UTF_8)) { + TestCaseWriter testWriter = new TestCaseWriter(writer); + for (TestResults testCase : results) { + testCase.write(testWriter); + } + } + } + catch (IOException e) { + throw new IAE(e, "Could not write test results to " + dest); + } + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/README.md b/sql/src/test/java/org/apache/druid/sql/calcite/tester/README.md new file mode 100644 index 000000000000..cee6c5ea703c --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/README.md @@ -0,0 +1,597 @@ + + +# Druid Planner Tests + +The classes in this module provide a test framework for the Druid planner. +The tests are based on "case" files that provide a SQL query and expected +results. The tests themselves are defined as a JUnit test which runs through +all tests in the case file. + +Tests primarily check planning artifacts (Calcite plan, native queries, etc.) +but also test semantics via test runs using an abbreviated, in-memory query +engine. + +## Running the Tests + +Tests run as part of a JUnit test: `DruidPlannerTest`. That file runs each of +the case files, compares actual with expected results, and reports success +or failure. 
If a case fails, the code writes a new file, with actual results, +in the `target/actual` folder. Use your favorite "diff" tool to compare the +expected and actual files to see what is off. Either fix the issue, or accept +the new behavior, to update the "expected" case file to match the actual +results. + +## Writing a Test + +Planner tests can be super simple or quite complex. Let's step through the +process. Let's suppose you want to create a new case file. Go ahead and +create one: `src/test/resources/calcite/cases/myCase.case`. Copy the +copyright heading from an existing file. + +A case has, at a minimum, a `case` element that names the test, a SQL +statement, and one or more expected results. For example, if we just +want to plan a query and capture the native results: + + +```text +============================================================== +Example of the world's simplest test case. +=== case +My first test case +=== SQL +SELECT * FROM foo +=== native +... +``` + +The file has a rather unusual syntax: lines that start with three +equal signs (`===`) indicate sections. Why the odd syntax? Test cases +include JSON, SQL comments and CSV results. We need a syntax that is +very unlikely to collide with these various contents. + +In the SQL above `foo` is an in-memory datasource provided by the test +framework. We want to verify the native query. Notice that we've just +left the native query section as "...". That's because we're lazy: +we'll let the test tell us the answer. + +Next, we create a driver function. Let's assume we'll add this to the +existing `DruidPlannerTest` file: + +```java + @Test + public void testMyCase() throws IOException + { + assertTrue( + QueryTestSet + .fromResource("/calcite/cases/myCase.case") + .run(standardBuilder())); + } +``` + +Run this in your IDE as a JUnit test. It will fail, but that's what we expect. +Find `target/actual/myCase.case`.
Open it in an editor and use your "diff" +tool to copy over the actual native query (after eyeballing it to make sure +it is correct.) Run the test again. Now it passes. + +Congrats, you've created your first test! Of course, they're not all this +easy. Let's dive into the details. + +## Capture More Planning Artifacts + +In addition to the native query, we can also capture: + +* Parser AST (abstract syntax tree): `ast` section. +* "Unparse" of the SQL, to see what Calcite thinks we said: `unparse` section. +* The output schema: `schema` section. +* The resource actions (the datasources that the query uses and the kind of +access: READ or WRITE): the `resources` section. +* The Calcite logical plan: the `plan` section. +* The explained plan: the `explain` section. This section is almost the same +as the result of an `EXPLAIN` query, with a few minor differences. The framework +formats the output so it is *far* easier to read than running an actual `EXPLAIN` +query and comparing a huge, long results line. + +For example, to capture some of these, add the following to our test +case: + +```text +=== SQL +SELECT * FROM foo +=== schema +... +=== plan +... +=== native +``` + +Again, run the query, compare the actual results, and copy over the actuals +to become the expected values. Here's an example: + +```text +=== SQL +SELECT DISTINCT SCHEMA_NAME +FROM INFORMATION_SCHEMA.SCHEMATA +=== schema +SCHEMA_NAME VARCHAR +=== plan +BindableAggregate(group=[{1}]) + BindableTableScan(table=[[INFORMATION_SCHEMA, SCHEMATA]]) +``` + +The above works with Druid's "virtual" `INFORMATION_SCHEMA`. +There is no native query for such queries. + +## Query Context + +Queries are planned with an empty query context. For one thing, this keeps +the captured native queries simple. Your test may want to change a context +value, such as forcing the "current date" to some specific value.
You do that +using the `context` section: + +```text +=== context +sqlCurrentTimestamp=2000-01-01T00:00:00Z +``` + +The syntax is like a properties file: `key=value`. Strings need not be +quoted unless they start or end with spaces. The test framework will +convert the value to the right type based on metadata which appears +in `QueryContexts`. If you add a new non-string context variable, +you may need to update the metadata for the value to parse +correctly. + +## Run Tests + +Thus far we've talked about capturing planning artifacts. The many existing +JUnit tests also capture query results, using an abbreviated test-specific +execution engine. Let's do that: + +``` +=== SQL +SELECT DISTINCT SCHEMA_NAME +FROM INFORMATION_SCHEMA.SCHEMATA +=== results +["lookup"] +["view"] +["druid"] +["sys"] +["INFORMATION_SCHEMA"] +``` + +The above shows that, if we run the specified query, we expect to get +the results shown. In practice, you can use the same trick as above: +add a `results` section, run the query, and copy results from the +actual output file. + +### Vectorization + +The test framework always runs queries (at least) two ways: with vectorization +off, and with it on. When we run tests, we should specify whether the query +is vectorizable or not. We do that with the `options` section: instructions +to the test framework itself. For example: + +```text +=== options +vectorize=true +``` + +Or + +```text +=== options +vectorize=false +``` + +Most tests explain why they can't be vectorized: + +```text +============================================================== +Converted from testEarliestAggregatorsNumericNulls() + +Cannot vectorize EARLIEST aggregator. +=== case +Earliest aggregators numeric nulls +=== SQL +SELECT EARLIEST(l1), EARLIEST(d1), EARLIEST(f1) +FROM druid.numfoo +=== options +vectorize=false +``` + +When `vectorize` is `true`, the framework runs the test once with +`vectorize=FALSE`, a second time with `vectorize=FORCE`.
If the +`vectorize` option is `false`, then only the first is done: +`vectorize=False`. + +### SQL-Compatible Nulls + +Druid has an unusual feature: it can use "classic" "null" handling in +which a blank string (or 0 numeric) is considered the same as SQL `NULL`, +or "SQL compatible" mode in which SQL `NULL` is a distinct value. In +the happy path, the query produces the same results either way and +we use an option, `sqlCompatibleNulls=both`, to say so: + +```text +=== SQL +SELECT DISTINCT SCHEMA_NAME +FROM INFORMATION_SCHEMA.SCHEMATA +=== options +sqlCompatibleNulls=both +vectorize=true +``` + +### Debug Mode + +When the tests are run in Maven on Travis, then Travis will run the entire +set of unit tests with SQL-compatible mode enabled, then another time with +the mode disabled. This is the default behavior of the test framework. + +When working in an IDE, it is a pain to have to change the debug setup for +tests to try both modes. To avoid that, the framework recognizes a special +system property: + +```text +-Ddruid.debug=true +``` + +Set that (once) in your IDE for the `DruidPlannerTest` setup. Then, the +tests will, internally, run all tests once with SQL-compatible mode, a second +time with "classic" (replace nulls with defaults) mode. + +### Differing Run Results + +From here the story gets pretty complex, so get ready. In some cases, +only the query results differ. In this case, we specify multiple query +"runs" per test case using the `run` section. For example: + +```text +=== run +=== options +sqlCompatibleNulls=false +=== results +["",1] +["def",1] +=== run +=== options +sqlCompatibleNulls=true +=== results +["def",1] +["abc",1] +``` + +Each `run` section can have a name if we like: + +```text +=== run +Results for SQL-compatible mode. +``` + +However, in the above, the meaning is clear from the options so we skip the +name. Each `run` section can also specify options which are scoped to just +that run.
Each run happens with the "main" options overridden with the +per-run options. In the above, we had one run with SQL-compatible nulls, +the other without. There are cases where we want to vary other options +as well, but that gets pretty advanced. + +Then, we list the results for that specific configuration. + +As it turns out, the null mode is baked deeply into Druid: it is the kind of +option you want to select at the first installation, then never change. At +run-time, the null-handling model is a global setting: there is no way to +change it per query (or per datasource). So, how do the tests handle this? +Very carefully, and with several hacks, as it turns out. The test framework +runs all tests in a case file with SQL-compatible nulls turned off, then +runs them again with SQL-compatible nulls enabled. (If you're counting, we're +up to four runs of each test case for nulls and vectorization.) + +The `sqlCompatibleNulls` option acts like a filter: the test framework +skips tests (or runs) that don't match the current null-handling option. +(The `both` value matches both settings.) + +### Comparing Floating-point Values + +If your test expects floating-point values (`float` or `double`), which +are not nice, even integers (such as `10.0`), then you cannot do the default +text-based comparison of results. You have to tell the framework to do the +slow, complex, typed comparison: + +```text +=== options +typedCompare=true +``` + +This flag causes the expected values to be parsed as JSON into Java objects, +then compares floating-point values using a delta of 1%. If you find special +cases that also need handling, modify the rather baroque class +`LinesSection.JsonComparsionCriteria` to handle those cases. + +### Differing Plan Artifacts + +In many cases, the null-handling option affects not just the results, but +also the native plan. There is no "planning" section like there is a `run` +section: there is only one set of planning artifacts per test case.
But, all +is not lost: we just create two cases, and tell the framework to copy over +the common parts: + +```text +=== case +My case +=== SQL +SELECT ... +=== options +sqlCompatibleNulls=false +=== schema +foo VARCHAR +=== native + +=== case +My Case +=== SQL copy +=== options +sqlCompatibleNulls=true +=== schema copy +=== native + +``` + +When we pull this trick, we put the results in the respective test case with no +need for a `run` section. (The `run` section is needed only if there are two +or more runs per test case.) + +### Join Option Generator + +Many join-related tests use a JUnit parameter annotation to repeat tests with +various query contexts: + +```java + @Test + @Parameters(source = QueryContextForJoinProvider.class) + public void testJoinOuterGroupByAndSubqueryNoLimit( + Map queryContext) throws Exception +``` + +The test framework provides the same mechanism via an option: + +```text +=== options +provider=QueryContextForJoinProvider +``` + +The name is the same as the Java class, but this is just a convention: the +actual generator is hard-coded. (If you need a different one, you'll have to +add yours to the `QueryTestCaseRunner` class.) The result is that the test +is run as many times as there are options variations. There are eight variations +for `QueryContextForJoinProvider` (three Boolean context variables.) Since +we said we already did four variations, we now have a total of 32 runs of +queries that use this option. That should test the heck out of any query! + +You'll see some tests that use variations on this theme, but where the plan +is different depending on the options. A special generator generates the +"plan A" option variations, while the opposite generates "plan B." This is +an obscure feature: you can ignore it until you need it. + +## Handling Errors + +Good tests cover not only the "happy path", but also test for errors. Every +test case (or run) either succeeds or fails. If it succeeds, you use the sections +above.
If it fails, you use the `exception` and `error` sections: + +```text +=== exception +ValidationException +=== error +!.*Column count mismatch in UNION ALL +``` + +The rule for `exception` is that the actual exception must be, or derive from, +the exception named in the section. The `error` section gives the text of the +error. + +Put these two sections in the main body for plan-time errors. Place them in +a `run` section for run-time errors. + +### Regular Expressions + +This is a good time to introduce a "special feature" of the framework: regular +expressions. Almost every expected values section (that is, those that are +outputs from Druid rather than inputs to Druid) can contain regular expressions. +The one exception is the `results` section. + +By default, lines are treated as literals. But, if a line starts with an +exclamation point, the rest of the line is a regular expression. In the example +above: we match away all the detailed cruft in the error message up to the +part that conveys the essential meaning. + +Consult the [Java regular expression syntax]( +https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html) +for details. The most useful patterns are `.*` (match anything) and +`\Q...\E` (match everything between the two markers.) Many messages contain +regular expression characters (such as parens, brackets, etc.) Use a backslash, +or that quote syntax, to escape them. + +There is one other handy feature: a line with just `**` matches any number of +actual lines. This is occasionally useful. It would be more useful if we recorded +just the essential bits of a native query and not the entire text. Perhaps we only +care that a particular filter was added. The existing tests are all literal captures +of native queries coded in Java, and so don't use the `**` feature as much as they +probably should. + +Finally, any line that starts with a backslash is a literal, starting with the +second character.
This is how you match `!foo`, for example. See +[internals.md] for more details. + +## Investigate Issues + +You've written some tests, added more sections, or added results, and something +breaks. Or, you've tinkered with the planner code, and query results change. How +do you track down problems? + +If the test succeeds, you'll get a JUnit pass and you can be confident that +things work. But, if something changed, you'll get a failure. The JUnit +run itself will just tell you that the test failed. The test framework produces +a file to explain exactly what went wrong. The file is in +`target/actual/.case`: that is, the same name as the case file, but in +the target directory. + +To see what changed, use your IDE or favorite diff tool to compare the two +files. The "actual" file has a header per case that identifies failures, and +then lists the actual output. Inspect the differences. + +If the difference is expected (that is, you just made a change that +intentionally caused the difference), then copy the actual input over to the +test case as the new "expected" value. On the other hand, if you didn't +expect the change, you've got to track down what went wrong. + +Sometimes the failure is due to an unstable JSON serialization. (Java `Set`s, +for example, have a non-deterministic serialization order.) For sanity, we +want to fix serialization so it is deterministic. Other times, there is +a difference because of something that changes from run to run, such as the +query ID. Find a way to hold the value constant, or use regular expressions +(see above) to pattern-match and work around the values that change. + +Most times, however, something broke unintentionally. Debug the problem, +make a fix and try the test again until it is happy. + +## Create a New Test - Advanced + +You've seen the test creation process step-by-step. Once you've done one, you +will want to skip the steps and just cut to the chase.
+ +To create a new test, you can do it the hard way or the easy way. The hard way is +to work out the expected values for each section and spell that out in the test. +The easy way is to let the computer do the work for you. Specify the inputs, but +provide bogus values for the outputs. For a query that should succeed: + +```text +============================================================== + +=== case + +=== SQL + +=== options +sqlCompatibleNulls=both +vectorize=true +=== schema +foo +=== resources +foo/foo/READ +=== plan +foo +=== native +foo +=== results +foo +``` + +Fill in the three `<...>` sections above. (The comments are optional, but +helpful.) Alter the options as needed. Add any context settings. The general +convention is to use the separator line between cases, add a comment, list +the SQL, then context, then options. Expected result sections follow, in +roughly the order that Druid produces them: the ast, unparsed SQL, the +schema, resources, plan and native query. Results (and runs, if needed) +appear last. Sort options in alphabetical order. + +The above conventions are not required, but they will make it easier to +view the actual results file. (It is also the order that cases are generated if +you convert a JUnit test. See the [internals.md](internals.md) file for the +details of JUnit conversion.) + +Then, run the test. It will, of course, fail. It will produce an +actual output file (as described above). Open that in your IDE then compare +your test case (expected) file with the actual file. Use your IDE to copy +across the actual values, making those the expected values. Run the test +again. It should now pass. Note: don't copy blindly: inspect to ensure that +the actual values are, in fact, what you expect.
+ +If you expect an error, use this template instead: + +``` +============================================================== + +=== case + +=== SQL + +=== exception +foo +=== error +foo +``` + +As it turns out, you will get different exceptions if you run the test in +the test framework than if you run it in the `BaseCalciteQueryTest` framework. +An easy workaround is to omit the exception and check only the error text. + +## JUnit Test Case + +The Planner tests use a number of internal classes. The `PlannerFixture` is the +core: it allows your code to configure the planner however you need it for your +tests. A `Builder` lets you choose options, otherwise the fixture uses the same +defaults and mock elements used by `BaseCalciteTest`. + +```java +public class DruidPlannerTest +{ + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + // Converted from CalciteInsertDmlTest + @Test + public void testInsertDml() throws IOException + { + PlannerFixture.Builder builder = new PlannerFixture + .Builder(temporaryFolder.newFolder()) + .withView( + "aview", + "SELECT SUBSTRING(dim1, 1, 1) AS dim1_firstchar FROM foo WHERE dim2 = 'a'"); + QueryTestSet testSet = QueryTestSet.fromResource("/calcite/cases/insertDml.case"); + assertTrue(testSet.run(builder)); + } +``` + +The easiest approach is to add your test case as another method within +`DruidPlannerTest`: there is no advantage to creating a separate JUnit test +class. Your method will succeed if all tests pass, fail if any test fails. +Use the `target/actual/.case` file to locate actual failures. + +The intro section showed an abbreviated way to write the test if you use the +"standard" builder. The one here shows how to customize the builder. Note that +you pass the *builder*, not the *built* object into the test runner. The test +runner will use the builder multiple times to build the world first without +SQL-compatible nulls, then again with them.
You may find you have to extend the JUnit test case if the Java tests are doing +something special. For example, you can use `PlannerFixture` to create custom +schemas, load any needed views, set default query context options, and so on. + +You may also have to extend the test framework itself if you need new options +for special cases not yet covered, or other kinds of unusual cases. + +## Status + +This framework is new. At present, the framework duplicates the tests from the various +JUnit `CalciteXQueryTest` cases. Once the framework is solid, we'll make a final +conversion pass of any newly added or changed tests then, if the team agrees, we'll +deprecate the existing tests so we don't have to keep the two sets in sync. + +The framework has support for the new `INSERT` syntax via the `targetSchema` +section. However, the required support is not quite ready in Druid so this part +is a work-in-progress. diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/ResourcesSection.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ResourcesSection.java new file mode 100644 index 000000000000..70f1abf6edb2 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/ResourcesSection.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.ResourceAction; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Set; + +/** + * The resource (actions) test case section. + */ +public class ResourcesSection extends TestSection +{ + /** + * Indicates an expected resource action. + */ + public static class Resource + { + final String type; + final String name; + final Action action; + + public Resource(String type, String name, Action action) + { + this.type = type; + this.name = name; + this.action = action; + } + + public Resource(ResourceAction action) + { + this( + action.getResource().getType(), + action.getResource().getName(), + action.getAction() + ); + } + + @Override + public String toString() + { + return type + "/" + name + "/" + action.name(); + } + + public static List convert(Set actions) + { + List converted = new ArrayList<>(); + for (ResourceAction action : actions) { + converted.add(new Resource(action)); + } + return converted; + } + + public static List sort(List list) + { + List sorted = new ArrayList<>(list); + Collections.sort( + sorted, + (l, r) -> { + int value = l.type.compareTo(r.type); + if (value != 0) { + return value; + } + value = l.name.compareTo(r.name); + if (value != 0) { + return value; + } + return l.action.compareTo(r.action); + } + ); + return sorted; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + Resource other = (Resource) o; + return this.type.equalsIgnoreCase(other.type) + && this.name.equals(other.name) + && this.action == other.action; + } + 
+ /** + * Never used (doesn't make sense). But, needed to make static checks happy. + */ + @Override + public int hashCode() + { + return Objects.hash(type, name, action); + } + } + + protected final List resourceActions; + + protected ResourcesSection(List resourceActions) + { + this(resourceActions, false); + } + + protected ResourcesSection(List resourceActions, boolean copy) + { + super(Section.RESOURCES.sectionName(), copy); + this.resourceActions = resourceActions; + } + + public List resourceActions() + { + return resourceActions; + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.RESOURCES; + } + + @Override + public TestSection copy() + { + return new ResourcesSection(resourceActions, true); + } + + public boolean verify(Set actual, ActualResults.ErrorCollector errors) + { + if (actual == null) { + return true; + } + if (actual.size() != resourceActions.size()) { + errors.setSection(section().sectionName()); + errors.add( + StringUtils.format( + "expected %d entries, got %d", + resourceActions.size(), + actual.size())); + return false; + } + List expectedActions = ResourcesSection.Resource.sort(resourceActions); + List actualActions = ResourcesSection.Resource.sort(ResourcesSection.Resource.convert(actual)); + for (int i = 0; i < expectedActions.size(); i++) { + if (!expectedActions.get(i).equals(actualActions.get(i))) { + errors.setSection(section().sectionName()); + errors.add( + StringUtils.format( + "resource did not match: [%s]", + actualActions.get(i))); + return false; + } + } + return true; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + ResourcesSection other = (ResourcesSection) o; + return resourceActions.equals(other.resourceActions); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. 
+ */ + @Override + public int hashCode() + { + return Objects.hash(resourceActions); + } + + @Override + public void writeSection(TestCaseWriter writer) throws IOException + { + writer.emitResources(resourceActions); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/SectionContainer.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/SectionContainer.java new file mode 100644 index 000000000000..a77393bc01b1 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/SectionContainer.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.druid.query.QueryContexts; +import org.apache.druid.sql.calcite.tester.TextSection.ExceptionSection; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Common parent for test cases and runs: things that have labels + * and contain sections. Sections are kept in file order for writing, + * and indexed for retrieval. 
+ */ +public abstract class SectionContainer +{ + protected final String label; + protected final Map sections = new HashMap<>(); + protected final List fileOrder; + + public SectionContainer( + String label, + List sections + ) + { + this.label = label; + this.fileOrder = sections; + for (TestSection section : sections) { + this.sections.put(section.section(), section); + } + } + + public String label() + { + return label; + } + + public List sections() + { + return fileOrder; + } + + public TestSection section(TestSection.Section section) + { + return sections.get(section); + } + + public OptionsSection optionsSection() + { + return (OptionsSection) section(TestSection.Section.OPTIONS); + } + + public Map options() + { + OptionsSection section = optionsSection(); + return section == null ? Collections.emptyMap() : section.options(); + } + + public String option(String key) + { + OptionsSection options = optionsSection(); + return options == null ? null : options.options.get(key); + } + + public ContextSection contextSection() + { + return (ContextSection) section(TestSection.Section.CONTEXT); + } + + public ExceptionSection exception() + { + return (TextSection.ExceptionSection) section(TestSection.Section.EXCEPTION); + } + + public PatternSection error() + { + return (PatternSection) section(TestSection.Section.ERROR); + } + + public boolean shouldFail() + { + return exception() != null || error() != null; + } + + public boolean booleanOption(String key) + { + return QueryContexts.getAsBoolean(key, option(key), false); + } + + public abstract Map context(); +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseLoader.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseLoader.java new file mode 100644 index 000000000000..08ccb5bb4df2 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseLoader.java @@ -0,0 +1,750 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.google.common.base.Strings; +import org.apache.calcite.avatica.SqlType; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.query.QueryContexts; +import org.apache.druid.server.security.Action; +import org.apache.druid.sql.calcite.tester.LinesSection.CaseSection; +import org.apache.druid.sql.calcite.tester.LinesSection.CommentsSection; +import org.apache.druid.sql.calcite.tester.LinesSection.ResultsSection; +import org.apache.druid.sql.calcite.tester.PatternSection.ExpectedLine; +import org.apache.druid.sql.calcite.tester.PatternSection.ExpectedRegex; +import org.apache.druid.sql.calcite.tester.PatternSection.ExpectedText; +import org.apache.druid.sql.calcite.tester.PatternSection.SkipAny; +import org.apache.druid.sql.calcite.tester.TestSection.Section; +import org.apache.druid.sql.calcite.tester.TextSection.ExceptionSection; +import org.apache.druid.sql.calcite.tester.TextSection.SqlSection; +import org.apache.druid.sql.http.SqlParameter; + +import java.io.File; +import java.io.FileInputStream; +import 
java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.LineNumberReader; +import java.io.Reader; +import java.io.StringReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; + +/** + * Loads and parses a test case file, producing a list of test cases. + */ +public class TestCaseLoader +{ + public static List loadResource(String resource) + { + try (InputStream is = TestCaseLoader.class.getResourceAsStream(resource)) { + if (is == null) { + throw new IAE("Cannot open resource: " + resource); + } + return load(new InputStreamReader(is, StandardCharsets.UTF_8), resource); + } + catch (IOException e) { + throw new IAE("Cannot close resource: " + resource); + } + } + + public static List loadFile(File file) + { + try { + try (InputStream is = new FileInputStream(file)) { + return load(new InputStreamReader(is, StandardCharsets.UTF_8), file.getName()); + } + } + catch (IOException e) { + throw new IAE("Cannot open file: " + file.getAbsolutePath()); + } + } + + public static List loadString(String string) + { + return load(new StringReader(string), ""); + } + + public static List load(Reader reader, String label) + { + return new TestCaseLoader(reader, label).load(); + } + + private final String sourceLabel; + private final LineNumberReader reader; + private final List testCases = new ArrayList<>(); + private QueryTestCase.Builder testCase; + private QueryTestCase prevCase; + private QueryRun.Builder queryRun; + private String pushed; + private List comment; + private int sectionStartLine; + + public TestCaseLoader(Reader reader, String label) + { + this.reader = new LineNumberReader(reader); + this.sourceLabel = label; + } + + public List load() + { + // Ignore leading text + if (!skipComments()) { + return testCases; + } + while 
(loadCase()) { + // Empty + } + return testCases; + } + + private String next() + { + if (pushed != null) { + String ret = pushed; + pushed = null; + return ret; + } + try { + return reader.readLine(); + } + catch (IOException e) { + throw new ISE(e, "Failed to read query config file: " + sourceLabel); + } + } + + private void push(String line) + { + pushed = line; + } + + private Pair parseSection(String expected, boolean expectCase) + { + while (true) { + String line = next(); + if (line == null) { + return null; + } + if (line.startsWith("====")) { + if (!skipComments()) { + return null; + } + continue; + } + if (line.startsWith("===#") || "===".equals(line)) { + continue; + } + if (!line.startsWith("=== ")) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Expected comments or === %s", + sourceLabel, + reader.getLineNumber(), + expected)); + } + String tail = line.substring(4).trim(); + if (tail.length() == 0 || tail.charAt(0) == '#') { + continue; + } + Pair result; + int posn = tail.indexOf(' '); + if (posn == -1) { + result = Pair.of(tail, ""); + } else { + result = Pair.of( + tail.substring(0, posn), + tail.substring(posn + 1).trim()); + } + if (!expectCase && "case".equalsIgnoreCase(result.lhs)) { + push(line); + } + return result; + } + } + + private boolean loadCase() + { + if (!loadCaseSection()) { + return false; + } + while (loadSection()) { + comment = null; + } + prevCase = testCase.build(); + if (prevCase.sqlSection() == null) { + throw new IAE( + StringUtils.format( + "[%s:%d]: missing sql section", + sourceLabel, + sectionStartLine)); + } + testCases.add(prevCase); + testCase = null; + queryRun = null; + return true; + } + + private boolean loadSection() + { + sectionStartLine = reader.getLineNumber() + 1; + Pair parts = parseSection("
", false); + if (parts == null) { + return false; + } + boolean copy = "copy".equals(parts.rhs); + if (copy) { + if (prevCase == null) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Section has \"copy\" option, but is the first case: %s", + sourceLabel, + sectionStartLine, + parts.lhs + )); + } + } + switch (StringUtils.toLowerCase(parts.lhs)) { + case "case": + return false; + case "sql": + return loadQuery(copy); + case "ast": + return loadPattern(Section.AST, parts.lhs, copy); + case "plan": + return loadPattern(Section.PLAN, parts.lhs, copy); + case "execplan": + return loadPattern(Section.EXEC_PLAN, parts.lhs, copy); + case "explain": + return loadPattern(Section.EXPLAIN, parts.lhs, copy); + case "unparsed": + return loadPattern(Section.UNPARSED, parts.lhs, copy); + case "schema": + return loadPattern(Section.SCHEMA, parts.lhs, copy); + case "targetschema": + return loadPattern(Section.TARGET_SCHEMA, parts.lhs, copy); + case "native": + return loadPattern(Section.NATIVE, parts.lhs, copy); + case "resources": + return loadResources(copy); + case "context": + return loadContext(copy); + case "exception": + return loadException(copy); + case "error": + return loadError(parts.lhs, copy); + case "parameters": + return loadParameters(copy); + case "results": + return loadResults(copy); + case "options": + return loadOptions(copy); + case "run": + return loadRun(); + default: + throw new IAE( + StringUtils.format( + "[%s:%d]: unknown section [%s]", + sourceLabel, + sectionStartLine, + parts.lhs)); + } + } + + private boolean loadCaseSection() + { + Pair parts = parseSection("case", true); + if (parts == null) { + return false; + } + int startLine = reader.getLineNumber(); + if (!"case".equals(parts.lhs)) { + throw new IAE( + StringUtils.format( + "[%s:%d]: First section must be case", + sourceLabel, + startLine)); + } + Pair loaded = loadText(); + String label = loaded.lhs.trim(); + if (label.length() == 0) { + label = StringUtils.format("Case at line %d", 
startLine); + } + testCase = new QueryTestCase.Builder(label); + if (comment != null) { + testCase.add(new CommentsSection(comment)); + } + comment = null; + testCase.add(new CaseSection(Collections.singletonList(label))); + return loaded.rhs; + } + + private boolean loadQuery(boolean copy) + { + Pair parsed = requireText(Section.SQL, copy); + TestSection section; + if (copy) { + section = prevCase.copySection(Section.SQL); + } else { + String sql = parsed.lhs.trim(); + if (Strings.isNullOrEmpty(sql)) { + throw new IAE( + StringUtils.format( + "[%s:%d]: SQL text is missing", + sourceLabel, + reader.getLineNumber())); + } + section = new SqlSection("SQL", sql); + } + testCase.add(section); + return parsed.rhs; + } + + private Pair requireText(Section section, boolean copy) + { + Pair parsed = loadText(); + if (copy) { + if (!Strings.isNullOrEmpty(parsed.lhs)) { + throw sectionNotEmptyError(section); + } + } else { + if (Strings.isNullOrEmpty(parsed.lhs)) { + throw new IAE( + StringUtils.format( + "[%s:%d]: %s text is missing", + sourceLabel, + sectionStartLine, + section.sectionName())); + } + } + return parsed; + } + + private boolean loadPattern(Section section, String sectionName, boolean copy) + { + Pair, Boolean> result = requireExpected(section, copy); + TestSection patternSection; + if (copy) { + patternSection = copySection(section); + } else { + patternSection = new PatternSection(section, sectionName, new ExpectedText(result.lhs)); + } + testCase.add(patternSection); + return result.rhs; + } + + private TestSection copySection(Section section) + { + TestSection copy = prevCase.copySection(section); + if (copy == null) { + throw noPrevSectionError(section); + } + return copy; + } + + private Pair, Boolean> requireExpected(Section section, boolean copy) + { + Pair, Boolean> result = loadExpected(); + if (copy && !result.lhs.isEmpty()) { + throw sectionNotEmptyError(section); + } + return result; + } + + private IAE sectionNotEmptyError(Section section) + { + 
return new IAE( + StringUtils.format( + "[%s:%d]: %s section - \"copy\" option set, but section is not empty", + sourceLabel, + sectionStartLine, + section.sectionName())); + } + + private IAE noPrevSectionError(Section section) + { + throw new IAE( + StringUtils.format( + "[%s:%d]: %s section - \"copy\" option set, but previous test doesn't have that section", + sourceLabel, + sectionStartLine, + section.sectionName())); + } + + private boolean loadException(boolean copy) + { + Pair loaded = loadText(); + TestSection exSection; + if (copy) { + if (!Strings.isNullOrEmpty(loaded.lhs)) { + throw sectionNotEmptyError(Section.EXCEPTION); + } + exSection = copySection(Section.EXCEPTION); + } else { + exSection = new ExceptionSection(loaded.lhs.trim()); + } + addCommonSection(exSection, copy); + return loaded.rhs; + } + + private boolean loadError(String sectionName, boolean copy) + { + Pair, Boolean> result = requireExpected(Section.ERROR, copy); + TestSection testSection; + if (copy) { + testSection = copySection(Section.ERROR); + } else { + testSection = new PatternSection(Section.ERROR, sectionName, new ExpectedText(result.lhs)); + } + addCommonSection(testSection, copy); + return result.rhs; + } + + private Pair, Boolean> loadExpected() + { + Pair, Boolean> loaded = loadLines(); + List lines = new ArrayList<>(); + for (String line : loaded.lhs) { + if (line.startsWith("!")) { + lines.add(new ExpectedRegex(line.substring(1))); + continue; + } + if ("**".equals(line)) { + lines.add(new SkipAny()); + continue; + } + if (line.startsWith("\\")) { + line = line.substring(1); + } + lines.add(new PatternSection.ExpectedLiteral(line)); + } + return Pair.of(lines, loaded.rhs); + } + + private boolean loadContext(boolean copy) + { + Pair loaded = loadText(); + String text = loaded.lhs; + TestSection contextSection; + if (copy) { + if (!Strings.isNullOrEmpty(text)) { + throw sectionNotEmptyError(Section.CONTEXT); + } + contextSection = copySection(Section.CONTEXT); + } else { + 
Properties props = new Properties(); + try { + props.load(new StringReader(text)); + } + catch (IOException e) { + throw new IAE( + StringUtils.format( + "[%s:%d]: failed to parse context: %s", + sourceLabel, + sectionStartLine, + e.getMessage())); + } + if (props.isEmpty()) { + contextSection = null; + } else { + Map context = new HashMap<>(); + for (Entry entry : props.entrySet()) { + String key = entry.getKey().toString(); + context.put( + key, + QueryContexts.definition(key).parse( + entry.getValue().toString())); + } + contextSection = new ContextSection(context); + } + } + addCommonSection(contextSection, copy); + return loaded.rhs; + } + + private void addCommonSection(TestSection section, boolean copy) + { + if (queryRun == null) { + testCase.add(section); + } else if (copy) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Cannot use \"copy\" option in run section", + sourceLabel, + sectionStartLine)); + } else { + queryRun.add(section); + } + } + + private void addRunSection(TestSection section, boolean copy) + { + if (queryRun == null) { + queryRun = testCase.addRun("", false); + } else if (copy) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Cannot use \"copy\" option in run section", + sourceLabel, + sectionStartLine)); + } + queryRun.add(section); + } + + private boolean loadResources(boolean copy) + { + Pair, Boolean> loaded = loadLines(); + TestSection resourceSection; + if (copy) { + resourceSection = copySection(Section.RESOURCES); + } else { + List resourceActions = new ArrayList<>(); + for (String entry : loaded.lhs) { + String[] parts = entry.split("/"); + if (parts.length != 3) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Resources is not in type/name/action format: [%s]", + sourceLabel, + sectionStartLine, + entry)); + } + Action action = Action.fromString(parts[2]); + if (action == null) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Invalid action: [%s]", + sourceLabel, + sectionStartLine, + parts[2])); + } + 
resourceActions.add(new ResourcesSection.Resource(parts[0], parts[1], action)); + } + resourceSection = new ResourcesSection(resourceActions); + } + testCase.add(resourceSection); + return loaded.rhs; + } + + private Pair, Boolean> requireLines(Section section, boolean copy) + { + Pair, Boolean> loaded = loadLines(); + if (loaded.lhs.isEmpty()) { + return loaded; + } + if (copy) { + throw sectionNotEmptyError(section); + } + return loaded; + } + + private boolean loadParameters(boolean copy) + { + Pair, Boolean> loaded = requireLines(Section.PARAMETERS, copy); + TestSection paramsSection; + if (copy) { + paramsSection = copySection(Section.PARAMETERS); + } else { + List parameters = new ArrayList<>(); + for (int i = 0; i < loaded.lhs.size(); i++) { + String entry = loaded.lhs.get(i); + if ("null".equals(entry)) { + parameters.add(null); + continue; + } + int posn = entry.indexOf(':'); + if (posn == -1) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Parameter is not in type: value format: [%s]", + sourceLabel, + sectionStartLine, + entry)); + } + String type = StringUtils.toLowerCase(entry.substring(0, posn).trim()); + String value = entry.substring(posn + 1).trim(); + try { + parameters.add(parseParameter(type, value)); + } + catch (Exception e) { + throw new IAE( + StringUtils.format( + "[%s:%d]: parameter [%s]: %s", + sourceLabel, + sectionStartLine, + entry, + e.getMessage())); + } + } + paramsSection = new ParametersSection(parameters); + } + testCase.add(paramsSection); + return loaded.rhs; + } + + public static SqlParameter parseParameter(String type, String value) + { + if ("int".equalsIgnoreCase(type)) { + type = SqlType.INTEGER.name(); + } else if ("long".equalsIgnoreCase(type)) { + type = SqlType.BIGINT.name(); + } else if ("string".equalsIgnoreCase(type)) { + type = SqlType.VARCHAR.name(); + } + SqlType sqlType = SqlType.valueOf(StringUtils.toUpperCase(type)); + if (sqlType == null) { + throw new RuntimeException("Unsupported parameter type: " + 
type); + } + if ("\\N".equals(value)) { + return new SqlParameter(sqlType, null); + } + Object sqlValue; + switch (sqlType) { + case INTEGER: + sqlValue = Integer.parseInt(value); + break; + case BIGINT: + sqlValue = Long.parseLong(value); + break; + case FLOAT: + case REAL: + sqlValue = Float.parseFloat(value); + break; + case DOUBLE: + sqlValue = Double.parseDouble(value); + break; + case VARCHAR: + sqlValue = QueryTestCases.unquote(value); + break; + case TIMESTAMP: + case DATE: + // Timestamps seem to appear as both quoted strings and numbers. + sqlValue = QueryTestCases.unquote(value); + break; + default: + throw new RuntimeException("Unsupported SQL type: " + type); + } + return new SqlParameter(sqlType, sqlValue); + } + + private boolean loadOptions(boolean copy) + { + Pair, Boolean> loaded = loadLines(); + TestSection optionsSection; + if (copy) { + optionsSection = copySection(Section.OPTIONS); + } else { + Map options = new HashMap<>(); + for (int i = 0; i < loaded.lhs.size(); i++) { + String line = loaded.lhs.get(i); + int posn = line.indexOf('='); + if (posn == -1) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Option is not in key=value format: [%s]", + sourceLabel, + sectionStartLine, + line)); + } + String key = line.substring(0, posn).trim(); + String value = QueryTestCases.unquote(line.substring(posn + 1).trim()); + options.put(key, value); + } + if (options.isEmpty()) { + optionsSection = null; + } else { + optionsSection = new OptionsSection(options); + } + } + addCommonSection(optionsSection, copy); + return loaded.rhs; + } + + private boolean loadResults(boolean copy) + { + Pair, Boolean> loaded = loadLines(); + TestSection resultsSection; + if (copy) { + if (prevCase.runs().size() != 1) { + throw new IAE( + StringUtils.format( + "[%s:%d]: Can only copy results if previous test has only one run. 
Previous has %d", + sourceLabel, + sectionStartLine, + prevCase.runs().size())); + } + TestSection prevSection = prevCase.runs().get(0).section(Section.RESULTS); + if (prevSection == null) { + throw noPrevSectionError(Section.RESULTS); + } + resultsSection = prevSection.copy(); + } else { + resultsSection = new ResultsSection(loaded.lhs); + } + addRunSection(resultsSection, copy); + return loaded.rhs; + } + + private boolean loadRun() + { + Pair parsed = loadText(); + String label = parsed.lhs.trim(); + queryRun = testCase.addRun(label, true); + return parsed.rhs; + } + + private Pair, Boolean> loadLines() + { + List lines = new ArrayList<>(); + String line; + while ((line = next()) != null) { + if (line.startsWith("===")) { + push(line); + break; + } + lines.add(line); + } + return Pair.of(lines, line != null); + } + + private Pair loadText() + { + Pair, Boolean> lines = loadLines(); + // Preserve final newline if any text appears + lines.lhs.add(""); + String text = String.join("\n", lines.lhs); + return Pair.of(text, lines.rhs); + } + + private boolean skipComments() + { + Pair, Boolean> loaded = loadLines(); + comment = loaded.lhs; + return loaded.rhs; + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseLoaderTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseLoaderTest.java new file mode 100644 index 000000000000..3d27ff8a6d91 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseLoaderTest.java @@ -0,0 +1,742 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.calcite.avatica.SqlType; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.query.QueryContexts; +import org.apache.druid.server.security.Action; +import org.apache.druid.sql.http.SqlParameter; +import org.junit.Test; + +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +/** + * Tests the test case loader (parser). 
+ */ +public class TestCaseLoaderTest +{ + @Test + public void testEmpty() + { + String input = ""; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + input = " "; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + input = "\n"; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + input = " \n\n"; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + } + + @Test + public void testLeadingComments() + { + String input = + "I'm a comment"; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + input = + "I'm a comment\n"; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + input = + "I'm a comment\n" + + "and so am I\n"; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + input = + "I'm a comment\n" + + "====\n" + + "and so am I\n"; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + } + + @Test + public void testAllComments() + { + String input = + "====\n" + + "Ignore me\n" + + "=====\n" + + "Ignore me also\n" + + "==== foo\n" + + "===#\n" + + "===# foo\\n" + + "=== #\\n" + + "=== # foo\n"; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + input = + "===="; + assertTrue(TestCaseLoader.loadString(input).isEmpty()); + } + + @Test + public void testMissingCase() + { + final String input = + "=== plan\n"; + assertThrows( + IAE.class, + () -> TestCaseLoader.loadString(input)); + } + + @Test + public void testCase() + { + String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n"; + List cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + assertEquals("SELECT 1", cases.get(0).sql()); + assertEquals("Case at line 1", cases.get(0).label); + + input = + "\n" + + "====\n" + + "some comment\n" + + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + assertEquals("SELECT 1", cases.get(0).sql()); + assertEquals("Case at line 4", cases.get(0).label); + + input = + "=== case\n" + + "second\n" + + "=== SQL\n" + + 
"SELECT foo\n" + + " FROM bar\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + assertEquals("SELECT foo\n FROM bar", cases.get(0).sql()); + assertEquals("second", cases.get(0).label); + } + + @Test + public void testEmptySql() + { + { + final String input = + "=== case\n" + + "=== sql\n"; + assertThrows( + IAE.class, + () -> TestCaseLoader.loadString(input)); + } + { + final String input = + "=== case\n" + + "=== sql\n" + + "\n"; + assertThrows( + IAE.class, + () -> TestCaseLoader.loadString(input)); + } + { + final String input = + "=== case\n" + + "=== sql\n" + + " \n"; + assertThrows( + IAE.class, + () -> TestCaseLoader.loadString(input)); + } + { + final String input = + "=== case\n" + + "=== sql\n" + + "=== case\n" + + "=== sql"; + assertThrows( + IAE.class, + () -> TestCaseLoader.loadString(input)); + } + { + final String input = + "=== case\n" + + "=== sql\n" + + "\n" + + "=== case\n" + + "=== sql"; + assertThrows( + IAE.class, + () -> TestCaseLoader.loadString(input)); + } + } + + @Test + public void testInvalidSection() + { + { + final String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== bogus"; + assertThrows( + IAE.class, + () -> TestCaseLoader.loadString(input)); + } + } + + @Test + public void testPlan() + { + String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n"; + List cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + PatternSection.ExpectedText plan = cases.get(0).plan().expected; + assertTrue(plan.lines.isEmpty()); + + input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + " a plan \n"; + cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + plan = cases.get(0).plan().expected; + assertEquals(1, plan.lines.size()); + assertEquals(" a plan ", ((PatternSection.ExpectedLiteral) plan.lines.get(0)).line); + + input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + "**\n" + + "a 
plan\n" + + "!count \\d+ \n" + + " \n" + + "\\!foo \n"; + cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + plan = cases.get(0).plan().expected; + assertEquals(5, plan.lines.size()); + assertTrue(plan.lines.get(0) instanceof PatternSection.SkipAny); + assertEquals("a plan", ((PatternSection.ExpectedLiteral) plan.lines.get(1)).line); + assertEquals("count \\d+ ", ((PatternSection.ExpectedRegex) plan.lines.get(2)).line); + assertEquals(" ", ((PatternSection.ExpectedLiteral) plan.lines.get(3)).line); + assertEquals("!foo ", ((PatternSection.ExpectedLiteral) plan.lines.get(4)).line); + } + + @Test + public void testSections() + { + String input = + "Example input file\n" + + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + " a plan \n" + + "=== explain\n" + + " explanation \n"; + List cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + assertNotNull(cases.get(0).plan()); + assertNotNull(cases.get(0).explain()); + } + + @Test + public void testTrailingComments() + { + String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + "====\n" + + "that's all, folks"; + List cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + assertTrue(cases.get(0).plan().expected.lines.isEmpty()); + + input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + "===\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + assertTrue(cases.get(0).plan().expected.lines.isEmpty()); + } + + @Test + public void testMultipleCases() + { + String input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== case\n" + + "second\n" + + "=== SQL\n" + + "SELECT 2\n" + + "=== plan\n" + + "second plan\n"; + List cases = TestCaseLoader.loadString(input); + assertEquals(2, cases.size()); + assertEquals("first", cases.get(0).label); + assertEquals("second", cases.get(1).label); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" 
+ + "SELECT 1\n" + + "=== plan\n" + + "first plan\n" + + "=== case\n" + + "second\n" + + "=== SQL\n" + + "SELECT 2\n" + + "=== plan\n" + + "second plan\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(2, cases.size()); + assertEquals("first", cases.get(0).label); + assertEquals("second", cases.get(1).label); + } + + @Test + public void testContext() + { + String input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== context\n"; + List cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + assertNull(cases.get(0).contextSection()); + assertFalse(cases.get(0).hasRuns()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== context\n" + + "foo=bar\n" + + QueryContexts.USE_CACHE_KEY + "=true\n" + + QueryContexts.TIMEOUT_KEY + "=10\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.size()); + Map context = cases.get(0).contextSection().context; + assertEquals(3, context.size()); + assertEquals("bar", context.get("foo")); + assertEquals(true, context.get(QueryContexts.USE_CACHE_KEY)); + assertEquals(10, context.get(QueryContexts.TIMEOUT_KEY)); + } + + @Test + public void testResources() + { + String input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== resources\n"; + List cases = TestCaseLoader.loadString(input); + assertTrue(cases.get(0).resourceActions().resourceActions.isEmpty()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== resources\n" + + "foo/bar/" + Action.READ.name() + "\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(1, cases.get(0).resourceActions().resourceActions.size()); + ResourcesSection.Resource resource = cases.get(0).resourceActions().resourceActions.get(0); + assertEquals("foo", resource.type); + assertEquals("bar", resource.name); + assertEquals(Action.READ, resource.action); + } + + @Test + public void testParameters() + { + String input = + "=== case\n" + + 
"first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== parameters\n"; + List cases = TestCaseLoader.loadString(input); + assertTrue(cases.get(0).parameters().isEmpty()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== parameters\n" + + "int: 10\n" + + "integer: 20 \n" + + "long: 30\n" + + "bigint: 40\n" + + "float: 50.1 \n" + + "double: 60.2\n" + + "string: foo \n" + + "varchar: bar \n"; + cases = TestCaseLoader.loadString(input); + List params = cases.get(0).parameters(); + assertEquals(8, params.size()); + + assertEquals(SqlType.INTEGER, params.get(0).getType()); + assertEquals(10, params.get(0).getValue()); + + assertEquals(SqlType.INTEGER, params.get(1).getType()); + assertEquals(20, params.get(1).getValue()); + + assertEquals(SqlType.BIGINT, params.get(2).getType()); + assertEquals(30L, params.get(2).getValue()); + + assertEquals(SqlType.BIGINT, params.get(3).getType()); + assertEquals(40L, params.get(3).getValue()); + + assertEquals(SqlType.FLOAT, params.get(4).getType()); + assertEquals(50.1F, params.get(4).getValue()); + + assertEquals(SqlType.DOUBLE, params.get(5).getType()); + assertEquals(60.2D, params.get(5).getValue()); + + assertEquals(SqlType.VARCHAR, params.get(6).getType()); + assertEquals("foo", params.get(6).getValue()); + + assertEquals(SqlType.VARCHAR, params.get(7).getType()); + assertEquals("bar", params.get(7).getValue()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== parameters\n" + + "string: ' foo '\n" + + "varchar: \" bar \"\n"; + cases = TestCaseLoader.loadString(input); + params = cases.get(0).parameters(); + assertEquals(2, params.size()); + + assertEquals(SqlType.VARCHAR, params.get(0).getType()); + assertEquals(" foo ", params.get(0).getValue()); + + assertEquals(SqlType.VARCHAR, params.get(1).getType()); + assertEquals(" bar ", params.get(1).getValue()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== parameters\n" + + "date: 
\"2022-05-09\"\n" + + "timestamp: \"2022-05-09 01:02:03\"\n"; + cases = TestCaseLoader.loadString(input); + params = cases.get(0).parameters(); + assertEquals(2, params.size()); + + assertEquals(SqlType.DATE, params.get(0).getType()); + assertEquals("2022-05-09", params.get(0).getValue()); + + assertEquals(SqlType.TIMESTAMP, params.get(1).getType()); + assertEquals("2022-05-09 01:02:03", params.get(1).getValue()); + } + + @Test + public void testOptions() + { + String input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== options\n"; + List cases = TestCaseLoader.loadString(input); + assertNull(cases.get(0).optionsSection()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== options\n" + + "p1=foo\n" + + " p2 = bar \n" + + "p3=\" mumble \"\n"; + cases = TestCaseLoader.loadString(input); + Map options = cases.get(0).optionsSection().options; + assertEquals(3, options.size()); + assertEquals("foo", options.get("p1")); + assertEquals("bar", options.get("p2")); + assertEquals(" mumble ", options.get("p3")); + assertFalse(cases.get(0).hasRuns()); + } + + @Test + public void testCopy() + { + { + final String input = + "=== case\n" + + "first\n" + + "=== SQL copy\n"; + assertThrows( + IAE.class, + () -> TestCaseLoader.loadString(input)); + } + + String input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== case\n" + + "second\n" + + "=== SQL copy\n"; + List cases = TestCaseLoader.loadString(input); + assertEquals(cases.get(0).sql(), cases.get(1).sql()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + "the plan\n" + + "=== case\n" + + "second\n" + + "=== SQL copy\n" + + "=== plan copy\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(cases.get(0).plan(), cases.get(1).plan()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== schema\n" + + "foo VARCHAR\n" + + "=== case\n" + + "second\n" + + "=== SQL 
copy\n" + + "=== schema copy\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(cases.get(0).schema(), cases.get(1).schema()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== native\n" + + "foo\n" + + "=== case\n" + + "second\n" + + "=== SQL copy\n" + + "=== native copy\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(cases.get(0).nativeQuery(), cases.get(1).nativeQuery()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== resources\n" + + "druid/foo/READ\n" + + "=== case\n" + + "second\n" + + "=== SQL copy\n" + + "=== resources copy\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(cases.get(0).resourceActions(), cases.get(1).resourceActions()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== context\n" + + "foo=bar\n" + + "=== case\n" + + "second\n" + + "=== SQL copy\n" + + "=== context copy\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(cases.get(0).contextSection(), cases.get(1).contextSection()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== options\n" + + "foo=bar\n" + + "=== case\n" + + "second\n" + + "=== SQL copy\n" + + "=== options copy\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(cases.get(0).options(), cases.get(1).options()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== parameters\n" + + "VARCHAR: foo\n" + + "=== case\n" + + "second\n" + + "=== SQL copy\n" + + "=== parameters copy\n"; + cases = TestCaseLoader.loadString(input); + assertEquals(cases.get(0).parameters(), cases.get(1).parameters()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== results\n" + + "10, 20\n" + + "=== case\n" + + "second\n" + + "=== SQL copy\n" + + "=== results copy\n"; + cases = TestCaseLoader.loadString(input); + QueryRun run1 = cases.get(0).runs().get(0); + QueryRun run2 = 
cases.get(1).runs().get(0); + assertEquals(run1.results(), run2.results()); + } + + @Test + public void testRuns() + { + String input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== results\n" + + "[\"a\", 10]\n"; + List cases = TestCaseLoader.loadString(input); + QueryRun run = cases.get(0).runs().get(0); + assertEquals("", run.label()); + assertEquals(1, run.results().size()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== run\n" + + "=== options\n" + + "foo=bar\n"; + cases = TestCaseLoader.loadString(input); + run = cases.get(0).runs().get(0); + assertEquals("", run.label()); + assertEquals("Run 1", run.displayLabel()); + assertEquals("bar", run.option("foo")); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== options\n" + + "x=a\n" + + "foo=mumble\n" + + "=== run\n" + + "=== options\n" + + "foo=bar\n"; + cases = TestCaseLoader.loadString(input); + run = cases.get(0).runs().get(0); + assertEquals("Run 1", run.displayLabel()); + assertEquals("bar", run.option("foo")); + assertEquals("a", run.option("x")); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== run\n" + + "=== options\n" + + "foo=bar\n" + + "=== results\n" + + "[\"a\", 10]\n" + + "=== run\n" + + "=== options\n" + + "user=bob\n" + + "=== results\n" + + "[\"b\", 20]\n"; + cases = TestCaseLoader.loadString(input); + QueryTestCase testCase = cases.get(0); + assertEquals(2, testCase.runs().size()); + run = testCase.runs().get(0); + assertEquals("", run.label()); + assertEquals("Run 1", run.displayLabel()); + assertEquals("bar", run.optionsSection().get("foo")); + assertEquals(1, run.results().size()); + run = testCase.runs().get(1); + assertEquals("Run 2", run.displayLabel()); + assertEquals("bob", run.optionsSection().get("user")); + assertEquals(1, run.results().size()); + + input = + "=== case\n" + + "first\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== run\n" + + "fast\n" + + "=== 
options\n" + + "user=bob\n"; + cases = TestCaseLoader.loadString(input); + run = cases.get(0).runs().get(0); + assertEquals("fast", run.label()); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseMerger.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseMerger.java new file mode 100644 index 000000000000..ab6deea4858d --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseMerger.java @@ -0,0 +1,765 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.sql.calcite.tester; + +import com.google.common.collect.ImmutableMap; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.sql.calcite.tester.TestSection.Section; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Ad-hoc utilities used to convert a JUnit test to the test framework. + * Merges the converted test file with an existing file to produce a new + * file that has the combined results. Handy for picking up changes to + * the Java code, or for combining a SQL-compatible run with a + * "replace nulls with defaults" run. Done only once in a while, by hand. + * The code must be configured (by hand) for the desired test case. + * + * Also contains utilities to rewrite test cases in various useful ways. + * You'll know you need them (or a new version) if you find yourself making + * repetitive changes. 
+ */ +public class TestCaseMerger +{ + public static void main(String[] args) + { + //rewriteFile("query15.case"); + buildQueryTest(); + //buildArrayTest(); + //buildInsertDMLTest(); + //buildJoinTest(); + //buildCorrelatedTest(); + //buildMultiValueTest(); + //buildParameterTest(); + } + + @SuppressWarnings("unused") + private static void buildParameterTest() + { + List existing = TestCaseLoader.loadResource("/calcite/cases/parameterQuery.case"); + rewrite(existing, "org.apache.druid.sql.calcite.CalciteParameterQueryTest"); + } + + @SuppressWarnings("unused") + private static void buildMultiValueTest() + { + List existing = TestCaseLoader.loadResource("/calcite/cases/multiValueStringQuery.case"); + rewrite(existing, "org.apache.druid.sql.calcite.CalciteMultiValueStringQueryTest"); + } + + @SuppressWarnings("unused") + private static void buildCorrelatedTest() + { + List existing = TestCaseLoader.loadResource("/calcite/cases/correlatedQuery.case"); + rewrite(existing, "org.apache.druid.sql.calcite.CalciteCorrelatedQueryTest"); + } + + @SuppressWarnings("unused") + private static void buildJoinTest() + { + List existing = TestCaseLoader.loadResource("/calcite/cases/joinQuery.case"); + rewrite(existing, "org.apache.druid.sql.calcite.CalciteJoinQueryTest"); + } + + @SuppressWarnings("unused") + private static void buildInsertDMLTest() + { + // Doesn't work because of the special structure of this test case. 
+ List existing = TestCaseLoader.loadResource("/calcite/cases/insertDML.case"); + rewrite(existing, "org.apache.druid.sql.calcite.CalciteInsertDmlTest"); + } + + @SuppressWarnings("unused") + private static void buildArrayTest() + { + List existing = TestCaseLoader.loadResource("/calcite/cases/arrayQuery.case"); + rewrite(existing, "org.apache.druid.sql.calcite.CalciteArraysQueryTest"); + } + + @SuppressWarnings("unused") + private static void buildQueryTest() + { + List existing = loadSet("query", 15); + rewrite(existing, "org.apache.druid.sql.calcite.CalciteQueryTest"); + } + + public static List loadSet(String base, int count) + { + List fullList = new ArrayList<>(); + for (int i = 0; i < count; i++) { + fullList.addAll(TestCaseLoader.loadResource( + StringUtils.format("/calcite/cases/%s%02d.case", base, i + 1))); + } + return fullList; + } + + public static List loadFile(String filePath) + { + return TestCaseLoader.loadFile(new File(filePath)); + } + + private static void rewrite(List existing, String className) + { + new TestCaseMerger(existing, className).rewrite(); + } + + public static class TestWrapper + { + boolean merged; + final QueryTestCase testCase; + + public TestWrapper(QueryTestCase testCase) + { + this.testCase = testCase; + } + } + + private final List existing; + private final List recorded; + private final List methods; + + private TestCaseMerger(List existing, String className) + { + this.existing = existing; + this.recorded = TestCaseLoader.loadFile(new File("target/actual/recorded.case")); + try { + this.methods = loadTestClass(className); + } + catch (Exception e) { + throw new IAE(e, "Class load failed"); + } + } + + public static List loadTestClass(String testClass) throws IOException + { + String classPath = StringUtils.replace(testClass, ".", "/"); + File file = new File(new File("src/test/java"), classPath + ".java"); + List methods = new ArrayList<>(); + try (BufferedReader reader = + new BufferedReader( + new InputStreamReader( + 
// Create an index of the recorded tests by method name and SQL
+ List>> existingGroups = new ArrayList<>(); + for (QueryTestCase testCase : existing) { + String key = testKey(testCase); + if (!existingGroups.isEmpty()) { + Pair> prev = existingGroups.get(existingGroups.size() - 1); + if (prev.lhs.equals(key)) { + prev.rhs.add(testCase); + continue; + } + } + List tests = new ArrayList<>(); + tests.add(testCase); + existingGroups.add(new Pair<>(key, tests)); + } + + // Match up existing tests in file order with recorded tests using + // method name and SQL. + List rewritten = new ArrayList<>(); + for (Pair> tests : existingGroups) { + List recordedCases = index.get(tests.lhs); + if (recordedCases == null) { + rewritten.addAll(tests.rhs); + } else { + rewritten.addAll(mergeCases(tests.rhs, recordedCases)); + } + } + + // If any recorded tests remain, add them in method name order. + List remainder = new ArrayList<>(); + for (TestWrapper wrapper : wrapped) { + if (!wrapper.merged) { + remainder.add(wrapper.testCase); + } + } + rewritten.addAll(reorder(remainder)); + return rewritten; + } + + private List mergeCases(List existingCases, List recordedCases) + { + // Simple case: only one test case in each category. + if (existingCases.size() == 1 && recordedCases.size() == 1) { + TestWrapper wrapper = recordedCases.get(0); + wrapper.merged = true; + return Collections.singletonList(mergeCases(existingCases.get(0), wrapper.testCase)); + } + + // Harder case: multiple runs for the same test and SQL, typically differentiated + // by options or context. Try to match up. 
+ List recordedCopy = new ArrayList<>(recordedCases); + List merged = new ArrayList<>(); + for (QueryTestCase existing : existingCases) { + TestWrapper found = null; + for (int i = 0; i < recordedCopy.size(); i++) { + if (existing.matches(recordedCopy.get(i).testCase)) { + found = recordedCopy.remove(i); + break; + } + } + if (found == null) { + merged.add(existing); + } else { + found.merged = true; + merged.add(mergeCases(existing, found.testCase)); + } + } + for (TestWrapper wrapper : recordedCopy) { + wrapper.merged = true; + merged.add(wrapper.testCase); + } + return merged; + } + + private String testKey(QueryTestCase testCase) + { + // Compensate for any formatting applied to SQL + String sql = StringUtils.toUpperCase(testCase.sql()); + sql = StringUtils.replaceAll(sql, "\\s+", " "); + sql = StringUtils.replace(sql, "( ", "("); + sql = StringUtils.replace(sql, " )", ")"); + sql = StringUtils.replace(sql, ", ", ","); + return methodName(testCase) + " - " + sql; + } + + /** + * Merge an existing and recorded test case. Prefer the recorded sections, + * but fill in existing sections where no recorded section exists. 
+ */ + private QueryTestCase mergeCases(QueryTestCase testCase, QueryTestCase recordedCase) + { + QueryTestCase.Builder builder = new QueryTestCase.Builder(testCase.label); + List recordedSections = new ArrayList<>(recordedCase.fileOrder); + + // First merge the "setup" sections + outer1: + for (TestSection existingSection : testCase.fileOrder) { + switch (existingSection.section()) { + case COMMENTS: + case CASE: + case SQL: + case CONTEXT: + case OPTIONS: + case PARAMETERS: + break; + default: + continue outer1; + } + TestSection recordedSection = recordedCase.section(existingSection.section()); + if (recordedSection != null) { + recordedSections.remove(recordedSection); + } + switch (existingSection.section()) { + case CONTEXT: + case OPTIONS: + case PARAMETERS: + if (recordedSection == null) { + builder.add(existingSection); + } else { + builder.add(recordedSection); + } + break; + default: + builder.add(existingSection); + break; + } + } + + // Then merge the "plan output" sections + outer2: + for (TestSection existingSection : testCase.fileOrder) { + switch (existingSection.section()) { + case COMMENTS: + case CASE: + case SQL: + case CONTEXT: + case OPTIONS: + case PARAMETERS: + continue outer2; + default: + break; + } + TestSection recordedSection = recordedCase.section(existingSection.section()); + if (recordedSection != null) { + recordedSections.remove(recordedSection); + } + switch (existingSection.section()) { + case ERROR: + case EXCEPTION: + case NATIVE: + case RESOURCES: + case SCHEMA: + if (recordedSection == null) { + builder.add(existingSection); + } else { + builder.add(recordedSection); + } + break; + default: + builder.add(existingSection); + break; + } + } + for (TestSection recordedSection : recordedSections) { + builder.add(recordedSection); + } + QueryTestCase rewritten = builder.build(); + + List recordedCopy = new ArrayList<>(recordedCase.runs()); + for (QueryRun run : testCase.runs()) { + boolean found = false; + // List search because 
there will usually be only 1 or 2 runs. + for (int i = 0; i < recordedCopy.size(); i++) { + QueryRun candidate = recordedCopy.get(i); + if (Objects.equals(run.results(), candidate.results())) { + rewritten.addRuns(mergeRuns(rewritten, run, candidate)); + recordedCopy.remove(i); + found = true; + break; + } + } + if (!found) { + rewritten.addRun(run); + } + } + rewritten.addRuns(recordedCopy); + return rewritten; + } + + private List mergeRuns(QueryTestCase newCase, QueryRun existing, QueryRun recorded) + { + QueryRun newRun = doMerge(newCase, existing, recorded); + if (newRun == null) { + return Arrays.asList(existing.copy(newCase), recorded.copy(newCase, true)); + } else { + return Collections.singletonList(newRun); + } + } + + private QueryRun doMerge(QueryTestCase newCase, QueryRun existing, QueryRun recorded) + { + if (!Objects.equals(existing.context(), recorded.context())) { + return null; + } + OptionsSection existingOptions = existing.optionsSection(); + OptionsSection recordedOptions = recorded.optionsSection(); + if (existingOptions == null || recordedOptions == null) { + return null; + } + if (existingOptions.options.size() != 1 || recordedOptions.options.size() != 1) { + return null; + } + String existingOption = existingOptions.options.get(OptionsSection.SQL_COMPATIBLE_NULLS); + String recordedOption = recordedOptions.options.get(OptionsSection.SQL_COMPATIBLE_NULLS); + if (existingOption == null || recordedOption == null) { + return null; + } + + // Merge. Change the replace nulls option to "both" to indicate the results + // are the same whether we replace nulls with default or not. 
are the same whether we replace nulls with defaults or not.
TestCaseLoader.class.getResourceAsStream(path)) { + if (is == null) { + throw new IAE("Cannot open resource: " + path); + } + try (BufferedReader reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))) { + List lines = new ArrayList<>(); + String line; + while ((line = reader.readLine()) != null) { + lines.add(line); + } + rewrite(fileName, lines); + } + } + catch (IOException e) { + throw new IAE("Cannot close resource: " + path); + } + } + + private static void rewrite(String fileName, List lines) throws IOException + { + List> cases = new ArrayList<>(); + List curCase = new ArrayList<>(); + cases.add(curCase); + for (String line : lines) { + if (line.startsWith("======")) { + curCase = new ArrayList<>(); + cases.add(curCase); + } + curCase.add(line); + } + cases = rewriteCases(cases); + File path = new File(new File("target/actual"), fileName); + try (BufferedWriter writer = new BufferedWriter( + new OutputStreamWriter( + new FileOutputStream(path), + StandardCharsets.UTF_8))) { + for (List testCase : cases) { + for (String line : testCase) { + writer.append(line); + writer.append("\n"); + } + } + } + } + + private static List> rewriteCases(List> cases) + { + List> revised = new ArrayList<>(); + for (List testCase : cases) { + revised.add(rewriteCaseMergeOptions(testCase)); + } + return revised; + } + + @SuppressWarnings("unused") + private static List rewriteCaseMoveOptions(List testCase) + { + int optionPosn = -1; + List options = new ArrayList<>(); + for (int i = 0; i < testCase.size(); i++) { + String line = testCase.get(i); + if (line.startsWith("=== options")) { + optionPosn = i; + options.add(line); + continue; + } + if (optionPosn == -1) { + continue; + } + if (line.startsWith("=== run")) { + break; + } + if (line.startsWith("=== ")) { + return testCase; + } + options.add(line); + } + if (optionPosn == -1 || options.isEmpty()) { + return testCase; + } + List rewritten = new ArrayList<>(); + boolean inOptions = false; + boolean 
didOptions = false; + for (String line : testCase) { + if (line.startsWith("=== schema")) { + rewritten.addAll(options); + } + if (!didOptions && line.startsWith("=== options")) { + inOptions = true; + continue; + } + if (line.startsWith("=== ")) { + if (inOptions) { + didOptions = true; + } + inOptions = false; + } + if (!inOptions) { + rewritten.add(line); + } + } + return rewritten; + } + + public static class SectionLines + { + final String name; + final String heading; + final List lines = new ArrayList<>(); + + public SectionLines(String line) + { + this.heading = line; + if (line.startsWith("====")) { + this.name = "="; + } else { + Pattern p = Pattern.compile("=== ([^ ]+) ?.*"); + Matcher m = p.matcher(line); + if (!m.matches()) { + throw new ISE("Unmatched header: " + line); + } + this.name = m.group(1); + } + } + + public SectionLines() + { + this.name = null; + this.heading = null; + } + + public void appendTo(List lines) + { + if (this.heading != null) { + lines.add(this.heading); + } + lines.addAll(this.lines); + } + } + + private static List parseSections(List testCase) + { + List sections = new ArrayList<>(); + SectionLines currentSection = null; + for (String line : testCase) { + if (line.startsWith("===")) { + currentSection = new SectionLines(line); + sections.add(currentSection); + } else { + if (currentSection == null) { + currentSection = new SectionLines(); + sections.add(currentSection); + } + currentSection.lines.add(line); + } + } + return sections; + } + + @SuppressWarnings("unused") + private static List unparseSections(List sections) + { + List testCase = new ArrayList<>(); + for (SectionLines section : sections) { + section.appendTo(testCase); + } + return testCase; + } + + private static List rewriteCaseMergeOptions(List testCase) + { + List sections = parseSections(testCase); + int runCount = 0; + SectionLines mainOptions = null; + SectionLines runOptions = null; + + for (SectionLines section : sections) { + if (section.name == null) 
{ + return testCase; + } else if ("options".equals(section.name)) { + if (runCount == 0) { + mainOptions = section; + } else if (runOptions != null) { + return testCase; + } else { + runOptions = section; + } + } else if ("run".equals(section.name)) { + runCount++; + if (runCount > 1) { + return testCase; + } + } + } + if (runCount == 0 || mainOptions == null || runOptions == null) { + return testCase; + } + + List rewritten = new ArrayList<>(); + for (SectionLines section : sections) { + if (section == mainOptions) { + rewritten.add(section.heading); + rewritten.addAll(runOptions.lines); + rewritten.addAll(mainOptions.lines); + } else if (section == runOptions) { + // Skip + } else if ("run".equals(section.name)) { + // Skip + } else { + section.appendTo(rewritten); + } + } + return rewritten; + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseTest.java new file mode 100644 index 000000000000..4c7d79aa5bb3 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseTest.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.sql.calcite.tester; + +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * Tests the test case class. + */ +public class TestCaseTest +{ + @Test + public void testSql() + { + String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n"; + List cases = TestCaseLoader.loadString(input); + assertEquals("SELECT 1", cases.get(0).sql()); + } + + public void expectOK(PatternSection expected, String actual) + { + ActualResults.ErrorCollector errors = new ActualResults.ErrorCollector(); + expected.verify(actual, errors); + assertTrue(errors.ok()); + } + + public void expectError(PatternSection expected, String actual) + { + ActualResults.ErrorCollector errors = new ActualResults.ErrorCollector(); + expected.verify(actual, errors); + assertFalse(errors.ok()); + } + + @Test + public void testOneLiteral() + { + String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + " a plan \n"; + List cases = TestCaseLoader.loadString(input); + QueryTestCase testCase = cases.get(0); + + expectOK(testCase.plan(), "a plan"); + expectOK(testCase.plan(), " a plan "); + expectOK(testCase.plan(), " a plan \n"); + expectOK(testCase.plan(), "a plan\n\n"); + + expectError(testCase.plan(), ""); + expectError(testCase.plan(), "wrong"); + String actual = + "a plan\n" + + "bogus"; + expectError(testCase.plan(), actual); + + actual = + "a plan\n" + + "\n" + + "bogus"; + expectError(testCase.plan(), actual); + } + + @Test + public void testMultipleLiterals() + { + String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + " a plan \n" + + " second\n" + + " third\n"; + List cases = TestCaseLoader.loadString(input); + QueryTestCase testCase = cases.get(0); + + { + final String actual = + " a plan \n" + + " second\n" + + " third\n"; + expectOK(testCase.plan(), actual); + } + + { + 
final String actual = + "a plan\n" + + "second \n" + + "third\n"; + expectOK(testCase.plan(), actual); + } + + { + final String actual = + " a plan \n" + + " second'n" + + " third\n" + + " extra\n"; + expectError(testCase.plan(), actual); + } + + { + final String actual = + " a plan \n" + + " second\n"; + expectError(testCase.plan(), actual); + } + } + + @Test + public void testRegex() + { + String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + "!count \\d+\n" + + "!timestamp .+\n"; + List cases = TestCaseLoader.loadString(input); + QueryTestCase testCase = cases.get(0); + + { + final String actual = + " count 1234 \n" + + "timestamp 2021-04-29T12:13:14 "; + expectOK(testCase.plan(), actual); + } + + { + final String actual = + " count 1234x \n" + + "timestamp 2021-04-29T12:13:14 \n"; + expectError(testCase.plan(), actual); + } + + { + final String actual = + " count 1234 \n" + + "timestamp\n"; + expectError(testCase.plan(), actual); + } + + { + final String actual = + " count 1234 \n" + + "bogus\n"; + expectError(testCase.plan(), actual); + } + } + + @Test + public void testSkip() + { + String input = + "=== case\n" + + "=== SQL\n" + + "SELECT 1\n" + + "=== plan\n" + + "!count \\d+\n" + + "**\n" + + "end\n"; + List cases = TestCaseLoader.loadString(input); + QueryTestCase testCase = cases.get(0); + + { + final String actual = + " count 1234 \n" + + "end\n"; + expectOK(testCase.plan(), actual); + } + + { + final String actual = + " count 1234 \n" + + " ignored \n" + + " abc 123\n" + + "end\n"; + expectOK(testCase.plan(), actual); + } + + { + final String actual = + " count 1234 \n" + + "bogus\n"; + expectError(testCase.plan(), actual); + } + + { + final String actual = + " count 1234 \n" + + " ignored \n" + + " abc 123\n" + + "bogus\n"; + expectError(testCase.plan(), actual); + } + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseWriter.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseWriter.java new file mode 100644 index 000000000000..718b8a834beb --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestCaseWriter.java @@ -0,0 +1,288 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import com.google.api.client.util.Strings; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.server.security.ResourceAction; +import org.apache.druid.sql.http.SqlParameter; + +import java.io.IOException; +import java.io.Writer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Writes (emits) the test case file format. 
+ */ +public class TestCaseWriter +{ + private final Writer writer; + + public TestCaseWriter(Writer writer) + { + this.writer = writer; + } + + public void emitCase(String label) throws IOException + { + emitSection("case", label); + } + + public void emitComment(List comment) throws IOException + { + writer.append("==============================================================\n"); + if (comment == null) { + return; + } + emitLines(comment); + } + + public void emitComment(String comment) throws IOException + { + writer.append("==============================================================\n"); + emitOptionalLine(comment); + } + + public void emitCopy(String section) throws IOException + { + writer.append("=== ") + .append(section) + .append(" copy\n"); + } + + public void emitSql(String sql) throws IOException + { + emitSection("SQL", sql); + } + + public void emitContext(Map context) throws IOException + { + emitMap("context", context); + } + + public void emitOptions(Map options) throws IOException + { + emitMap("options", options); + } + + private void emitMap(String section, Map map) throws IOException + { + if (map.isEmpty()) { + return; + } + emitSection(section); + List keys = new ArrayList<>(map.keySet()); + Collections.sort(keys); + for (String key : keys) { + writer.append(key) + .append("=") + .append(map.get(key).toString()) + .append("\n"); + } + } + + public void emitUser(String user) throws IOException + { + emitSection("user", user); + } + + public void emitParameters(List parameters) throws IOException + { + emitSection("parameters"); + for (SqlParameter p : parameters) { + if (p == null) { + writer.append("null\n"); + } else { + writer.append(StringUtils.toLowerCase(p.getType().name())) + .append(": ") + .append(QueryTestCases.valueToString(p.getValue())) + .append("\n"); + } + } + } + + public void emitException(Exception exception) throws IOException + { + emitSection("exception", exception.getClass().getSimpleName()); + } + + public void 
emitError(Exception exception) throws IOException + { + emitSection("error", exception.getMessage()); + } + + public void emitResources(Set resourceActions) throws IOException + { + emitSection("resources"); + + // Sort resources so the output is deterministic. Some queries both + // read and write the same datasource, so include the action in the sort. + List actions = new ArrayList<>(resourceActions); + Collections.sort( + actions, + (l, r) -> { + int value = l.getResource().getType().compareTo(r.getResource().getType()); + if (value != 0) { + return value; + } + value = l.getResource().getName().compareTo(r.getResource().getName()); + if (value != 0) { + return value; + } + return l.getAction().compareTo(r.getAction()); + } + ); + for (ResourceAction action : actions) { + emitOptionalLine(new ResourcesSection.Resource(action).toString()); + } + } + + public void emitResources(List resources) throws IOException + { + emitSection("resources"); + + for (ResourcesSection.Resource resource : resources) { + writer + .append(resource.type) + .append("/") + .append(resource.name) + .append("/") + .append(resource.action.name()) + .append("\n"); + } + } + + void emitSchema(String[] schema) throws IOException + { + emitSection("schema", schema); + } + + void emitNative(String nativeQuery) throws IOException + { + emitSection("native", nativeQuery); + } + + void emitPlan(String plan) throws IOException + { + emitSection("plan", plan); + } + + void emitResults(String[] schema) throws IOException + { + emitSection("results", schema); + } + + void emitResults(List schema) throws IOException + { + emitSection("results", schema); + } + + void emitSection(String section, String[] body) throws IOException + { + if (body == null) { + return; + } + emitSection(section); + for (String line : body) { + emitOptionalLine(line); + } + } + + void emitSection(String section, List body) throws IOException + { + if (body == null) { + return; + } + emitSection(section); + emitLines(body); + 
} + + public void emitSection(String section, String body) throws IOException + { + if (body == null) { + return; + } + emitSection(section); + emitOptionalLine(body); + } + + public void emitSection(String section) throws IOException + { + writer.append("=== ") + .append(section) + .append("\n"); + } + + public void emitLines(List lines) throws IOException + { + for (String line : lines) { + emitLine(line); + } + } + + public void emitOptionalLine(String line) throws IOException + { + if (!Strings.isNullOrEmpty(line)) { + emitLine(line); + } + } + + public void emitLine(String line) throws IOException + { + writer.append(line); + if (!line.endsWith("\n")) { + writer.append('\n'); + } + } + + public void emitPattern(String line) throws IOException + { + writer.append("!"); + emitOptionalLine(line); + } + + public void emitLiteral(String line) throws IOException + { + if (line.length() == 0) { + emitLine(line); + return; + } + char c = line.charAt(0); + if ("\\=*!".indexOf(c) != -1) { + writer.append("\\"); + return; + } + emitLine(line); + } + + public void emitErrors(List errors) throws IOException + { + if (errors.isEmpty()) { + return; + } + // Errors emitted as a comment. + emitOptionalLine("==== Verification Errors ===="); + emitLines(errors); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestSection.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestSection.java new file mode 100644 index 000000000000..efed5b26bbf2 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TestSection.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.druid.java.util.common.StringUtils; + +import java.io.IOException; + +/** + * One section of the test case. + */ +public abstract class TestSection +{ + /** + * Enum which identifies the supported test sections. + */ + public enum Section + { + CASE("case"), + SQL("sql"), + CONTEXT("context"), + USER("user"), + PARAMETERS("parameters"), + OPTIONS("options"), + AST("ast"), + UNPARSED("unparsed"), + PLAN("plan"), + EXEC_PLAN("execplan"), + SCHEMA("schema"), + TARGET_SCHEMA("targetschema"), + ERROR("error"), + EXCEPTION("exception"), + EXPLAIN("explain"), + NATIVE("native"), + RESOURCES("resources"), + RESULTS("results"), + COMMENTS("="); + + private final String name; + + Section(String name) + { + this.name = name; + } + + public String sectionName() + { + return name; + } + + public static Section forSection(String section) + { + section = StringUtils.toLowerCase(section); + for (Section value : values()) { + if (value.name.equals(section)) { + return value; + } + } + return null; + } + } + + protected final String name; + protected final boolean copy; + + protected TestSection(String name, boolean copy) + { + this.name = name; + this.copy = copy; + } + + public abstract TestSection.Section section(); + public abstract TestSection copy(); + + public String name() + { + return name; + } + + public boolean isCopy() + { + return copy; + } + + public void write(TestCaseWriter writer) throws IOException + { + if (copy) { + writer.emitCopy(name); + } else { + writeSection(writer); 
+ } + } + + protected abstract void writeSection(TestCaseWriter writer) throws IOException; +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/TextSection.java b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TextSection.java new file mode 100644 index 000000000000..e7b6e5fdda25 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/TextSection.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.sql.calcite.tester; + +import org.apache.druid.java.util.common.StringUtils; + +import java.io.IOException; +import java.util.Objects; + +abstract class TextSection extends TestSection +{ + /** + * The SQL test case section. + */ + protected static class SqlSection extends TextSection + { + protected SqlSection(String name, String text) + { + this(name, text, false); + } + + protected SqlSection(String name, String text, boolean copy) + { + super(name, copy, text); + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.SQL; + } + + @Override + public TestSection copy() + { + return new SqlSection(name, text, true); + } + } + + /** + * The exception test case section. 
+ */ + public static class ExceptionSection extends TextSection + { + protected ExceptionSection(String text) + { + this(text, false); + } + + protected ExceptionSection(String text, boolean copy) + { + super(Section.EXCEPTION.sectionName(), copy, text); + } + + @Override + public TestSection.Section section() + { + return TestSection.Section.EXCEPTION; + } + + @Override + public TestSection copy() + { + return new ExceptionSection(text, true); + } + + public boolean verify(Exception e, ActualResults.ErrorCollector errors) + { + Throwable cause = e; + while (cause != null) { + if (text.equals(cause.getClass().getSimpleName())) { + return true; + } + cause = cause.getCause(); + } + errors.setSection(section().sectionName()); + errors.add(StringUtils.format( + "Expected exception [%s] but got [%s]: [%s]", + text, + e.getClass().getSimpleName(), + e.getMessage())); + return false; + } + } + + protected final String text; + + protected TextSection(String name, boolean copy, String text) + { + super(name, copy); + this.text = text; + } + + public String text() + { + return text; + } + + @Override + public void writeSection(TestCaseWriter writer) throws IOException + { + writer.emitSection(name, text); + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + TextSection other = (TextSection) o; + return text.equals(other.text); + } + + /** + * Never used (doesn't make sense). But, needed to make static checks happy. 
+ */ + @Override + public int hashCode() + { + return Objects.hash(text); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/tester/internals.md b/sql/src/test/java/org/apache/druid/sql/calcite/tester/internals.md new file mode 100644 index 000000000000..a23386346d16 --- /dev/null +++ b/sql/src/test/java/org/apache/druid/sql/calcite/tester/internals.md @@ -0,0 +1,709 @@ + + +# Planner Test Internals + +This page explains how the test framework does its job. You don't havt to know +this information to use the framework, but it certainly helps as the test +setup is complex and somewhat fragile as it depends on mocks and special-case +code: you may have to know how to fix or extend it. + +This page also provides a reference for the case file structure. + +## Introduction + +Druid is a bit of a black box: queries go in and results come out: + +```text + +-------+ +-------+ +---------+ + | query | --> | Druid | --> | results | + +-------+ +-------+ +---------+ + SQL JSON, CSV +``` + +While this is the perfect view for users (who, after all, just want to +query data), it is somewhat awkward for testing: the Druid black box is a +bit of a "then a miracle occurs" element from a testing perspective. + +Internally, Druid actually has two main stages: plan and run: + +```text + +-------+ +---------+ +------+ +--------+ +---------+ + | query | --> | Planner | --> | Plan | --> | Engine | --> | results | + +-------+ +---------+ +------+ +--------+ +---------+ + SQL Druid Calcite Broker + JSON, CSV + Planner + Plan + Historical + Calcite Native Query +``` + +In fact, the process is even more complex as there are up to three +parts to the planning process: + +* Calcite: parse SQL, produce a logical plan, and optimize. +* Druid native query +* "Physical plan" sent to the execution engine (mostly for `INSERT`). + +We observe that the plan (in Druid's case, the native query) is the +API between the planner and the engine. 
The planner's job is to translate +SQL to a native query (by way of a Calcite logical plan), and the job of +the Broker + Historical "engine" is to faithfully execute the native +query it is given. As a result, we can test the planner and engine +separately. + +The same is true for `INSERT` queries, though since the implementation is +an extension, the actual checking of the corresponding physical plan +must also be done via an extension. + +The goal of this package is to provide detailed, repeatable testing of +the planner, while making the planner's artifacts easy to visualize and +inspect. Visualization is important: the planner is complex and the best +way to understand (and test) it is by looking at its artifacts. These +include: + +* The Calcite logical plan. +* The output schema. +* The Druid native query. + +We assert that the planner works (a change did not break anything) if +the same SQL as input produces the same planner outputs both before and +after the change. Or, in other cases, that the *only* change in outputs +is the one we intended to produce: there are no accidental side effects. + +### Test Flow + +To test, we create a test case that defines what to test: + +* The SQL query +* Planner settings (if any) +* Query context +* Parameter values (if any) + +Then, we define what we expect the planner to produce: + +* The Calcite logical plan. +* The output schema. +* The Druid native query. +* Expected errors (if the query should fail.) + +We then define a JUnit test which sets up the planner with whatever +configuration we require (such as a set of views, sample inputs, etc.) +The test flow is then: + +```text + +-----------+ +------------+ +-----------+ + | Test Case | --> | JUnit Test | --> | Pass/Fail | + +-----------+ +------------+ +-----------+ +``` + +See `v2.DruidPlannerTest.java` for the test case. See +`calcite/cases/*.case` for the test case inputs. + +## Case File Structure + +The case file consists of comments and zero or more cases.
The file has a +rather unusual syntax: lines that start with three equal signs (`===`) +indicate sections. Why the odd syntax? Test cases include JSON, SQL +comments and CSV results. We need a syntax that is very unlikely to collide +with these various contents. Example case structure: + +```text +=== case +I'm a test case +=== sql +SELECT * FROM myTable +``` + +The case file starts with comments which is a handy way to include a +copyright notice: everything up to the first section boundary is a +comment. + +There are two kinds of sections: contents and comments. Content sections +have names which are case-insensitive. For example, both `sql` and `SQL` +are fine. Everything from the section head to the next section head is the +body of that one section. + +Content sections themselves are of two kinds, though their syntax is +identical: + +* Inputs to the planner +* Expected outputs from the planner + +All sections (aside from `case` and `sql`) are optional. Provide the input +sections only if the input is needed, provide the output sections only for +those items to be verified. A test case with no expected output sections +will run and assert that the query does not throw an exception. + +Sections can be copied from the previous tests (see below.) Many output +sections support regular expressions (see below.) + +### Comments + +A comment section is any section that starts with four or more +equal signs. This is a handy way to separate tests and provide comments +about the test: + +```text +Test cases from the CalciteArraysQueryTest file + +============================================================== +Converted from testSelectConstantArrayExpressionFromTable() + +Verifies the array literal syntax. +=== case +SELECT constant array expression from table +=== SQL +SELECT ARRAY[1,2] as arr, dim1 FROM foo LIMIT 1 +``` + +By convention, the comments that immediately precede a test +case are assumed to describe that test case.
Here the preceding +comments are the "Converted from..." lines, but not the +"Test cases from..." lines. + +When a test is converted from an existing Java-based test, reference +the test function as shown above. Otherwise, explain the purpose +of the test, or explain any unusual characteristics. (See the +existing cases for examples.) + +### `case` Section + +The `case` section must be the first one in each test case: it +announces the start of a new test. Everything in the case section +is the test label: it will appear in the logs if an error is found +for the test. This works best if the label is a single line. If +converting from an existing Java test, just convert the method +name to words. + +### `SQL` Section + +The SQL section contains the query for the test and is required. (It is hard +to test the planner without a SQL statement.) Please format the SQL statement +nicely as it can contain newlines: + +```text +=== SQL +SELECT + ARRAY[1,2] as arr, + dim1 +FROM foo +LIMIT 1 +``` + +### `context` Section + +The `context` section provides the query context, in "Java properties" format: +that is, as `name=value` pairs: + +```text +=== context +maxSubqueryRows=2 +``` + +The tests use metadata to determine the context variable type. In the above, +metadata tells +us that `maxSubqueryRows` is an `int`, so the value is converted to an `int` +internally. The type is assumed to be `String` if there is no metadata. If you +add a query context value, or use one not in the `QueryContexts` metadata table, +you may encounter an error if the test case loader guesses the type wrong. To fix +the issue, add your parameter to the `QueryContexts` metadata table. + +As a result, you can choose to quote strings or not. You must quote +strings if they start or end with spaces: + +``` +=== context +example=" quote me! " +``` + +### `parameters` Section + +Druid supports query parameters. The `parameters` section provides the +parameter values to use when planning the query.
Parameter values are typed, +so you provide values using a `type: value` syntax: + +```text +=== sql +SELECT + foo, + bar + FROM myTable + WHERE foo = ? + AND bar < ? +=== parameters +varchar: "a" +integer: 1 +``` + +Use SQL types: `varchar` for `string`, `bigint` for `long`, etc. +`int` is accepted as a shorthand for `integer`. +As a convenience the code also accepts the Druid types (`string`, `long`). +But, since this is SQL, it is better to use the SQL types. + +Names are case-insensitive: + +```text +=== parameters +VARCHAR: "a" +INT: 1 +``` + +SQL requires that there be one parameter value for each parameter in the +query, listed in the order that the parameters appear textually in the query. + +Quoting of strings is optional. If unquoted, leading and trailing whitespace +is removed. Use quotes if you want to include such whitespace: + +```text +=== parameters +VARCHAR: " quote me! " +INT: 1 +``` + +### `options` Section + +The `options` section provides instructions for setting up the planner or for +running the test. Options specify things which would otherwise be specified in code +or in the various `.properties` files. The names are mostly specific to this test +framework. + +```text +=== options +failure=run +replacewithDefault=true +``` + +Supported option names include: + +* `planner.