diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/NereidsSqlCacheManager.java b/fe/fe-core/src/main/java/org/apache/doris/common/NereidsSqlCacheManager.java
index ad047a1693eb7c..8989375c07f7d2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/NereidsSqlCacheManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/NereidsSqlCacheManager.java
@@ -51,6 +51,7 @@
 import org.apache.doris.proto.InternalService;
 import org.apache.doris.proto.Types.PUniqueId;
 import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.qe.ResultSet;
 import org.apache.doris.qe.cache.CacheAnalyzer;
 import org.apache.doris.qe.cache.SqlCache;
@@ -96,8 +97,8 @@ public static synchronized void updateConfig() {
     }

     private static Cache buildSqlCaches(int sqlCacheNum, long cacheIntervalSeconds) {
-        sqlCacheNum = sqlCacheNum <= 0 ? 100 : sqlCacheNum;
-        cacheIntervalSeconds = cacheIntervalSeconds <= 0 ? 30 : cacheIntervalSeconds;
+        sqlCacheNum = sqlCacheNum < 0 ? 100 : sqlCacheNum;
+        cacheIntervalSeconds = cacheIntervalSeconds < 0 ? 30 : cacheIntervalSeconds;

         return Caffeine.newBuilder()
                 .maximumSize(sqlCacheNum)
@@ -107,6 +108,22 @@ private static Cache buildSqlCaches(int sqlCacheNum, lo
                 .build();
     }

+    /** tryAddFeCache */
+    public void tryAddFeSqlCache(ConnectContext connectContext, String sql) {
+        Optional sqlCacheContextOpt = connectContext.getStatementContext().getSqlCacheContext();
+        if (!sqlCacheContextOpt.isPresent()) {
+            return;
+        }
+
+        SqlCacheContext sqlCacheContext = sqlCacheContextOpt.get();
+        UserIdentity currentUserIdentity = connectContext.getCurrentUserIdentity();
+        String key = currentUserIdentity.toString() + ":" + sql.trim();
+        if ((sqlCaches.getIfPresent(key) == null) && sqlCacheContext.getOrComputeCacheKeyMd5() != null
+                && sqlCacheContext.getResultSetInFe().isPresent()) {
+            sqlCaches.put(key, sqlCacheContext);
+        }
+    }
+
     /** tryAddCache */
     public void tryAddCache(
             ConnectContext connectContext, String sql,
@@ -178,6 +195,19 @@ public Optional tryParseSql(ConnectContext connectContext, Stri
         }

         try {
+            Optional resultSetInFe = sqlCacheContext.getResultSetInFe();
+            if (resultSetInFe.isPresent()) {
+                MetricRepo.COUNTER_CACHE_HIT_SQL.increase(1L);
+
+                String cachedPlan = sqlCacheContext.getPhysicalPlan();
+                LogicalSqlCache logicalSqlCache = new LogicalSqlCache(
+                        sqlCacheContext.getQueryId(), sqlCacheContext.getColLabels(),
+                        sqlCacheContext.getResultExprs(), resultSetInFe, ImmutableList.of(),
+                        "none", cachedPlan
+                );
+                return Optional.of(logicalSqlCache);
+            }
+
             Status status = new Status();
             PUniqueId cacheKeyMd5 = sqlCacheContext.getOrComputeCacheKeyMd5();
             InternalService.PFetchCacheResult cacheData =
@@ -195,7 +225,9 @@ public Optional tryParseSql(ConnectContext connectContext, Stri
                 LogicalSqlCache logicalSqlCache = new LogicalSqlCache(
                         sqlCacheContext.getQueryId(), sqlCacheContext.getColLabels(),
-                        sqlCacheContext.getResultExprs(), cacheValues, backendAddress, cachedPlan);
+                        sqlCacheContext.getResultExprs(), Optional.empty(),
+                        cacheValues, backendAddress, cachedPlan
+                );
                 return Optional.of(logicalSqlCache);
             }
             return invalidateCache(key);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java
index ef9734d4535eea..5c18ffd84c3f13 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java
@@ -22,6 +22,7 @@
 import org.apache.doris.analysis.LiteralExpr;
 import org.apache.doris.analysis.StatementBase;
 import org.apache.doris.catalog.Column;
+import org.apache.doris.catalog.Env;
 import org.apache.doris.catalog.MTMV;
 import org.apache.doris.common.NereidsException;
 import org.apache.doris.common.Pair;
@@ -54,6 +55,7 @@
 import org.apache.doris.nereids.trees.plans.logical.LogicalPlan;
 import org.apache.doris.nereids.trees.plans.logical.LogicalSqlCache;
 import org.apache.doris.nereids.trees.plans.physical.PhysicalCatalogRelation;
+import org.apache.doris.nereids.trees.plans.physical.PhysicalEmptyRelation;
 import org.apache.doris.nereids.trees.plans.physical.PhysicalOneRowRelation;
 import org.apache.doris.nereids.trees.plans.physical.PhysicalPlan;
 import org.apache.doris.nereids.trees.plans.physical.PhysicalResultSink;
@@ -66,8 +68,10 @@
 import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.qe.ResultSet;
 import org.apache.doris.qe.ResultSetMetaData;
+import org.apache.doris.qe.cache.CacheAnalyzer;

 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -168,8 +172,9 @@ public Plan plan(LogicalPlan plan, PhysicalProperties requireProperties,
             LogicalSqlCache logicalSqlCache = (LogicalSqlCache) plan;
             physicalPlan = new PhysicalSqlCache(
                     logicalSqlCache.getQueryId(), logicalSqlCache.getColumnLabels(),
-                    logicalSqlCache.getResultExprs(), logicalSqlCache.getCacheValues(),
-                    logicalSqlCache.getBackendAddress(), logicalSqlCache.getPlanBody()
+                    logicalSqlCache.getResultExprs(), logicalSqlCache.getResultSetInFe(),
+                    logicalSqlCache.getCacheValues(), logicalSqlCache.getBackendAddress(),
+                    logicalSqlCache.getPlanBody()
             );
             return physicalPlan;
         }
@@ -528,31 +533,66 @@ public Optional handleQueryInFe(StatementBase parsedStmt) {
         if (!(parsedStmt instanceof LogicalPlanAdapter)) {
             return Optional.empty();
         }
-        if (!(physicalPlan instanceof PhysicalResultSink)) {
-            return Optional.empty();
+        if (physicalPlan instanceof PhysicalSqlCache
+                && ((PhysicalSqlCache) physicalPlan).getResultSet().isPresent()) {
+            return Optional.of(((PhysicalSqlCache) physicalPlan).getResultSet().get());
         }
-        if (!(((PhysicalResultSink) physicalPlan).child() instanceof PhysicalOneRowRelation)) {
+        if (!(physicalPlan instanceof PhysicalResultSink)) {
             return Optional.empty();
         }
-        PhysicalOneRowRelation physicalOneRowRelation
-                = (PhysicalOneRowRelation) ((PhysicalResultSink) physicalPlan).child();
-        List columns = Lists.newArrayList();
-        List data = Lists.newArrayList();
-        for (int i = 0; i < physicalOneRowRelation.getProjects().size(); i++) {
-            NamedExpression item = physicalOneRowRelation.getProjects().get(i);
-            NamedExpression output = physicalPlan.getOutput().get(i);
-            Expression expr = item.child(0);
-            if (expr instanceof Literal) {
-                LiteralExpr legacyExpr = ((Literal) expr).toLegacyLiteral();
+
+        Optional sqlCacheContext = statementContext.getSqlCacheContext();
+        boolean enableSqlCache
+                = CacheAnalyzer.canUseSqlCache(statementContext.getConnectContext().getSessionVariable());
+        Plan child = physicalPlan.child(0);
+        if (child instanceof PhysicalOneRowRelation) {
+            PhysicalOneRowRelation physicalOneRowRelation = (PhysicalOneRowRelation) physicalPlan.child(0);
+            List columns = Lists.newArrayList();
+            List data = Lists.newArrayList();
+            for (int i = 0; i < physicalOneRowRelation.getProjects().size(); i++) {
+                NamedExpression item = physicalOneRowRelation.getProjects().get(i);
+                NamedExpression output = physicalPlan.getOutput().get(i);
+                Expression expr = item.child(0);
+                if (expr instanceof Literal) {
+                    LiteralExpr legacyExpr = ((Literal) expr).toLegacyLiteral();
+                    columns.add(new Column(output.getName(), output.getDataType().toCatalogDataType()));
+                    data.add(legacyExpr.getStringValueInFe());
+                } else {
+                    return Optional.empty();
+                }
+            }
+
+            ResultSetMetaData metadata = new CommonResultSet.CommonResultSetMetaData(columns);
+            ResultSet resultSet = new CommonResultSet(metadata, Collections.singletonList(data));
+            if (sqlCacheContext.isPresent() && enableSqlCache) {
+                sqlCacheContext.get().setResultSetInFe(resultSet);
+                Env.getCurrentEnv().getSqlCacheManager().tryAddFeSqlCache(
+                        statementContext.getConnectContext(),
+                        statementContext.getOriginStatement().originStmt
+                );
+            }
+            return Optional.of(resultSet);
+        } else if (child instanceof PhysicalEmptyRelation) {
+            List columns = Lists.newArrayList();
+            PhysicalEmptyRelation physicalEmptyRelation = (PhysicalEmptyRelation) physicalPlan.child(0);
+            for (int i = 0; i < physicalEmptyRelation.getProjects().size(); i++) {
+                NamedExpression output = physicalPlan.getOutput().get(i);
                 columns.add(new Column(output.getName(), output.getDataType().toCatalogDataType()));
-                data.add(legacyExpr.getStringValueInFe());
-            } else {
-                return Optional.empty();
             }
+
+            ResultSetMetaData metadata = new CommonResultSet.CommonResultSetMetaData(columns);
+            ResultSet resultSet = new CommonResultSet(metadata, ImmutableList.of());
+            if (sqlCacheContext.isPresent() && enableSqlCache) {
+                sqlCacheContext.get().setResultSetInFe(resultSet);
+                Env.getCurrentEnv().getSqlCacheManager().tryAddFeSqlCache(
+                        statementContext.getConnectContext(),
+                        statementContext.getOriginStatement().originStmt
+                );
+            }
+            return Optional.of(resultSet);
+        } else {
+            return Optional.empty();
         }
-        ResultSetMetaData metadata = new CommonResultSet.CommonResultSetMetaData(columns);
-        ResultSet resultSet = new CommonResultSet(metadata, Collections.singletonList(data));
-        return Optional.of(resultSet);
     }

     @VisibleForTesting
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/SqlCacheContext.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/SqlCacheContext.java
index 6a7dbd2ed9de4f..f3fa61cecaad8b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/SqlCacheContext.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/SqlCacheContext.java
@@ -29,6 +29,7 @@
 import org.apache.doris.nereids.trees.expressions.Variable;
 import org.apache.doris.nereids.util.Utils;
 import org.apache.doris.proto.Types.PUniqueId;
+import org.apache.doris.qe.ResultSet;
 import org.apache.doris.qe.cache.CacheProxy;
 import org.apache.doris.thrift.TUniqueId;
@@ -83,6 +84,7 @@ public class SqlCacheContext {
     private volatile List colLabels;

     private volatile PUniqueId cacheKeyMd5;
+    private volatile ResultSet resultSetInFe;

     public SqlCacheContext(UserIdentity userIdentity, TUniqueId queryId) {
         this.userIdentity = Objects.requireNonNull(userIdentity, "userIdentity cannot be null");
@@ -378,6 +380,14 @@ public void setOriginSql(String originSql) {
         this.originSql = originSql.trim();
     }

+    public Optional getResultSetInFe() {
+        return Optional.ofNullable(resultSetInFe);
+    }
+
+    public void setResultSetInFe(ResultSet resultSetInFe) {
+        this.resultSetInFe = resultSetInFe;
+    }
+
     /** FullTableName */
     @lombok.Data
     @lombok.AllArgsConstructor
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalSqlCache.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalSqlCache.java
index c0c728d8fc9a86..663044d569fc6e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalSqlCache.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalSqlCache.java
@@ -32,6 +32,7 @@
 import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor;
 import org.apache.doris.nereids.util.Utils;
 import org.apache.doris.proto.InternalService;
+import org.apache.doris.qe.ResultSet;
 import org.apache.doris.thrift.TUniqueId;

 import com.google.common.collect.ImmutableList;
@@ -46,6 +47,7 @@ public class LogicalSqlCache extends LogicalLeaf implements SqlCache, TreeString
     private final TUniqueId queryId;
     private final List columnLabels;
     private final List resultExprs;
+    private final Optional resultSetInFe;
     private final List cacheValues;
     private final String backendAddress;
     private final String planBody;
@@ -53,11 +55,13 @@ public class LogicalSqlCache extends LogicalLeaf implements SqlCache, TreeString
     /** LogicalSqlCache */
     public LogicalSqlCache(TUniqueId queryId, List columnLabels,
             List resultExprs,
-            List cacheValues, String backendAddress, String planBody) {
+            Optional resultSetInFe, List cacheValues,
+            String backendAddress, String planBody) {
         super(PlanType.LOGICAL_SQL_CACHE, Optional.empty(), Optional.empty());
         this.queryId = Objects.requireNonNull(queryId, "queryId can not be null");
         this.columnLabels = Objects.requireNonNull(columnLabels, "columnLabels can not be null");
         this.resultExprs = Objects.requireNonNull(resultExprs, "resultExprs can not be null");
+        this.resultSetInFe = Objects.requireNonNull(resultSetInFe, "resultSetInFe can not be null");
         this.cacheValues = Objects.requireNonNull(cacheValues, "cacheValues can not be null");
         this.backendAddress = Objects.requireNonNull(backendAddress, "backendAddress can not be null");
         this.planBody = Objects.requireNonNull(planBody, "planBody can not be null");
@@ -67,6 +71,10 @@ public TUniqueId getQueryId() {
         return queryId;
     }

+    public Optional getResultSetInFe() {
+        return resultSetInFe;
+    }
+
     public List getCacheValues() {
         return cacheValues;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalSqlCache.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalSqlCache.java
index 6e223a49cde915..824ca7e8924058 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalSqlCache.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/physical/PhysicalSqlCache.java
@@ -32,6 +32,8 @@
 import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor;
 import org.apache.doris.nereids.util.Utils;
 import org.apache.doris.proto.InternalService;
+import org.apache.doris.proto.InternalService.PCacheValue;
+import org.apache.doris.qe.ResultSet;
 import org.apache.doris.statistics.Statistics;
 import org.apache.doris.thrift.TUniqueId;
@@ -46,6 +48,7 @@ public class PhysicalSqlCache extends PhysicalLeaf implements SqlCache, TreeStri
     private final TUniqueId queryId;
     private final List columnLabels;
     private final List resultExprs;
+    private final Optional resultSet;
     private final List cacheValues;
     private final String backendAddress;
     private final String planBody;
@@ -53,12 +56,14 @@ public class PhysicalSqlCache extends PhysicalLeaf implements SqlCache, TreeStri
     /** PhysicalSqlCache */
     public PhysicalSqlCache(TUniqueId queryId, List columnLabels,
             List resultExprs,
-            List cacheValues, String backendAddress, String planBody) {
+            Optional resultSet, List cacheValues,
+            String backendAddress, String planBody) {
         super(PlanType.PHYSICAL_SQL_CACHE, Optional.empty(),
                 new LogicalProperties(() -> ImmutableList.of(), () -> FunctionalDependencies.EMPTY_FUNC_DEPS));
         this.queryId = Objects.requireNonNull(queryId, "queryId can not be null");
         this.columnLabels = Objects.requireNonNull(columnLabels, "colNames can not be null");
         this.resultExprs = Objects.requireNonNull(resultExprs, "resultExprs can not be null");
+        this.resultSet = Objects.requireNonNull(resultSet, "resultSet can not be null");
         this.cacheValues = Objects.requireNonNull(cacheValues, "cacheValues can not be null");
         this.backendAddress = Objects.requireNonNull(backendAddress, "backendAddress can not be null");
         this.planBody = Objects.requireNonNull(planBody, "planBody can not be null");
@@ -68,6 +73,10 @@ public TUniqueId getQueryId() {
         return queryId;
     }

+    public Optional getResultSet() {
+        return resultSet;
+    }
+
     public List getCacheValues() {
         return cacheValues;
     }
@@ -90,9 +99,18 @@ public String getPlanBody() {

     @Override
     public String toString() {
+        long rowCount = 0;
+        if (resultSet.isPresent()) {
+            rowCount = resultSet.get().getResultRows().size();
+        } else {
+            for (PCacheValue cacheValue : cacheValues) {
+                rowCount += cacheValue.getRowsCount();
+            }
+        }
         return Utils.toSqlString("PhysicalSqlCache[" + id.asInt() + "]",
                 "queryId", DebugUtil.printId(queryId),
-                "backend", backendAddress
+                "backend", backendAddress,
+                "rowCount", rowCount
         );
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
index 3208bb7a696ff1..fdc75ebf7bd191 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/StmtExecutor.java
@@ -1661,8 +1661,7 @@ private boolean sendCachedValues(MysqlChannel channel, List
             data = Lists.newArrayList(batch.getQueryStatistics() == null ? "0" : batch.getQueryStatistics().getReturnedRows() + "");
"0" : batch.getQueryStatistics().getReturnedRows() + ""); diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java index 161661055a4981..85f37094b02725 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java +++ b/fe/fe-core/src/main/java/org/apache/doris/qe/cache/CacheAnalyzer.java @@ -215,7 +215,8 @@ public static boolean canUseSqlCache(SessionVariable sessionVariable) { } public static boolean commonCacheCondition(SessionVariable sessionVariable) { - return sessionVariable.getSqlSelectLimit() < 0 && sessionVariable.getDefaultOrderByLimit() < 0; + return sessionVariable.getSqlSelectLimit() < 0 && sessionVariable.getDefaultOrderByLimit() < 0 + && !sessionVariable.dryRunQuery; } /** diff --git a/regression-test/suites/nereids_p0/cache/parse_sql_from_sql_cache.groovy b/regression-test/suites/nereids_p0/cache/parse_sql_from_sql_cache.groovy index e73498442ae48b..643175fc61e77c 100644 --- a/regression-test/suites/nereids_p0/cache/parse_sql_from_sql_cache.groovy +++ b/regression-test/suites/nereids_p0/cache/parse_sql_from_sql_cache.groovy @@ -68,9 +68,14 @@ suite("parse_sql_from_sql_cache") { sql "select * from test_use_plan_cache2" assertHasCache "select * from test_use_plan_cache2" - // add empty partition can use cache + // NOTE: in cloud mode, add empty partition can not use cache, because the table version already update, + // but in native mode, add empty partition can use cache sql "alter table test_use_plan_cache2 add partition p6 values[('6'),('7'))" - assertHasCache "select * from test_use_plan_cache2" + if (isCloudMode()) { + assertNoCache "select * from test_use_plan_cache2" + } else { + assertHasCache "select * from test_use_plan_cache2" + } // insert data can not use cache sql "insert into test_use_plan_cache2 values(6, 1)" @@ -279,6 +284,13 @@ suite("parse_sql_from_sql_cache") { sql "create user test_cache_user1 identified by 'DORIS@2024'" def dbName = context.config.getDbNameByFile(context.file) sql """GRANT SELECT_PRIV ON *.* TO test_cache_user1""" + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO test_cache_user1""" + } createTestTable "test_use_plan_cache12" @@ -319,6 +331,13 @@ suite("parse_sql_from_sql_cache") { sql "drop user if exists test_cache_user2" sql "create user test_cache_user2 identified by 'DORIS@2024'" sql """GRANT SELECT_PRIV ON *.* TO test_cache_user2""" + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO test_cache_user2""" + } createTestTable "test_use_plan_cache13" @@ -374,6 +393,13 @@ suite("parse_sql_from_sql_cache") { sql "drop user if exists test_cache_user3" sql "create user test_cache_user3 identified by 'DORIS@2024'" sql """GRANT SELECT_PRIV ON *.* TO test_cache_user3""" + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO test_cache_user3""" + } createTestTable "test_use_plan_cache14" @@ -436,6 +462,13 @@ suite("parse_sql_from_sql_cache") { sql "create user test_cache_user4 identified by 'DORIS@2024'" sql "GRANT SELECT_PRIV ON regression_test.* TO test_cache_user4" sql "GRANT 
SELECT_PRIV ON ${dbName}.test_use_plan_cache15 TO test_cache_user4" + //cloud-mode + if (isCloudMode()) { + def clusters = sql " SHOW CLUSTERS; " + assertTrue(!clusters.isEmpty()) + def validCluster = clusters[0][0] + sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO test_cache_user4""" + } sql "sync" @@ -618,6 +651,69 @@ suite("parse_sql_from_sql_cache") { sql "select * from test_use_plan_cache18" assertHasCache "select * from test_use_plan_cache18" } + }), + extraThread("test_dry_run_query", { + createTestTable "test_use_plan_cache19" + + // after partition changed 10s, the sql cache can be used + sleep(10000) + + sql "set enable_nereids_planner=true" + sql "set enable_fallback_to_original_planner=false" + sql "set enable_sql_cache=true" + + sql "set dry_run_query=true" + assertNoCache "select * from test_use_plan_cache19 order by 1, 2" + def result1 = sql "select * from test_use_plan_cache19 order by 1, 2" + assertTrue(result1.size() == 1) + assertNoCache "select * from test_use_plan_cache19 order by 1, 2" + + sql "set dry_run_query=false" + assertNoCache "select * from test_use_plan_cache19 order by 1, 2" + def result2 = sql "select * from test_use_plan_cache19 order by 1, 2" + assertTrue(result2.size() > 1) + assertHasCache "select * from test_use_plan_cache19 order by 1, 2" + + sql "set dry_run_query=true" + assertNoCache "select * from test_use_plan_cache19 order by 1, 2" + def result3 = sql "select * from test_use_plan_cache19 order by 1, 2" + assertTrue(result3.size() == 1) + assertNoCache "select * from test_use_plan_cache19 order by 1, 2" + }), + extraThread("test_sql_cache_in_fe", { + createTestTable "test_use_plan_cache20" + + sql "alter table test_use_plan_cache20 add partition p6 values[('999'), ('1000'))" + + // after partition changed 10s, the sql cache can be used + sleep(10000) + + sql "set enable_nereids_planner=true" + sql "set enable_fallback_to_original_planner=false" + sql "set enable_sql_cache=true" + + assertNoCache "select * from (select 100 as id)a" + def result1 = sql "select * from (select 100 as id)a" + assertTrue(result1.size() == 1) + + assertHasCache "select * from (select 100 as id)a" + def result2 = sql "select * from (select 100 as id)a" + assertTrue(result2.size() == 1) + + assertNoCache "select * from test_use_plan_cache20 limit 0" + def result3 = sql "select * from test_use_plan_cache20 limit 0" + assertTrue(result3.isEmpty()) + + assertHasCache "select * from test_use_plan_cache20 limit 0" + def result4 = sql "select * from test_use_plan_cache20 limit 0" + assertTrue(result4.isEmpty()) + + assertNoCache "select * from test_use_plan_cache20 where id=999" + def result5 = sql "select * from test_use_plan_cache20 where id=999" + assertTrue(result5.isEmpty()) + assertHasCache "select * from test_use_plan_cache20 where id=999" + def result6 = sql "select * from test_use_plan_cache20 where id=999" + assertTrue(result6.isEmpty()) }) ).get() }