Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions sql/src/main/codegen/config.fmpp
Original file line number Diff line number Diff line change
Expand Up @@ -383,6 +383,7 @@ data: {
# Example: SqlShowDatabases(), SqlShowTables().
statementParserMethods: [
"DruidSqlInsert()"
"DruidSqlExplain()"
]

# List of methods for parsing custom literals.
Expand Down Expand Up @@ -433,6 +434,7 @@ data: {
# "dataTypeParserMethods".
implementationFiles: [
"insert.ftl"
"explain.ftl"
]

includePosixOperators: false
Expand Down
71 changes: 71 additions & 0 deletions sql/src/main/codegen/includes/explain.ftl
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/**
 * Parses an EXPLAIN PLAN statement, allowing custom Druid statements as well.
 * The main change from the SqlExplain() rule is that the statement being explained may now also be a custom Druid
 * statement, as reflected in the DruidQueryOrSqlQueryOrDml() production rule.
 *
 * Since this copies directly from SqlExplain(), it must be kept in sync when updating Calcite in order to pick up
 * any changes and improvements (e.g. support for another explain-plan output format besides JSON or XML).
 */
SqlNode DruidSqlExplain() :
{
SqlNode stmt;
// Default detail level used when the optional ExplainDetailLevel() clause is absent.
SqlExplainLevel detailLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES;
SqlExplain.Depth depth;
final SqlExplainFormat format;
}
{
<EXPLAIN> <PLAN>
// Optional detail-level clause (delegates to the ExplainDetailLevel() production).
[ detailLevel = ExplainDetailLevel() ]
depth = ExplainDepth()
(
// LOOKAHEAD(2) is needed because the first two alternatives both start with <AS>;
// the parser must see the second token (XML vs. JSON) to choose between them.
LOOKAHEAD(2)
<AS> <XML> { format = SqlExplainFormat.XML; }
|
<AS> <JSON> { format = SqlExplainFormat.JSON; }
|
// No AS clause: fall back to plain-text output.
{ format = SqlExplainFormat.TEXT; }
)
// Unlike Calcite's SqlExplain(), the explained statement may also be a custom
// Druid statement — see DruidQueryOrSqlQueryOrDml().
<FOR> stmt = DruidQueryOrSqlQueryOrDml() {
return new SqlExplain(getPos(),
stmt,
detailLevel.symbol(SqlParserPos.ZERO),
depth.symbol(SqlParserPos.ZERO),
format.symbol(SqlParserPos.ZERO),
nDynamicParams);
}
}

/**
 * Parses the statement that follows EXPLAIN PLAN ... FOR: either a custom Druid
 * INSERT (DruidSqlInsert) or a standard query/DML statement (SqlQueryOrDml).
 */
SqlNode DruidQueryOrSqlQueryOrDml() :
{
SqlNode stmt;
}
{
(
// Try the Druid-specific INSERT production first, then fall back to Calcite's
// standard query/DML production.
stmt = DruidSqlInsert()
|
stmt = SqlQueryOrDml()
)
{
return stmt;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -758,7 +758,7 @@ private static class ParsedNodes
private final SqlExplain explain;

@Nullable
private final SqlInsert insert;
private final DruidSqlInsert insert;

private final SqlNode query;

Expand All @@ -767,7 +767,7 @@ private static class ParsedNodes

private ParsedNodes(
@Nullable SqlExplain explain,
@Nullable SqlInsert insert,
@Nullable DruidSqlInsert insert,
SqlNode query,
@Nullable Granularity ingestionGranularity
)
Expand All @@ -781,7 +781,7 @@ private ParsedNodes(
static ParsedNodes create(final SqlNode node) throws ValidationException
{
SqlExplain explain = null;
SqlInsert insert = null;
DruidSqlInsert druidSqlInsert = null;
SqlNode query = node;
Granularity ingestionGranularity = null;

Expand All @@ -791,8 +791,8 @@ static ParsedNodes create(final SqlNode node) throws ValidationException
}

if (query.getKind() == SqlKind.INSERT) {
insert = (SqlInsert) query;
query = insert.getSource();
druidSqlInsert = (DruidSqlInsert) query;
query = druidSqlInsert.getSource();

// Check if ORDER BY clause is not provided to the underlying query
if (query instanceof SqlOrderBy) {
Expand All @@ -803,45 +803,39 @@ static ParsedNodes create(final SqlNode node) throws ValidationException
}
}

// Processing to be done when the original query has either of the PARTITIONED BY or CLUSTERED BY clause
// The following condition should always be true however added defensively
if (insert instanceof DruidSqlInsert) {
DruidSqlInsert druidSqlInsert = (DruidSqlInsert) insert;

ingestionGranularity = druidSqlInsert.getPartitionedBy();

if (druidSqlInsert.getClusteredBy() != null) {
// If we have a CLUSTERED BY clause, extract the information in that CLUSTERED BY and create a new SqlOrderBy
// node
SqlNode offset = null;
SqlNode fetch = null;

if (query instanceof SqlOrderBy) {
SqlOrderBy sqlOrderBy = (SqlOrderBy) query;
// This represents the underlying query free of OFFSET, FETCH and ORDER BY clauses
// For a sqlOrderBy.query like "SELECT dim1, sum(dim2) FROM foo OFFSET 10 FETCH 30 ORDER BY dim1 GROUP BY dim1
// this would represent the "SELECT dim1, sum(dim2) from foo GROUP BY dim1
query = sqlOrderBy.query;
offset = sqlOrderBy.offset;
fetch = sqlOrderBy.fetch;
}
// Creates a new SqlOrderBy query, which may have our CLUSTERED BY overwritten
query = new SqlOrderBy(
query.getParserPosition(),
query,
druidSqlInsert.getClusteredBy(),
offset,
fetch
);
ingestionGranularity = druidSqlInsert.getPartitionedBy();

if (druidSqlInsert.getClusteredBy() != null) {
// If we have a CLUSTERED BY clause, extract the information in that CLUSTERED BY and create a new SqlOrderBy
// node
SqlNode offset = null;
SqlNode fetch = null;

if (query instanceof SqlOrderBy) {
SqlOrderBy sqlOrderBy = (SqlOrderBy) query;
// This represents the underlying query free of OFFSET, FETCH and ORDER BY clauses
// For a sqlOrderBy.query like "SELECT dim1, sum(dim2) FROM foo OFFSET 10 FETCH 30 ORDER BY dim1 GROUP BY dim1
// this would represent the "SELECT dim1, sum(dim2) from foo GROUP BY dim1
query = sqlOrderBy.query;
offset = sqlOrderBy.offset;
fetch = sqlOrderBy.fetch;
}
// Creates a new SqlOrderBy query, which may have our CLUSTERED BY overwritten
query = new SqlOrderBy(
query.getParserPosition(),
query,
druidSqlInsert.getClusteredBy(),
offset,
fetch
);
}
}

if (!query.isA(SqlKind.QUERY)) {
throw new ValidationException(StringUtils.format("Cannot execute [%s].", query.getKind()));
}

return new ParsedNodes(explain, insert, query, ingestionGranularity);
return new ParsedNodes(explain, druidSqlInsert, query, ingestionGranularity);
}

@Nullable
Expand All @@ -851,7 +845,7 @@ public SqlExplain getExplainNode()
}

@Nullable
public SqlInsert getInsertNode()
public DruidSqlInsert getInsertNode()
{
return insert;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,6 @@
import org.hamcrest.MatcherAssert;
import org.junit.After;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.internal.matchers.ThrowableMessageMatcher;

Expand Down Expand Up @@ -559,27 +558,6 @@ public void testInsertWithoutPartitionedBy()
didTest = true;
}

// Currently EXPLAIN PLAN FOR doesn't work with the modified syntax
@Ignore
@Test
public void testExplainInsertWithPartitionedByAndClusteredBy()
{
Assert.assertThrows(
SqlPlanningException.class,
() ->
testQuery(
StringUtils.format(
"EXPLAIN PLAN FOR INSERT INTO dst SELECT * FROM %s PARTITIONED BY DAY CLUSTERED BY 1",
externSql(externalDataSource)
),
ImmutableList.of(),
ImmutableList.of()
)
);
didTest = true;
}

@Ignore
@Test
public void testExplainInsertFromExternal() throws Exception
{
Expand All @@ -592,7 +570,7 @@ public void testExplainInsertFromExternal() throws Exception
.columns("x", "y", "z")
.context(
queryJsonMapper.readValue(
"{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}",
"{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlInsertSegmentGranularity\":\"{\\\"type\\\":\\\"all\\\"}\",\"sqlQueryId\":\"dummy\",\"vectorize\":\"false\",\"vectorizeVirtualColumns\":\"false\"}",
JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT
)
)
Expand Down Expand Up @@ -624,7 +602,6 @@ public void testExplainInsertFromExternal() throws Exception
didTest = true;
}

@Ignore
@Test
public void testExplainInsertFromExternalUnauthorized()
{
Expand Down