From 5ae01340203d847c241f0072286defd9617dc78c Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Mon, 13 Jun 2022 22:08:52 -0700 Subject: [PATCH 1/8] Foundation for the Druid metadata catalog Provides the DB and REST layer, but not yet the integration with the Calcite SQL layer. --- .../metadata/MetadataStorageConnector.java | 2 + .../metadata/MetadataStorageTablesConfig.java | 16 +- .../sqlserver/SQLServerConnectorTest.java | 18 +- .../storage/mysql/MySQLConnectorTest.java | 2 +- .../postgresql/PostgreSQLConnectorTest.java | 14 +- .../MetadataStorageUpdaterJobSpec.java | 14 +- .../druid/catalog/AbstractColumnMetadata.java | 122 ++++ .../druid/catalog/AbstractTableMetadata.java | 281 +++++++++ .../org/apache/druid/catalog/Actions.java | 104 ++++ .../druid/catalog/CachedMetadataCatalog.java | 216 +++++++ .../druid/catalog/CatalogAuthorizer.java | 100 ++++ .../apache/druid/catalog/CatalogClient.java | 139 +++++ .../apache/druid/catalog/CatalogDefns.java | 60 ++ .../apache/druid/catalog/CatalogStorage.java | 120 ++++ .../druid/catalog/CatalogUpdateNotifier.java | 111 ++++ .../org/apache/druid/catalog/ColumnDefn.java | 101 ++++ .../druid/catalog/CommonCacheNotifierEx.java | 118 ++++ .../druid/catalog/DatasourceColumnDefn.java | 104 ++++ .../apache/druid/catalog/DatasourceDefn.java | 364 ++++++++++++ .../apache/druid/catalog/InputColumnDefn.java | 81 +++ .../apache/druid/catalog/InputSourceDefn.java | 225 ++++++++ .../druid/catalog/ListeningNodeSupplier.java | 67 +++ .../druid/catalog/LocalMetadataCatalog.java | 92 +++ .../druid/catalog/MeasureColumnDefn.java | 48 ++ .../apache/druid/catalog/MetadataCatalog.java | 144 +++++ .../druid/catalog/MetastoreManager.java | 60 ++ .../druid/catalog/MetastoreManagerImpl.java | 94 +++ .../druid/catalog/RestUpdateSender.java | 193 +++++++ .../apache/druid/catalog/SchemaRegistry.java | 45 ++ .../druid/catalog/SchemaRegistryImpl.java | 150 +++++ .../org/apache/druid/catalog/TableDefn.java | 91 +++ 
.../org/apache/druid/catalog/TableId.java | 113 ++++ .../org/apache/druid/catalog/TableSpec.java | 268 +++++++++ .../druid/discovery/DruidLeaderClient.java | 12 +- .../druid/guice/CatalogClientModule.java | 78 +++ .../org/apache/druid/guice/CatalogModule.java | 77 +++ .../IndexerSQLMetadataStorageCoordinator.java | 2 +- .../druid/metadata/MetadataRuleManager.java | 3 +- .../druid/metadata/SQLMetadataConnector.java | 41 +- .../metadata/catalog/CatalogManager.java | 100 ++++ .../metadata/catalog/SQLCatalogManager.java | 59 ++ .../metadata/catalog/SQLTableManager.java | 446 +++++++++++++++ .../metadata/catalog/TableDefnManager.java | 115 ++++ .../storage/derby/DerbyConnector.java | 10 + .../storage/derby/DerbyMetadataStorage.java | 21 +- .../server/http/CatalogListenerResource.java | 125 ++++ .../druid/server/http/CatalogResource.java | 490 ++++++++++++++++ .../apache/druid/server/security/Access.java | 3 +- .../server/security/AuthenticationResult.java | 12 +- .../server/security/AuthorizationUtils.java | 3 +- .../server/security/ForbiddenException.java | 6 +- .../druid/catalog/CacheNotifierTest.java | 122 ++++ .../druid/catalog/CatalogObjectTest.java | 149 +++++ .../druid/catalog/CatalogResourceTest.java | 491 ++++++++++++++++ .../apache/druid/catalog/CatalogTests.java | 81 +++ .../druid/catalog/DatasourceDefnTest.java | 204 +++++++ .../apache/druid/catalog/DummyRequest.java | 540 ++++++++++++++++++ .../druid/catalog/InputSourceDefnTest.java | 175 ++++++ .../druid/catalog/MetadataCatalogTest.java | 330 +++++++++++ .../apache/druid/catalog/MockCatalogSync.java | 90 +++ .../metadata/SQLMetadataConnectorTest.java | 22 + .../druid/metadata/catalog/TableIdTest.java | 56 ++ .../metadata/catalog/TableManagerTest.java | 214 +++++++ 63 files changed, 7686 insertions(+), 68 deletions(-) create mode 100644 server/src/main/java/org/apache/druid/catalog/AbstractColumnMetadata.java create mode 100644 server/src/main/java/org/apache/druid/catalog/AbstractTableMetadata.java create 
mode 100644 server/src/main/java/org/apache/druid/catalog/Actions.java create mode 100644 server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java create mode 100644 server/src/main/java/org/apache/druid/catalog/CatalogAuthorizer.java create mode 100644 server/src/main/java/org/apache/druid/catalog/CatalogClient.java create mode 100644 server/src/main/java/org/apache/druid/catalog/CatalogDefns.java create mode 100644 server/src/main/java/org/apache/druid/catalog/CatalogStorage.java create mode 100644 server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java create mode 100644 server/src/main/java/org/apache/druid/catalog/ColumnDefn.java create mode 100644 server/src/main/java/org/apache/druid/catalog/CommonCacheNotifierEx.java create mode 100644 server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java create mode 100644 server/src/main/java/org/apache/druid/catalog/DatasourceDefn.java create mode 100644 server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java create mode 100644 server/src/main/java/org/apache/druid/catalog/InputSourceDefn.java create mode 100644 server/src/main/java/org/apache/druid/catalog/ListeningNodeSupplier.java create mode 100644 server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java create mode 100644 server/src/main/java/org/apache/druid/catalog/MeasureColumnDefn.java create mode 100644 server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java create mode 100644 server/src/main/java/org/apache/druid/catalog/MetastoreManager.java create mode 100644 server/src/main/java/org/apache/druid/catalog/MetastoreManagerImpl.java create mode 100644 server/src/main/java/org/apache/druid/catalog/RestUpdateSender.java create mode 100644 server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java create mode 100644 server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java create mode 100644 server/src/main/java/org/apache/druid/catalog/TableDefn.java create 
mode 100644 server/src/main/java/org/apache/druid/catalog/TableId.java create mode 100644 server/src/main/java/org/apache/druid/catalog/TableSpec.java create mode 100644 server/src/main/java/org/apache/druid/guice/CatalogClientModule.java create mode 100644 server/src/main/java/org/apache/druid/guice/CatalogModule.java create mode 100644 server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java create mode 100644 server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java create mode 100644 server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java create mode 100644 server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java create mode 100644 server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java create mode 100644 server/src/main/java/org/apache/druid/server/http/CatalogResource.java create mode 100644 server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java create mode 100644 server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java create mode 100644 server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java create mode 100644 server/src/test/java/org/apache/druid/catalog/CatalogTests.java create mode 100644 server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java create mode 100644 server/src/test/java/org/apache/druid/catalog/DummyRequest.java create mode 100644 server/src/test/java/org/apache/druid/catalog/InputSourceDefnTest.java create mode 100644 server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java create mode 100644 server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java create mode 100644 server/src/test/java/org/apache/druid/metadata/catalog/TableIdTest.java create mode 100644 server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java diff --git a/core/src/main/java/org/apache/druid/metadata/MetadataStorageConnector.java 
b/core/src/main/java/org/apache/druid/metadata/MetadataStorageConnector.java index 45fb6639082c..0e0e52abf190 100644 --- a/core/src/main/java/org/apache/druid/metadata/MetadataStorageConnector.java +++ b/core/src/main/java/org/apache/druid/metadata/MetadataStorageConnector.java @@ -87,5 +87,7 @@ default void exportTable( void createSupervisorsTable(); + void createTableDefnTable(); + void deleteAllRecords(String tableName); } diff --git a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java index 766efabb5261..dc24734a5ba0 100644 --- a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java +++ b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java @@ -30,9 +30,11 @@ */ public class MetadataStorageTablesConfig { + public static final String CONFIG_BASE = "druid.metadata.storage.tables"; + public static MetadataStorageTablesConfig fromBase(String base) { - return new MetadataStorageTablesConfig(base, null, null, null, null, null, null, null, null, null, null); + return new MetadataStorageTablesConfig(base, null, null, null, null, null, null, null, null, null, null, null); } public static final String TASK_ENTRY_TYPE = "task"; @@ -76,6 +78,9 @@ public static MetadataStorageTablesConfig fromBase(String base) @JsonProperty("supervisors") private final String supervisorTable; + @JsonProperty("tableDefn") + private final String tableDefnTable; + @JsonCreator public MetadataStorageTablesConfig( @JsonProperty("base") String base, @@ -88,7 +93,8 @@ public MetadataStorageTablesConfig( @JsonProperty("taskLog") String taskLogTable, @JsonProperty("taskLock") String taskLockTable, @JsonProperty("audit") String auditTable, - @JsonProperty("supervisors") String supervisorTable + @JsonProperty("supervisors") String supervisorTable, + @JsonProperty("tableDefn") String tablesTable ) { this.base = (base == null) ? 
DEFAULT_BASE : base; @@ -106,6 +112,7 @@ public MetadataStorageTablesConfig( lockTables.put(TASK_ENTRY_TYPE, this.taskLockTable); this.auditTable = makeTableName(auditTable, "audit"); this.supervisorTable = makeTableName(supervisorTable, "supervisors"); + this.tableDefnTable = makeTableName(tablesTable, "tableDefn"); } private String makeTableName(String explicitTableName, String defaultSuffix) @@ -194,4 +201,9 @@ public String getTaskLockTable() { return taskLockTable; } + + public String getTableDefnTable() + { + return tableDefnTable; + } } diff --git a/extensions-contrib/sqlserver-metadata-storage/src/test/java/org/apache/druid/metadata/storage/sqlserver/SQLServerConnectorTest.java b/extensions-contrib/sqlserver-metadata-storage/src/test/java/org/apache/druid/metadata/storage/sqlserver/SQLServerConnectorTest.java index a31159e88134..9fa13c2c70c7 100644 --- a/extensions-contrib/sqlserver-metadata-storage/src/test/java/org/apache/druid/metadata/storage/sqlserver/SQLServerConnectorTest.java +++ b/extensions-contrib/sqlserver-metadata-storage/src/test/java/org/apache/druid/metadata/storage/sqlserver/SQLServerConnectorTest.java @@ -30,26 +30,13 @@ @SuppressWarnings("nls") public class SQLServerConnectorTest { - @Test public void testIsTransientException() { SQLServerConnector connector = new SQLServerConnector( Suppliers.ofInstance(new MetadataStorageConnectorConfig()), Suppliers.ofInstance( - new MetadataStorageTablesConfig( - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) + MetadataStorageTablesConfig.fromBase(null) ) ); @@ -69,8 +56,7 @@ public void testLimitClause() { SQLServerConnector connector = new SQLServerConnector( Suppliers.ofInstance(new MetadataStorageConnectorConfig()), - Suppliers.ofInstance( - new MetadataStorageTablesConfig(null, null, null, null, null, null, null, null, null, null, null) + Suppliers.ofInstance(MetadataStorageTablesConfig.fromBase(null) ) ); Assert.assertEquals("FETCH NEXT 100 ROWS ONLY", 
connector.limitClause(100)); diff --git a/extensions-core/mysql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/mysql/MySQLConnectorTest.java b/extensions-core/mysql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/mysql/MySQLConnectorTest.java index 23ce46282232..2498e8a65c17 100644 --- a/extensions-core/mysql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/mysql/MySQLConnectorTest.java +++ b/extensions-core/mysql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/mysql/MySQLConnectorTest.java @@ -44,7 +44,7 @@ public String getDriverClassName() private static final Supplier CONNECTOR_CONFIG_SUPPLIER = MetadataStorageConnectorConfig::new; private static final Supplier TABLES_CONFIG_SUPPLIER = - () -> new MetadataStorageTablesConfig(null, null, null, null, null, null, null, null, null, null, null); + () -> MetadataStorageTablesConfig.fromBase(null); @Test diff --git a/extensions-core/postgresql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/postgresql/PostgreSQLConnectorTest.java b/extensions-core/postgresql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/postgresql/PostgreSQLConnectorTest.java index 08f3c333a1fb..4e5e4d85fc06 100644 --- a/extensions-core/postgresql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/postgresql/PostgreSQLConnectorTest.java +++ b/extensions-core/postgresql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/postgresql/PostgreSQLConnectorTest.java @@ -36,19 +36,7 @@ public void testIsTransientException() PostgreSQLConnector connector = new PostgreSQLConnector( Suppliers.ofInstance(new MetadataStorageConnectorConfig()), Suppliers.ofInstance( - new MetadataStorageTablesConfig( - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) + MetadataStorageTablesConfig.fromBase(null) ), new PostgreSQLConnectorConfig(), new PostgreSQLTablesConfig() diff --git 
a/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java index 0f8ca0acfccc..7805374d964d 100644 --- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java +++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java @@ -86,18 +86,6 @@ public String getPassword() //by the code using this public MetadataStorageTablesConfig getMetadataStorageTablesConfig() { - return new MetadataStorageTablesConfig( - null, - null, - null, - segmentTable, - null, - null, - null, - null, - null, - null, - null - ); + return MetadataStorageTablesConfig.fromBase(null); } } diff --git a/server/src/main/java/org/apache/druid/catalog/AbstractColumnMetadata.java b/server/src/main/java/org/apache/druid/catalog/AbstractColumnMetadata.java new file mode 100644 index 000000000000..ea58f609dba0 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/AbstractColumnMetadata.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.MetadataCatalog.ColumnKind; +import org.apache.druid.catalog.MetadataCatalog.ColumnMetadata; +import org.apache.druid.catalog.MetadataCatalog.InputColumnMetadata; +import org.apache.druid.catalog.MetadataCatalog.MeasureMetadata; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.segment.column.ColumnType; + +public abstract class AbstractColumnMetadata implements ColumnMetadata +{ + public static class SimpleColumn extends AbstractColumnMetadata + { + public SimpleColumn(String name, String sqlType) + { + super(name, sqlType); + } + + @Override + public ColumnKind kind() + { + return ColumnKind.SIMPLE; + } + } + + public static class DimensionColumn extends AbstractColumnMetadata + { + public DimensionColumn(String name, String sqlType) + { + super(name, sqlType); + } + + @Override + public ColumnKind kind() + { + return ColumnKind.DIMENSION; + } + } + + public static class MeasureColumn extends AbstractColumnMetadata implements MeasureMetadata + { + private final String aggFn; + + public MeasureColumn(String name, String sqlType, String aggFn) + { + super(name, sqlType); + this.aggFn = aggFn; + } + + @Override + public ColumnKind kind() + { + return ColumnKind.MEASURE; + } + + @Override + public String aggFn() + { + return aggFn; + } + } + + public static class InputColumn extends SimpleColumn implements InputColumnMetadata + { + public InputColumn(String name, String sqlType) + { + super(name, sqlType); + } + + @Override + public ColumnKind kind() + { + return ColumnKind.INPUT; + } + + @Override + public ColumnType druidType() + { + return ColumnDefn.VALID_SQL_TYPES.get(StringUtils.toUpperCase(sqlType)); + } + } + + protected final String name; + protected final String sqlType; + + public AbstractColumnMetadata(String name, String sqlType) + { + this.name = name; + this.sqlType = sqlType; + } + + @Override + public String name() + { + return name; + } + + 
@Override + public String sqlType() + { + return sqlType; + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/AbstractTableMetadata.java b/server/src/main/java/org/apache/druid/catalog/AbstractTableMetadata.java new file mode 100644 index 000000000000..ccfb58afea94 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/AbstractTableMetadata.java @@ -0,0 +1,281 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.catalog.AbstractColumnMetadata.DimensionColumn; +import org.apache.druid.catalog.AbstractColumnMetadata.InputColumn; +import org.apache.druid.catalog.AbstractColumnMetadata.MeasureColumn; +import org.apache.druid.catalog.AbstractColumnMetadata.SimpleColumn; +import org.apache.druid.catalog.MetadataCatalog.ColumnMetadata; +import org.apache.druid.catalog.MetadataCatalog.DatasourceMetadata; +import org.apache.druid.catalog.MetadataCatalog.InputSourceMetadata; +import org.apache.druid.catalog.MetadataCatalog.TableMetadata; +import org.apache.druid.catalog.MetadataCatalog.TableType; +import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.segment.column.RowSignature; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public abstract class AbstractTableMetadata implements TableMetadata +{ + public static class DatasourceTable extends AbstractTableMetadata implements DatasourceMetadata + { + private final String segmentGranularity; + private final String rollupGranularity; + + @JsonCreator + public DatasourceTable( + @JsonProperty("id") TableId id, + @JsonProperty("updateTime") long updateTime, + @JsonProperty("segmentGranularity") String segmentGranularity, + @JsonProperty("rollupGranularity") String rollupGranularity, + @JsonProperty("columns") List columns) + { + super(id, updateTime, columns); + this.segmentGranularity = segmentGranularity; + this.rollupGranularity = rollupGranularity; + } + + public DatasourceTable(TableId id, long updateTime, DatasourceDefn defn) + { + super(id, updateTime, convertColums(defn)); + this.segmentGranularity = 
defn.segmentGranularity(); + this.rollupGranularity = defn.rollupGranularity(); + } + + private static List convertColums(DatasourceDefn defn) + { + boolean isRollup = defn.isRollupTable(); + List converted = new ArrayList<>(); + for (ColumnDefn col : defn.columns()) { + ColumnMetadata mdCol; + if (col instanceof MeasureColumnDefn) { + MeasureColumnDefn measureDefn = (MeasureColumnDefn) col; + mdCol = new MeasureColumn(col.name(), col.sqlType(), measureDefn.aggregateFn()); + } else if (isRollup) { + mdCol = new DimensionColumn(col.name(), col.sqlType()); + } else { + mdCol = new SimpleColumn(col.name(), col.sqlType()); + } + converted.add(mdCol); + } + return converted; + } + + @Override + public TableType type() + { + return TableType.DATASOURCE; + } + + @Override + @JsonProperty("segmentGranularity") + public String segmentGranularity() + { + return segmentGranularity; + } + + @Override + @JsonIgnore + public boolean isRollup() + { + return !isDetail(); + } + + @Override + @JsonIgnore + public boolean isDetail() + { + return rollupGranularity == null; + } + + @Override + @JsonProperty("rollupGranularity") + public String rollupGranularity() + { + return rollupGranularity; + } + } + + public static class InputSourceTable extends AbstractTableMetadata implements InputSourceMetadata + { + private final InputSource inputSource; + private final InputFormat format; + + @JsonCreator + public InputSourceTable( + @JsonProperty("id") TableId id, + @JsonProperty("updateTime") long updateTime, + @JsonProperty("inputSource") InputSource inputSource, + @JsonProperty("format") InputFormat format, + @JsonProperty("columns") List columns + ) + { + super(id, updateTime, columns); + this.inputSource = inputSource; + this.format = format; + } + + public InputSourceTable(TableId id, long updateTime, InputSourceDefn defn) + { + super(id, updateTime, convertColums(defn)); + this.inputSource = defn.inputSource(); + this.format = defn.format(); + } + + private static List 
convertColums(InputSourceDefn defn) + { + List converted = new ArrayList<>(); + for (ColumnDefn col : defn.columns()) { + converted.add(new InputColumn(col.name(), col.sqlType())); + } + return converted; + } + + @Override + public TableType type() + { + return TableType.INPUT; + } + + @JsonProperty("inputSource") + public InputSource inputSource() + { + return inputSource; + } + + @JsonProperty("format") + public InputFormat format() + { + return format; + } + + public RowSignature rowSignature() + { + RowSignature.Builder builder = RowSignature.builder(); + for (ColumnMetadata col : columns) { + builder.add(col.name(), ((InputColumn) col).druidType()); + } + return builder.build(); + } + } + + protected final TableId id; + private final long updateTime; + protected final List columns; + private final Map columnIndex = new HashMap<>(); + + public AbstractTableMetadata( + TableId id, + long updateTime, + List columns + ) + { + this.id = id; + this.updateTime = updateTime; + this.columns = columns; + for (ColumnMetadata col : columns) { + columnIndex.put(col.name(), col); + } + } + + @Override + @JsonProperty("id") + public TableId id() + { + return id; + } + + @Override + @JsonProperty("updateTime") + public long updateTime() + { + return updateTime; + } + + @Override + @JsonProperty("columns") + public List columns() + { + return columns; + } + + @Override + public ColumnMetadata column(String name) + { + return columnIndex.get(name); + } + + public static TableMetadata fromCatalogTable(SchemaDefn schema, TableSpec table) + { + return create(schema, table.id(), table.updateTime(), table.defn()); + } + + public static TableMetadata create( + SchemaDefn schema, + TableId id, + long updateTime, + TableDefn defn) + { + if (defn == null) { + // Useless metadata: adds no information. Should not occur. 
+ return null; + } + TableType tableType = schema.tableType(); + if (tableType == null) { + if (defn instanceof DatasourceDefn) { + tableType = TableType.DATASOURCE; + } else if (defn instanceof InputSourceDefn) { + tableType = TableType.INPUT; + } else { + // TODO: other types + return null; + } + } + switch (tableType) { + case DATASOURCE: + if (!(defn instanceof DatasourceDefn)) { + // Wrong type. Too late to fix it now. Ignore it. + return null; + } + return new DatasourceTable(id, updateTime, (DatasourceDefn) defn); + case INPUT: + if (!(defn instanceof InputSourceDefn)) { + return null; + } + return new InputSourceTable(id, updateTime, (InputSourceDefn) defn); + case VIEW: + // Not yet + return null; + default: + // Don't know what this is, so we don't know how to use it. + // Ignore it. + return null; + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/Actions.java b/server/src/main/java/org/apache/druid/catalog/Actions.java new file mode 100644 index 000000000000..9ba1ae688a63 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/Actions.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.google.common.collect.ImmutableMap; +import org.apache.druid.server.security.ForbiddenException; + +import javax.ws.rs.core.Response; + +import java.util.Map; + +/** + * Helper functions for the catalog REST API actions. + */ +public class Actions +{ + public static final String DUPLICATE_ERROR = "Already exists"; + public static final String FAILED_ERROR = "Failed"; + public static final String INVALID = "Invalid"; + public static final String FORBIDDEN = "Forbidden"; + public static final String NOT_FOUND = "Not found"; + + public static final String ERROR_KEY = "error"; + public static final String ERR_MSG_KEY = "errorMessage"; + + public static Map error(String code, String msg) + { + return ImmutableMap.of(ERROR_KEY, code, ERR_MSG_KEY, msg); + } + + public static Response exception(Exception e) + { + return Response + .serverError() + .entity(error(FAILED_ERROR, e.getMessage())) + .build(); + } + + public static Response badRequest(String code, String msg) + { + return Response + .status(Response.Status.BAD_REQUEST) + .entity(error(code, msg)) + .build(); + } + + public static Response notFound(String msg) + { + return Response + .status(Response.Status.NOT_FOUND) + .entity(error(NOT_FOUND, msg)) + .build(); + } + + public static Response ok() + { + return Response.ok().build(); + } + + public static Response forbidden() + { + return forbidden("Unauthorized"); + } + + public static Response forbidden(ForbiddenException e) + { + return forbidden(e.getMessage()); + } + + public static Response forbidden(String msg) + { + // Like ForbiddenExceptionMapper, but in the standard error + // format. 
Used instead of throwing ForbiddenException + return Response.status(Response.Status.FORBIDDEN) + .entity(error(FORBIDDEN, msg)) + .build(); + } + + public static Response okWithVersion(long version) + { + return Response + .ok() + .entity(ImmutableMap.of("version", version)) + .build(); + + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java new file mode 100644 index 000000000000..ee0d5a49ca2c --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java @@ -0,0 +1,216 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; + +import javax.inject.Inject; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Caching version of the metadata catalog. Draws information + * from a base catalog. Fetches from the base if: + *
    + *
  • A table is requested that has not yet been requested. + * Once requested, the entry is cached, even if the table does + * not exist in the base catalog.
  • + *
  • The contents of a schema are requested, and have not yet + * been fetched.
  • + *
+ * + * Both tables and schemas are cached. In particular, if a table or + * schema is requested, and does not exist in the base catalog, then + * that schema is marked as not existing and won't be fetched again. + * + * The cache is updated via an update facility which either flushes + * the cache (crude) or listens to the base catalog for updates and + * populates the cache with updates. For a local cache, the DB layer + * provides the updates. For a remote cache, the DB host pushes updates. + */ +public class CachedMetadataCatalog implements MetadataCatalog, CatalogListener +{ + public static final int NOT_FETCHED = -1; + public static final int UNDEFINED = 0; + + private static class TableEntry + { + private final TableMetadata table; + + protected TableEntry(SchemaDefn schema, TableSpec table) + { + this.table = table == null + ? null + : AbstractTableMetadata.fromCatalogTable( + schema, + table); + } + + protected long version() + { + return table == null ? UNDEFINED : table.updateTime(); + } + } + + private class SchemaEntry + { + private final SchemaDefn schema; + private long version = NOT_FETCHED; + private final ConcurrentHashMap cache = new ConcurrentHashMap<>(); + + protected SchemaEntry(SchemaDefn schema) + { + this.schema = schema; + } + + protected TableMetadata resolveTable(TableId tableId) + { + TableEntry entry = cache.computeIfAbsent( + tableId.name(), + key -> new TableEntry(schema, base.table(tableId)) + ); + return entry.table; + } + + public synchronized List tables() + { + if (version == UNDEFINED) { + return Collections.emptyList(); + } + if (version == NOT_FETCHED) { + List catalogTables = base.tablesForSchema(schema.name()); + for (TableSpec table : catalogTables) { + update(table); + } + } + List orderedTables = new ArrayList<>(); + + // Get the list of actual tables; excluding any cached "misses". 
+ cache.forEach((k, v) -> { + if (v.table != null) { + orderedTables.add(v.table); + } + }); + orderedTables.sort((e1, e2) -> e1.id().name().compareTo(e2.id().name())); + return orderedTables; + } + + public synchronized void update(TableSpec table) + { + cache.compute( + table.name(), + (k, v) -> v == null || v.version() < table.updateTime() + ? new TableEntry(schema, table) + : v + ); + version = Math.max(version, table.updateTime()); + } + + public void remove(String name) + { + cache.remove(name); + } + + public Set tableNames() + { + Set tables = new HashSet<>(); + cache.forEach((k, v) -> { + if (v.table != null) { + tables.add(k); + } + }); + return tables; + } + } + + private final ConcurrentHashMap schemaCache = new ConcurrentHashMap<>(); + private final CatalogSource base; + private final SchemaRegistry schemaRegistry; + + @Inject + public CachedMetadataCatalog( + CatalogSource catalog, + SchemaRegistry schemaRegistry + ) + { + this.base = catalog; + this.schemaRegistry = schemaRegistry; + } + + @Override + public TableMetadata resolveTable(TableId tableId) + { + SchemaEntry schemaEntry = entryFor(tableId.schema()); + return schemaEntry == null ? null : schemaEntry.resolveTable(tableId); + } + + @Override + public List tables(String schemaName) + { + SchemaEntry schemaEntry = entryFor(schemaName); + return schemaEntry == null ? null : schemaEntry.tables(); + } + + @Override + public void updated(TableSpec table) + { + SchemaEntry schemaEntry = entryFor(table.dbSchema()); + if (schemaEntry != null) { + schemaEntry.update(table); + } + } + + @Override + public void deleted(TableId tableId) + { + SchemaEntry schemaEntry = entryFor(tableId.schema()); + if (schemaEntry != null) { + schemaEntry.remove(tableId.name()); + } + } + + @Override + public Set tableNames(String schemaName) + { + SchemaEntry schemaEntry = entryFor(schemaName); + return schemaEntry == null ? 
Collections.emptySet() : schemaEntry.tableNames(); + } + + public void flush() + { + schemaCache.clear(); + } + + private SchemaEntry entryFor(String schemaName) + { + return schemaCache.computeIfAbsent( + schemaName, + k -> { + SchemaDefn schema = schemaRegistry.schema(k); + return schema == null ? null : new SchemaEntry(schema); + }); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogAuthorizer.java b/server/src/main/java/org/apache/druid/catalog/CatalogAuthorizer.java new file mode 100644 index 000000000000..64d41bdb43c2 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CatalogAuthorizer.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.server.security.Access; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; + +/** + * Encapsulates the details of catalog authorization. + */ +public class CatalogAuthorizer +{ + private final AuthorizerMapper authorizerMapper; + + @Inject + public CatalogAuthorizer( + AuthorizerMapper authorizerMapper) + { + this.authorizerMapper = authorizerMapper; + } + + public AuthorizerMapper mapper() + { + return authorizerMapper; + } + + public void authorizeTable(SchemaDefn schema, String name, Action action, HttpServletRequest request) + { + if (action == Action.WRITE && !schema.writable()) { + throw new ForbiddenException( + "Cannot create table definitions in schema: " + schema.name()); + } + authorize(schema.securityResource(), name, action, request); + } + + public void authorize(String resource, String key, Action action, HttpServletRequest request) + { + final Access authResult = authorizeAccess(resource, key, action, request); + if (!authResult.isAllowed()) { + throw new ForbiddenException(authResult.toString()); + } + } + + public boolean isAuthorized(String resource, String key, Action action, HttpServletRequest request) + { + final Access authResult = authorizeAccess(resource, key, action, request); + return authResult.isAllowed(); + } + + public Access authorizeAccess(String resource, String key, Action action, HttpServletRequest request) + { + return AuthorizationUtils.authorizeResourceAction( + request, + new ResourceAction(new Resource(key, resource), action), + 
authorizerMapper + ); + } + + public ResourceAction resourceAction(SchemaDefn schema, String name, Action action) + { + return new ResourceAction(new Resource(name, schema.securityResource()), action); + } + + public Action inferAction(HttpServletRequest request) + { + switch (request.getMethod()) { + case "GET": + case "HEAD": + return Action.READ; + default: + return Action.WRITE; + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogClient.java b/server/src/main/java/org/apache/druid/catalog/CatalogClient.java new file mode 100644 index 000000000000..49200ca07214 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CatalogClient.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.netty.handler.codec.http.HttpResponseStatus; +import org.apache.druid.catalog.MetadataCatalog.CatalogSource; +import org.apache.druid.client.coordinator.Coordinator; +import org.apache.druid.discovery.DruidLeaderClient; +import org.apache.druid.guice.annotations.Smile; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.http.client.Request; +import org.apache.druid.java.util.http.client.response.StringFullResponseHolder; +import org.apache.druid.server.http.CatalogResource; +import org.codehaus.plexus.util.StringUtils; +import org.jboss.netty.handler.codec.http.HttpHeaders; +import org.jboss.netty.handler.codec.http.HttpMethod; + +import javax.inject.Inject; +import javax.ws.rs.core.MediaType; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * Guice-injected client for the catalog update sync process. Requests + * tables and schemas from the catalog component on the Coordinator. + * + * This class handles any recoverable error case. If this class throws + * an exception, then something went very wrong and there is little the + * caller can do to make things better. All the caller can do is try + * again later and hope things improve. + */ +public class CatalogClient implements CatalogSource +{ + public static final String SCHEMA_SYNC_PATH = CatalogResource.ROOT_PATH + CatalogResource.SCHEMA_SYNC; + public static final String TABLE_SYNC_PATH = CatalogResource.ROOT_PATH + CatalogResource.TABLE_SYNC; + private static final TypeReference> LIST_OF_TABLE_SPECS_TYPE = new TypeReference>() + { + }; + // Not strictly needed as a TypeReference, but doing so makes the code simpler. 
+ private static final TypeReference TABLE_SPEC_TYPE = new TypeReference() + { + }; + + private final DruidLeaderClient coordClient; + private final ObjectMapper smileMapper; + + @Inject + public CatalogClient( + @Coordinator DruidLeaderClient coordClient, + @Smile ObjectMapper smileMapper + ) + { + this.coordClient = coordClient; + this.smileMapper = smileMapper; + } + + @Override + public List tablesForSchema(String dbSchema) + { + String url = StringUtils.replace(SCHEMA_SYNC_PATH, "{dbSchema}", dbSchema); + List results = send(url, LIST_OF_TABLE_SPECS_TYPE); + + // Not found for a list is an empty list. + return results == null ? Collections.emptyList() : results; + } + + @Override + public TableSpec table(TableId id) + { + String url = StringUtils.replace(SCHEMA_SYNC_PATH, "{dbSchema}", id.schema()); + url = StringUtils.replace(url, "{table}", id.name()); + return send(url, TABLE_SPEC_TYPE); + } + + /** + * Send the update. Exceptions are "unexpected": they should never occur in a + * working system. If they occur, something is broken. + * + * @return the requested update, or null if the item was not found in the + * catalog. + */ + private T send(String url, TypeReference typeRef) + { + final Request request; + try { + request = coordClient.makeRequest(HttpMethod.GET, url) + .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON); + } + catch (IOException e) { + throw new ISE("Cannot create catalog sync request"); + } + final StringFullResponseHolder responseHolder; + try { + responseHolder = coordClient.go(request); + } + catch (IOException e) { + throw new ISE(e, "Failed to send catalog sync"); + } + catch (InterruptedException e1) { + // Treat as a not-found: the only way this exception should occur + // is during shutdown. + return null; + } + if (responseHolder.getStatus().getCode() == HttpResponseStatus.NOT_FOUND.code()) { + // Not found means the item disappeared. Returning null means "not found". 
+ return null; + } + if (responseHolder.getStatus().getCode() != HttpResponseStatus.OK.code()) { + throw new ISE("Unexpected status from catalog sync: " + responseHolder.getStatus()); + } + try { + return smileMapper.readValue(responseHolder.getContent(), typeRef); + } + catch (IOException e) { + throw new ISE(e, "Could not decode the JSON response from catalog sync."); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogDefns.java b/server/src/main/java/org/apache/druid/catalog/CatalogDefns.java new file mode 100644 index 000000000000..0e54b6c82cf2 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CatalogDefns.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.java.util.common.ISE; + +import java.io.IOException; + +public class CatalogDefns +{ + public static byte[] toBytes(ObjectMapper jsonMapper, Object obj) + { + try { + return jsonMapper.writeValueAsBytes(obj); + } + catch (JsonProcessingException e) { + throw new ISE("Failed to serialize " + obj.getClass().getSimpleName()); + } + } + + public static T fromBytes(ObjectMapper jsonMapper, byte[] bytes, Class clazz) + { + try { + return jsonMapper.readValue(bytes, clazz); + } + catch (IOException e) { + throw new ISE(e, "Failed to deserialize a " + clazz.getSimpleName()); + } + } + + public static String toString(Object obj) + { + ObjectMapper jsonMapper = new ObjectMapper(); + try { + return jsonMapper.writeValueAsString(obj); + } + catch (JsonProcessingException e) { + throw new ISE("Failed to serialize TableDefn"); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java new file mode 100644 index 000000000000..a45733679b55 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.MetadataCatalog.CatalogSource; +import org.apache.druid.catalog.MetadataCatalog.CatalogUpdateProvider; +import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.metadata.catalog.CatalogManager; +import org.apache.druid.metadata.catalog.TableDefnManager; +import org.apache.druid.server.security.AuthorizerMapper; + +import javax.inject.Inject; + +import java.util.List; + +/** + * Facade over the three internal components used to manage the metadata + * catalog from the REST API. 
+ */ +public class CatalogStorage implements CatalogUpdateProvider, CatalogSource +{ + public class ListenerAdapter implements TableDefnManager.Listener + { + private final CatalogListener dest; + + public ListenerAdapter(CatalogListener dest) + { + this.dest = dest; + } + + @Override + public void added(TableSpec table) + { + dest.updated(table); + } + + @Override + public void updated(TableSpec table) + { + dest.updated(table); + } + + @Override + public void deleted(TableId id) + { + dest.deleted(id); + } + } + + protected final SchemaRegistry schemaRegistry; + protected final CatalogManager catalogMgr; + protected final CatalogAuthorizer authorizer; + + @Inject + public CatalogStorage( + CatalogManager catalogMgr, + AuthorizerMapper authorizerMapper + ) + { + this.schemaRegistry = new SchemaRegistryImpl(); + this.catalogMgr = catalogMgr; + this.authorizer = new CatalogAuthorizer(authorizerMapper); + } + + public CatalogAuthorizer authorizer() + { + return authorizer; + } + + public TableDefnManager tables() + { + return catalogMgr.tables(); + } + + public SchemaRegistry schemaRegistry() + { + return schemaRegistry; + } + + public SchemaDefn resolveSchema(String dbSchema) + { + return schemaRegistry.schema(dbSchema); + } + + @Override + public void register(CatalogListener listener) + { + tables().register(new ListenerAdapter(listener)); + } + + @Override + public List tablesForSchema(String dbSchema) + { + return tables().listDetails(dbSchema); + } + + @Override + public TableSpec table(TableId id) + { + return tables().read(id); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java b/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java new file mode 100644 index 000000000000..f463335455c7 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.RestUpdateSender.RestSender; +import org.apache.druid.discovery.DruidNodeDiscoveryProvider; +import org.apache.druid.discovery.NodeRole; +import org.apache.druid.guice.ManageLifecycle; +import org.apache.druid.guice.annotations.EscalatedClient; +import org.apache.druid.guice.annotations.Smile; +import org.apache.druid.java.util.common.lifecycle.LifecycleStart; +import org.apache.druid.java.util.common.lifecycle.LifecycleStop; +import org.apache.druid.java.util.http.client.HttpClient; +import org.apache.druid.server.DruidNode; +import org.apache.druid.server.http.CatalogListenerResource; +import org.joda.time.Duration; + +import javax.inject.Inject; + +import java.util.Arrays; +import java.util.function.Supplier; + +/** + * Global update notifier for the catalog. Registers itself as a catalog + * listener, then uses the common cache notifier to send Smile-encoded JSON + * updates to broker nodes discovered from node discovery (typically ZooKeeper.) + *

+ * Deletes are encoded as a table update with a table definition of a special + * tombstone type. This saves having the need for two endpoints, or having + * a wrapper class to handle deletes. + */ +@ManageLifecycle +public class CatalogUpdateNotifier implements CatalogListener +{ + private final String CALLER_NAME = "Catalog Sync"; + private final long TIMEOUT_MS = 5000; + private final TableDefn TABLE_TOMBSTONE = new TableDefn.Tombstone(); + + private final CommonCacheNotifierEx notifier; + private final ObjectMapper smileMapper; + + @Inject + public CatalogUpdateNotifier( + CatalogStorage catalog, + DruidNodeDiscoveryProvider discoveryProvider, + @EscalatedClient HttpClient httpClient, + @Smile ObjectMapper smileMapper + ) + { + long timeoutMs = TIMEOUT_MS; + this.smileMapper = smileMapper; + Supplier> nodeSupplier = new ListeningNodeSupplier( + Arrays.asList(NodeRole.BROKER), + discoveryProvider); + RestSender restSender = RestUpdateSender.httpClientSender(httpClient, Duration.millis(timeoutMs)); + RestUpdateSender sender = new RestUpdateSender( + CALLER_NAME, + nodeSupplier, + restSender, + CatalogListenerResource.BASE_URL + CatalogListenerResource.SYNC_URL, + timeoutMs); + this.notifier = new CommonCacheNotifierEx( + CALLER_NAME, + sender); + catalog.register(this); + } + + @LifecycleStart + public void start() + { + notifier.start(); + } + + @LifecycleStop + public void stop() + { + notifier.stop(); + } + + @Override + public void updated(TableSpec update) + { + notifier.send(update.toBytes(smileMapper)); + } + + @Override + public void deleted(TableId tableId) + { + TableSpec spec = TableSpec.newTable(tableId, TABLE_TOMBSTONE); + notifier.send(spec.toBytes(smileMapper)); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java new file mode 100644 index 000000000000..2c948d8370c1 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java @@ -0,0 
+1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonSubTypes.Type; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import org.apache.commons.lang.StringUtils; +import org.apache.druid.guice.annotations.PublicApi; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.segment.column.ColumnType; + +import java.util.Map; + +/** + * Base class for table columns. Columns have multiple types + * represented as subclasses. 
+ */ +@PublicApi +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") +@JsonSubTypes(value = { + @Type(name = "column", value = DatasourceColumnDefn.class), + @Type(name = "measure", value = MeasureColumnDefn.class), +}) +public abstract class ColumnDefn +{ + public static final Map VALID_SQL_TYPES = + new ImmutableMap.Builder() + .put("BIGINT", ColumnType.LONG) + .put("FLOAT", ColumnType.FLOAT) + .put("DOUBLE", ColumnType.DOUBLE) + .put("VARCHAR", ColumnType.STRING) + .build(); + + protected final String name; + protected final String sqlType; + + public ColumnDefn( + String name, + String sqlType + ) + { + this.name = name; + this.sqlType = sqlType; + } + + @JsonProperty("name") + public String name() + { + return name; + } + + @JsonProperty("sqlType") + public String sqlType() + { + return sqlType; + } + + public void validate() + { + if (StringUtils.isBlank(name)) { + throw new IAE("Column name is required"); + } + } + + public byte[] toBytes(ObjectMapper jsonMapper) + { + return CatalogDefns.toBytes(jsonMapper, this); + } + + public static ColumnDefn fromBytes(ObjectMapper jsonMapper, byte[] bytes) + { + return CatalogDefns.fromBytes(jsonMapper, bytes, ColumnDefn.class); + } + + @Override + public String toString() + { + return CatalogDefns.toString(this); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CommonCacheNotifierEx.java b/server/src/main/java/org/apache/druid/catalog/CommonCacheNotifierEx.java new file mode 100644 index 000000000000..75fd2e84ccb5 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CommonCacheNotifierEx.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.druid.concurrent.Threads; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.concurrent.Execs; +import org.apache.druid.java.util.emitter.EmittingLogger; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +/** + * Push style notifications that allow propagation of data from whatever server is + * running this notifier to whoever might be listening. Notifications arrive + * via a queue, then are dispatched via a configured sender. Details of the + * source of the updates, and how updates are sent, are handled external + * to this class. + * + * The algorithm is simple: each update is processed entirely before the + * next one is processed. As a result, this class is suitable for + * low-frequency updates: where the worst-case send times are less than + * the worst-case update frequency. If updates are faster, they will back + * up, and the class should be redesigned to allow healthy receivers to + * continue to get updates while laggards block only themselves. + * + * Events can be queued before startup. They will be send once the notifier + * is started. 
Events left in the queue at shutdown will be lost. + * + * Defined by composition so it can be tested and reused in other + * contexts. + */ +public class CommonCacheNotifierEx +{ + private static final EmittingLogger LOG = new EmittingLogger(CommonCacheNotifierEx.class); + + private final ExecutorService exec; + private final String callerName; + private final BlockingQueue updates = new LinkedBlockingQueue<>(); + private final Consumer sender; + + public CommonCacheNotifierEx( + final String callerName, + final Consumer sender + ) + { + this.callerName = callerName; + this.sender = sender; + + this.exec = Execs.singleThreaded( + StringUtils.format( + "%s-notifierThread-", + StringUtils.encodeForFormat(callerName)) + "%d" + ); + } + + public void start() + { + exec.submit(() -> { + while (!Thread.interrupted()) { + try { + sender.accept(updates.take()); + } + catch (InterruptedException e) { + return; + } + catch (Throwable t) { + LOG.makeAlert(t, callerName + ": Error occured while handling updates.").emit(); + } + } + }); + } + + public void send(byte[] update) + { + updates.add(update); + } + + @VisibleForTesting + public void stopGracefully() + { + try { + while (!updates.isEmpty()) { + Threads.sleepFor(100, TimeUnit.MILLISECONDS); + } + } + catch (InterruptedException e) { + // Ignore + } + stop(); + } + + public void stop() + { + exec.shutdownNow(); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java new file mode 100644 index 000000000000..a60620a22d97 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; + +/** + * Description of a detail datasource column and a rollup + * dimension column. + */ +public class DatasourceColumnDefn extends ColumnDefn +{ + private final String TIME_COLUMN = "__time"; + + @JsonCreator + public DatasourceColumnDefn( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType + ) + { + super(name, sqlType); + } + + public static Builder builder(String name) + { + return new Builder(name); + } + + @Override + public void validate() + { + super.validate(); + if (sqlType == null) { + return; + } + if (TIME_COLUMN.equals(name)) { + if (!"TIMESTAMP".equalsIgnoreCase(sqlType)) { + throw new IAE("__time column must have type TIMESTAMP"); + } + } else if (!VALID_SQL_TYPES.containsKey(StringUtils.toUpperCase(sqlType))) { + throw new IAE("Not a supported SQL type: " + sqlType); + } + } + + public static class Builder + { + private final String name; + private String sqlType; + private String aggFn; + + public Builder(String name) + { + this.name = name; + } + + public Builder sqlType(String type) + { + this.sqlType = type; + return this; + } + + public Builder measure(String aggFn) + { + this.aggFn 
= aggFn; + return this; + } + + public DatasourceColumnDefn build() + { + if (aggFn == null) { + return new DatasourceColumnDefn( + name, + sqlType + ); + } else { + return new MeasureColumnDefn( + name, + sqlType, + aggFn + ); + } + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/DatasourceDefn.java b/server/src/main/java/org/apache/druid/catalog/DatasourceDefn.java new file mode 100644 index 000000000000..588fd245ff51 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/DatasourceDefn.java @@ -0,0 +1,364 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Strings; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Datasource metadata exchanged via the REST API and stored + * in the catalog. + */ +public class DatasourceDefn extends TableDefn +{ + /** + * Segment grain at ingestion and initial compaction. Aging rules + * may override the value as segments age. If not provided here, + * then it must be provided at ingestion time. + */ + private final String segmentGranularity; + + /** + * Ingestion and auto-compaction rollup granularity. If null, then no + * rollup is enabled. Same as {@code queryGranularity} in and ingest spec, + * but renamed since this granularity affects rollup, not queries. Can be + * overridden at ingestion time. The grain may change as segments evolve: + * this is the grain only for ingest. + */ + private final String rollupGranularity; + + /** + * The target segment size at ingestion and initial compaction. + * If 0, then the system setting is used. + */ + private final int targetSegmentRows; + + /** + * Whether to enable auto-compaction. Only relevant if no auto-compaction + * spec is defined, since the existence of a spec overrides this setting. + */ + private final boolean enableAutoCompaction; + + /** + * The offset of segments to be auto-compacted relative to the current + * time. If not present, the auto-compaction default is used if + * auto-compaction is enabled. 
+ */ + private final String autoCompactionDelay; + + private final List columns; + + public DatasourceDefn( + @JsonProperty("segmentGranularity") String segmentGranularity, + @JsonProperty("rollupGranularity") String rollupGranularity, + @JsonProperty("targetSegmentRows") int targetSegmentRows, + @JsonProperty("enableAutoCompaction") boolean enableAutoCompaction, + @JsonProperty("autoCompactionDelay") String autoCompactionDelay, + @JsonProperty("properties") Map properties, + @JsonProperty("columns") List columns + ) + { + super(properties); + + // Note: no validation here. If a bad definition got into the + // DB, don't prevent deserialization. + + this.segmentGranularity = segmentGranularity; + this.rollupGranularity = rollupGranularity; + this.targetSegmentRows = targetSegmentRows; + this.enableAutoCompaction = enableAutoCompaction; + this.autoCompactionDelay = autoCompactionDelay; + this.columns = columns == null ? Collections.emptyList() : columns; + } + + @JsonProperty("rollupGranularity") + @JsonInclude(Include.NON_NULL) + public String rollupGranularity() + { + return rollupGranularity; + } + + @JsonProperty("segmentGranularity") + @JsonInclude(Include.NON_NULL) + public String segmentGranularity() + { + return segmentGranularity; + } + + @JsonProperty("targetSegmentRows") + @JsonInclude(Include.NON_DEFAULT) + public int targetSegmentRows() + { + return targetSegmentRows; + } + + @JsonProperty("enableAutoCompaction") + @JsonInclude(Include.NON_DEFAULT) + public boolean enableAutoCompaction() + { + return enableAutoCompaction; + } + + @JsonProperty("autoCompactionDelay") + @JsonInclude(Include.NON_NULL) + public String autoCompactionDelay() + { + return autoCompactionDelay; + } + + @JsonProperty("columns") + @JsonInclude(Include.NON_EMPTY) + public List columns() + { + return columns; + } + + public static Builder builder() + { + return new Builder(); + } + + public Builder toBuilder() + { + return new Builder(this); + } + + @JsonIgnore + public boolean 
isDetailTable() + { + return Strings.isNullOrEmpty(rollupGranularity); + } + + @JsonIgnore + public boolean isRollupTable() + { + return !isDetailTable(); + } + + @Override + public void validate() + { + super.validate(); + if (Strings.isNullOrEmpty(segmentGranularity)) { + throw new IAE("Segment granularity is required."); + } + boolean isDetail = isDetailTable(); + Set names = new HashSet<>(); + for (ColumnDefn col : columns) { + if (!(col instanceof DatasourceColumnDefn)) { + throw new IAE( + StringUtils.format("Column %s is not a segment column", col.name())); + } + if (isDetail && col instanceof MeasureColumnDefn) { + throw new IAE(StringUtils.format( + "Measure column %s not allowed for a detail table", + col.name())); + } + col.validate(); + if (!names.add(col.name())) { + throw new IAE("Duplicate column name: " + col.name()); + } + } + } + + @Override + public String defaultSchema() + { + return TableId.DRUID_SCHEMA; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + DatasourceDefn other = (DatasourceDefn) o; + return Objects.equals(this.segmentGranularity, other.segmentGranularity) + && Objects.equals(this.rollupGranularity, other.rollupGranularity) + && this.targetSegmentRows == other.targetSegmentRows + && this.enableAutoCompaction == other.enableAutoCompaction + && Objects.equals(this.autoCompactionDelay, other.autoCompactionDelay) + && Objects.equals(this.columns, other.columns) + && Objects.equals(this.properties(), other.properties()); + } + + @Override + public int hashCode() + { + return Objects.hash( + segmentGranularity, + rollupGranularity, + targetSegmentRows, + enableAutoCompaction, + autoCompactionDelay, + columns, + properties()); + } + + public static class Builder + { + private String segmentGranularity; + private String rollupGranularity; + private int targetSegmentRows; + private boolean enableAutoCompaction; + private String 
autoCompactionDelay; + private List columns; + private Map properties; + + public Builder() + { + this.columns = new ArrayList<>(); + this.properties = new HashMap<>(); + } + + public Builder(DatasourceDefn defn) + { + this.segmentGranularity = defn.segmentGranularity; + this.rollupGranularity = defn.rollupGranularity; + this.targetSegmentRows = defn.targetSegmentRows; + this.enableAutoCompaction = defn.enableAutoCompaction; + this.autoCompactionDelay = defn.autoCompactionDelay; + this.properties = new HashMap<>(defn.properties()); + this.columns = new ArrayList<>(defn.columns); + } + + public Builder rollupGranularity(String rollupGranularty) + { + this.rollupGranularity = rollupGranularty; + return this; + } + + public Builder segmentGranularity(String segmentGranularity) + { + this.segmentGranularity = segmentGranularity; + return this; + } + + public Builder targetSegmentRows(int targetSegmentRows) + { + this.targetSegmentRows = targetSegmentRows; + return this; + } + + public Builder enableAutoCompaction(boolean enableAutoCompaction) + { + this.enableAutoCompaction = enableAutoCompaction; + return this; + } + + public Builder autoCompactionDelay(String autoCompactionDelay) + { + this.autoCompactionDelay = autoCompactionDelay; + return this; + } + + public List columns() + { + return columns; + } + + public Builder column(DatasourceColumnDefn column) + { + if (Strings.isNullOrEmpty(column.name())) { + throw new IAE("Column name is required"); + } + columns.add(column); + return this; + } + + public Builder timeColumn() + { + return column("__time", "TIMESTAMP"); + } + + public Builder column(String name, String sqlType) + { + return column( + DatasourceColumnDefn + .builder(name) + .sqlType(sqlType) + .build()); + } + + public Builder measure(String name, String sqlType, String aggFn) + { + return column( + DatasourceColumnDefn + .builder(name) + .sqlType(sqlType) + .measure(aggFn) + .build()); + } + + public Builder properties(Map properties) + { + 
this.properties = properties; + return this; + } + + public Builder property(String key, Object value) + { + if (properties == null) { + properties = new HashMap<>(); + } + properties.put(key, value); + return this; + } + + public Map properties() + { + return properties; + } + + public DatasourceDefn build() + { + if (targetSegmentRows < 0) { + targetSegmentRows = 0; + } + // TODO(paul): validate upper bound + return new DatasourceDefn( + segmentGranularity, + rollupGranularity, + targetSegmentRows, + enableAutoCompaction, + autoCompactionDelay, + properties, + columns); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java new file mode 100644 index 000000000000..66a6f76e434c --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.curator.shaded.com.google.common.base.Strings; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; + +import java.util.Objects; + +/** + * Definition of a column within an input source. Columns here describe + * the "as created" form of the columns: what is actually in the input. + * Column definitions are descriptive (of the data we already have), not + * proscriptive (of the columns we'd like to have, since Druid does not + * create input columns.) + */ +public class InputColumnDefn extends ColumnDefn +{ + @JsonCreator + public InputColumnDefn( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType) + { + super(name, sqlType); + } + + @Override + public void validate() + { + super.validate(); + if (Strings.isNullOrEmpty(name)) { + throw new IAE("Columns names cannot be empty"); + } + if (Strings.isNullOrEmpty(sqlType)) { + throw new IAE("Columns type is required: " + name); + } + if (!VALID_SQL_TYPES.containsKey(StringUtils.toUpperCase(sqlType))) { + throw new IAE("Not a supported SQL type: " + sqlType); + } + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InputColumnDefn other = (InputColumnDefn) o; + return Objects.equals(this.name, other.name) + && Objects.equals(this.sqlType, other.sqlType); + } + + @Override + public int hashCode() + { + return Objects.hash(name, sqlType); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/InputSourceDefn.java b/server/src/main/java/org/apache/druid/catalog/InputSourceDefn.java new file mode 100644 index 000000000000..12da74c4bdf1 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/InputSourceDefn.java @@ -0,0 +1,225 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.catalog;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Strings;
import org.apache.druid.data.input.InputFormat;
import org.apache.druid.data.input.InputSource;
import org.apache.druid.java.util.common.IAE;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

/**
 * Definition of an external input source, primarily for ingestion.
 * The components are derived from those for Druid ingestion: an
 * input source, a format and a set of columns. Also provides
 * properties, as do all table definitions.
 */
public class InputSourceDefn extends TableDefn
{
  private final InputSource inputSource;
  private final InputFormat format;
  // Never null: an absent JSON property deserializes to an empty list,
  // consistent with DatasourceDefn.
  private final List<InputColumnDefn> columns;

  /**
   * Jackson-visible constructor. No validation here so a bad definition
   * already in the DB can still be deserialized; call {@link #validate()}
   * before persisting.
   */
  @JsonCreator
  public InputSourceDefn(
      @JsonProperty("inputSource") InputSource inputSource,
      @JsonProperty("format") InputFormat format,
      @JsonProperty("columns") List<InputColumnDefn> columns,
      @JsonProperty("properties") Map<String, Object> properties
  )
  {
    super(properties);
    this.inputSource = inputSource;
    this.format = format;
    this.columns = columns == null ? Collections.emptyList() : columns;
  }

  @JsonProperty("inputSource")
  public InputSource inputSource()
  {
    return inputSource;
  }

  @JsonProperty("format")
  public InputFormat format()
  {
    return format;
  }

  @JsonProperty("columns")
  public List<InputColumnDefn> columns()
  {
    return columns;
  }

  /**
   * An input source definition requires a source, a format and at least
   * one column; column names must be unique.
   *
   * @throws IAE if any component is missing or a column is invalid
   */
  @Override
  public void validate()
  {
    super.validate();
    if (inputSource == null) {
      throw new IAE("The input source is required");
    }
    if (format == null) {
      throw new IAE("The format is required");
    }
    if (columns == null || columns.isEmpty()) {
      throw new IAE("An input source must specify one or more columns");
    }
    Set<String> names = new HashSet<>();
    for (ColumnDefn col : columns) {
      if (!names.add(col.name())) {
        throw new IAE("Duplicate column name: " + col.name());
      }
      col.validate();
    }
  }

  @Override
  public String defaultSchema()
  {
    return TableId.INPUT_SCHEMA;
  }

  public static Builder builder()
  {
    return new Builder();
  }

  public Builder toBuilder()
  {
    return new Builder(this);
  }

  @Override
  public boolean equals(Object o)
  {
    if (o == this) {
      return true;
    }
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    InputSourceDefn other = (InputSourceDefn) o;
    return Objects.equals(this.inputSource, other.inputSource)
        && Objects.equals(this.format, other.format)
        && Objects.equals(this.columns, other.columns)
        && Objects.equals(this.properties(), other.properties());
  }

  @Override
  public int hashCode()
  {
    return Objects.hash(
        inputSource,
        format,
        columns,
        properties());
  }

  /** Fluent builder for {@link InputSourceDefn}. */
  public static class Builder
  {
    private InputSource inputSource;
    private InputFormat format;
    private List<InputColumnDefn> columns;
    private Map<String, Object> properties;

    public Builder()
    {
      this.columns = new ArrayList<>();
      this.properties = new HashMap<>();
    }

    public Builder(InputSourceDefn defn)
    {
      this.inputSource = defn.inputSource;
      this.format = defn.format;
      this.columns = new ArrayList<>(defn.columns);
      this.properties = new HashMap<>(defn.properties());
    }

    public Builder source(InputSource inputSource)
    {
      this.inputSource = inputSource;
      return this;
    }

    public Builder format(InputFormat format)
    {
      this.format = format;
      return this;
    }

    public List<InputColumnDefn> columns()
    {
      return columns;
    }

    public Builder column(InputColumnDefn column)
    {
      if (Strings.isNullOrEmpty(column.name())) {
        throw new IAE("Column name is required");
      }
      columns.add(column);
      return this;
    }

    public Builder column(String name, String sqlType)
    {
      return column(new InputColumnDefn(name, sqlType));
    }

    public Builder properties(Map<String, Object> properties)
    {
      this.properties = properties;
      return this;
    }

    public Builder property(String key, Object value)
    {
      if (properties == null) {
        properties = new HashMap<>();
      }
      properties.put(key, value);
      return this;
    }

    public Map<String, Object> properties()
    {
      return properties;
    }

    public InputSourceDefn build()
    {
      return new InputSourceDefn(
          inputSource,
          format,
          columns,
          properties
      );
    }
  }
}
See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.catalog;

import org.apache.druid.discovery.DiscoveryDruidNode;
import org.apache.druid.discovery.DruidNodeDiscovery;
import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
import org.apache.druid.discovery.NodeRole;
import org.apache.druid.server.DruidNode;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.Supplier;

/**
 * Supplies an up-to-date list of Druid nodes of the given roles each
 * time {@link #get()} is invoked.
 *
 * The list is rebuilt on every call rather than cached. For catalog
 * traffic the rate of change is expected to be low, so this is fine;
 * a caching layer would be worthwhile only for high-frequency use.
 */
public class ListeningNodeSupplier implements Supplier<Iterable<DruidNode>>
{
  // Node roles whose members should receive notifications.
  private final List<NodeRole> roles;
  // Discovery service used to resolve roles to live nodes.
  private final DruidNodeDiscoveryProvider discovery;

  public ListeningNodeSupplier(
      List<NodeRole> nodeTypes,
      DruidNodeDiscoveryProvider discoveryProvider
  )
  {
    this.roles = nodeTypes;
    this.discovery = discoveryProvider;
  }

  @Override
  public Iterable<DruidNode> get()
  {
    final List<DruidNode> result = new ArrayList<>();
    for (NodeRole role : roles) {
      final DruidNodeDiscovery finder = discovery.getForNodeRole(role);
      final Collection<DiscoveryDruidNode> discovered = finder.getAllNodes();
      for (DiscoveryDruidNode node : discovered) {
        result.add(node.getDruidNode());
      }
    }
    return result;
  }
}
+ */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; + +import javax.inject.Inject; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Metadata catalog which reads from the catalog storage. No caching. + * For testing, and as the Coodinator-side implementation of the remote + * synchronization protocol. + */ +public class LocalMetadataCatalog implements MetadataCatalog +{ + private final CatalogSource catalog; + private final SchemaRegistry schemaRegistry; + + @Inject + public LocalMetadataCatalog( + CatalogSource catalog, + SchemaRegistry schemaRegistry + ) + { + this.catalog = catalog; + this.schemaRegistry = schemaRegistry; + } + + @Override + public TableMetadata resolveTable(TableId tableId) + { + TableSpec table = catalog.table(tableId); + if (table == null) { + return null; + } + SchemaDefn schema = schemaRegistry.schema(table.dbSchema()); + return AbstractTableMetadata.fromCatalogTable(schema, table); + } + + @Override + public List tables(String schemaName) + { + SchemaDefn schema = schemaRegistry.schema(schemaName); + if (schema == null || !schema.writable()) { + return Collections.emptyList(); + } + List catalogTables = catalog.tablesForSchema(schemaName); + List tables = new ArrayList<>(); + for (TableSpec table : catalogTables) { + tables.add(AbstractTableMetadata.fromCatalogTable(schema, table)); + } + return tables; + } + + @Override + public Set tableNames(String schemaName) + { + SchemaDefn schema = schemaRegistry.schema(schemaName); + if (schema == null || !schema.writable()) { + return Collections.emptySet(); + } + List catalogTables = catalog.tablesForSchema(schemaName); + Set tables = new HashSet<>(); + for (TableSpec table : catalogTables) { + tables.add(table.name()); + } + return tables; + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/MeasureColumnDefn.java 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.catalog;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.Objects;

/**
 * Catalog definition of a measure (metric) column.
 */
public class MeasureColumnDefn extends DatasourceColumnDefn
{
  // SQL aggregate function applied to this column at rollup time.
  private final String aggregateFn;

  @JsonCreator
  public MeasureColumnDefn(
      @JsonProperty("name") String name,
      @JsonProperty("sqlType") String sqlType,
      @JsonProperty("aggregateFn") String aggregateFn
  )
  {
    super(name, sqlType);
    this.aggregateFn = aggregateFn;
  }

  @JsonProperty("aggregateFn")
  public String aggregateFn()
  {
    return aggregateFn;
  }

  // equals/hashCode must include aggregateFn: without them the inherited
  // equality ignores the aggregate function, so two measures that differ
  // only in aggFn would compare equal, breaking DatasourceDefn.equals
  // which compares column lists.
  @Override
  public boolean equals(Object o)
  {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    MeasureColumnDefn other = (MeasureColumnDefn) o;
    return Objects.equals(this.name, other.name)
        && Objects.equals(this.sqlType, other.sqlType)
        && Objects.equals(this.aggregateFn, other.aggregateFn);
  }

  @Override
  public int hashCode()
  {
    return Objects.hash(name, sqlType, aggregateFn);
  }
}

+ * This interface does not interpolate physical data from + * segments. That work is done by a layer on top of this one: a + * layer which also has visibility to the segment caching logic. + */ +public interface MetadataCatalog +{ + enum TableType + { + DATASOURCE, + INPUT, + VIEW + } + + enum ColumnKind + { + SIMPLE, + DIMENSION, + MEASURE, + INPUT + } + + /** + * Facade over a column definition for the convenience of Broker clients. + */ + interface ColumnMetadata + { + String name(); + ColumnKind kind(); + String sqlType(); + } + + interface MeasureMetadata extends ColumnMetadata + { + String aggFn(); + } + + interface InputColumnMetadata extends ColumnMetadata + { + ColumnType druidType(); + } + + /** + * Facade over a table definition for the convenience of Broker clients. + */ + interface TableMetadata + { + TableId id(); + TableType type(); + long updateTime(); + + /** + * List of known columns, in user-defined order. + */ + List columns(); + + /** + * Look up a column by name. Returns null if no such column + * exists in metadata. + */ + ColumnMetadata column(String name); + } + + interface DatasourceMetadata extends TableMetadata + { + String segmentGranularity(); + String rollupGranularity(); + boolean isRollup(); + boolean isDetail(); + } + + interface InputSourceMetadata extends TableMetadata + { + } + + public interface CatalogSource + { + List tablesForSchema(String dbSchema); + TableSpec table(TableId id); + } + + public interface CatalogListener + { + void updated(TableSpec update); + void deleted(TableId tableId); + } + + interface CatalogUpdateProvider + { + void register(CatalogListener listener); + } + + /** + * Resolves a table given a {@link TableId} with the schema and + * table name. Does not do security checks: the caller is responsible. + * + * @return the table metadata, if any exists, else {@code null} if + * no metadata is available. Note that a datasource can exist without + * metadata. 
Views and input sources exist only if their + * metadata exists. System tables never have metadata. + */ + TableMetadata resolveTable(TableId tableId); + + /** + * List of tables defined within the given schema. Does not filter the + * tables by permissions: the caller is responsible for that. + * + * @param schemaName + * @return + */ + List tables(String schemaName); + + Set tableNames(String schemaName); +} diff --git a/server/src/main/java/org/apache/druid/catalog/MetastoreManager.java b/server/src/main/java/org/apache/druid/catalog/MetastoreManager.java new file mode 100644 index 000000000000..1016a25b40ac --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/MetastoreManager.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.metadata.MetadataStorageConnector; +import org.apache.druid.metadata.MetadataStorageConnectorConfig; +import org.apache.druid.metadata.MetadataStorageTablesConfig; +import org.apache.druid.metadata.SQLMetadataConnector; + +/** + * Represents the metastore manager database and its implementation. 
+ * Abstracts away the various kick-knacks used to define the metastore. + * The metastore operations are defined via table-specific classes. + */ +public interface MetastoreManager +{ + MetadataStorageConnector connector(); + MetadataStorageConnectorConfig config(); + MetadataStorageTablesConfig tablesConfig(); + + /** + * Whether to create tables if they do not exist. + */ + boolean createTables(); + + /** + * Object mapper to use for serializing and deserializing + * JSON objects stored in the metastore DB. + */ + ObjectMapper jsonMapper(); + + /** + * Is the implementation SQL-based? + */ + boolean isSql(); + + /** + * If SQL based, return the SQL version of the metastore + * connector. Throws an exception if not SQL-based. + */ + SQLMetadataConnector sqlConnector(); +} diff --git a/server/src/main/java/org/apache/druid/catalog/MetastoreManagerImpl.java b/server/src/main/java/org/apache/druid/catalog/MetastoreManagerImpl.java new file mode 100644 index 000000000000..45bd67a71e86 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/MetastoreManagerImpl.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Supplier; +import org.apache.druid.guice.annotations.Json; +import org.apache.druid.metadata.MetadataStorageConnector; +import org.apache.druid.metadata.MetadataStorageConnectorConfig; +import org.apache.druid.metadata.MetadataStorageTablesConfig; +import org.apache.druid.metadata.SQLMetadataConnector; + +import javax.inject.Inject; + +public class MetastoreManagerImpl implements MetastoreManager +{ + private final ObjectMapper jsonMapper; + private final MetadataStorageConnector connector; + private final MetadataStorageConnectorConfig config; + private final MetadataStorageTablesConfig tablesConfig; + + @Inject + public MetastoreManagerImpl( + @Json ObjectMapper jsonMapper, + MetadataStorageConnector connector, + Supplier configSupplier, + Supplier tablesConfigSupplier + ) + { + this.jsonMapper = jsonMapper; + this.connector = connector; + this.config = configSupplier.get(); + this.tablesConfig = tablesConfigSupplier.get(); + } + + @Override + public MetadataStorageConnector connector() + { + return connector; + } + + @Override + public MetadataStorageConnectorConfig config() + { + return config; + } + + @Override + public MetadataStorageTablesConfig tablesConfig() + { + return tablesConfig; + } + + @Override + public boolean createTables() + { + return config.isCreateTables(); + } + + @Override + public ObjectMapper jsonMapper() + { + return jsonMapper; + } + + @Override + public boolean isSql() + { + return connector instanceof SQLMetadataConnector; + } + + @Override + public SQLMetadataConnector sqlConnector() + { + return (SQLMetadataConnector) connector; + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/RestUpdateSender.java b/server/src/main/java/org/apache/druid/catalog/RestUpdateSender.java new file mode 100644 index 000000000000..0f48fbc85943 --- /dev/null +++ 
b/server/src/main/java/org/apache/druid/catalog/RestUpdateSender.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.java.util.common.RE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.emitter.EmittingLogger; +import org.apache.druid.java.util.http.client.HttpClient; +import org.apache.druid.java.util.http.client.Request; +import org.apache.druid.java.util.http.client.response.StatusResponseHandler; +import org.apache.druid.java.util.http.client.response.StatusResponseHolder; +import org.apache.druid.server.DruidNode; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.joda.time.Duration; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import 
java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+/**
+ * Sends updates to a set of Druid nodes provided by the given supplier.
+ * The update is provided by the caller in serialized form. The class sends
+ * updates concurrently, and returns futures for all the requests.
+ *
+ * Updates are processed one by one, but each update is sent concurrently.
+ * All responses from all receivers must arrive (or a timeout must occur)
+ * before the next update can be sent. As a result, this class is suitable for
+ * low-frequency updates: where the worst-case send times are less than
+ * the worst-case update frequency. If updates are faster, they will back
+ * up, and the class should be redesigned to allow healthy receivers to
+ * continue to get updates while laggards block only themselves.
+ *
+ * Defined by composition so it can be tested and reused in other
+ * contexts.
+ */
+public class RestUpdateSender implements Consumer<byte[]>
+{
+  private static final EmittingLogger LOG = new EmittingLogger(RestUpdateSender.class);
+
+  public interface RestSender
+  {
+    ListenableFuture<StatusResponseHolder> send(URL listenerURL, byte[] serializedEntity);
+  }
+
+  private static class HttpClientSender implements RestSender
+  {
+    private final HttpClient httpClient;
+    private final Duration cacheNotificationsTimeout;
+
+    private HttpClientSender(
+        HttpClient httpClient,
+        Duration cacheNotificationsTimeout)
+    {
+      this.httpClient = httpClient;
+      this.cacheNotificationsTimeout = cacheNotificationsTimeout;
+    }
+
+    @Override
+    public ListenableFuture<StatusResponseHolder> send(URL listenerURL, byte[] serializedEntity)
+    {
+      // Best effort, if this fails, remote node will poll
+      // and pick up the update eventually.
+      return httpClient.go(
+          new Request(HttpMethod.POST, listenerURL)
+              .setContent(SmileMediaTypes.APPLICATION_JACKSON_SMILE, serializedEntity),
+          StatusResponseHandler.getInstance(),
+          cacheNotificationsTimeout
+      );
+    }
+  }
+
+  private final String callerName;
+  private final Supplier<Iterable<DruidNode>> destinationSupplier;
+  private final String baseUrl;
+  private final RestSender sender;
+  private final long cacheNotificationsTimeoutMs;
+
+  public RestUpdateSender(
+      final String callerName,
+      final Supplier<Iterable<DruidNode>> destinationSupplier,
+      final RestSender sender,
+      final String baseUrl,
+      final long cacheNotificationsTimeoutMs
+  )
+  {
+    this.callerName = callerName;
+    this.destinationSupplier = destinationSupplier;
+    this.sender = sender;
+    this.baseUrl = baseUrl;
+    this.cacheNotificationsTimeoutMs = cacheNotificationsTimeoutMs;
+  }
+
+  public static RestSender httpClientSender(HttpClient httpClient, Duration cacheNotificationsTimeout)
+  {
+    return new HttpClientSender(httpClient, cacheNotificationsTimeout);
+  }
+
+  @Override
+  public void accept(byte[] serializedEntity)
+  {
+    LOG.debug(callerName + ": Sending update notifications");
+
+    // Best effort, if a notification fails, the remote node will eventually poll to update its state
+    // We wait for responses however, to avoid flooding remote nodes with notifications.
+ List> futures = new ArrayList<>(); + for (DruidNode node : destinationSupplier.get()) { + futures.add( + sender.send( + getListenerURL(node, baseUrl), + serializedEntity)); + } + + try { + List responses = getResponsesFromFutures(futures); + + for (StatusResponseHolder response : responses) { + if (response == null) { + LOG.error("Got null future response from update request."); + continue; + } + HttpResponseStatus status = response.getStatus(); + if (HttpResponseStatus.OK.equals(status) || + HttpResponseStatus.ACCEPTED.equals(status)) { + LOG.debug("Got status [%s]", status); + } else { + LOG.error("Got error status [%s], content [%s]", status, response.getContent()); + } + } + } + catch (Exception e) { + LOG.makeAlert(e, callerName + ": Failed to get response for cache notification.").emit(); + } + + LOG.debug(callerName + ": Received responses for cache update notifications."); + } + + @VisibleForTesting + List getResponsesFromFutures( + List> futures + ) throws InterruptedException, ExecutionException, TimeoutException + { + return Futures.successfulAsList(futures) + .get( + cacheNotificationsTimeoutMs, + TimeUnit.MILLISECONDS + ); + } + + private URL getListenerURL(DruidNode druidNode, String baseUrl) + { + try { + return new URL( + druidNode.getServiceScheme(), + druidNode.getHost(), + druidNode.getPortToUse(), + baseUrl + ); + } + catch (MalformedURLException mue) { + String msg = StringUtils.format(callerName + ": Malformed url for DruidNode [%s] and baseUrl [%s]", druidNode, baseUrl); + LOG.error(msg); + throw new RE(mue, msg); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java new file mode 100644 index 000000000000..ac6456b136a5 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.MetadataCatalog.TableType; + +import java.util.Set; + +/** + * Defines the set of schemas available in Druid and their properties. + * Since Druid has a fixed set of schemas, this registry is currently + * hard-coded. That will change if/when Druid allows user-defined + * schemas. + */ +public interface SchemaRegistry +{ + public interface SchemaDefn + { + String name(); + String securityResource(); + boolean writable(); + boolean accepts(TableDefn defn); + TableType tableType(); + } + + SchemaDefn schema(String name); + Set names(); +} diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java new file mode 100644 index 000000000000..27adb4c5d5e3 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.MetadataCatalog.TableType; +import org.apache.druid.server.security.ResourceType; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +/** + * Hard-coded schema registry that knows about the well-known, and + * a few obscure, Druid schemas. Does not allow for user-defined + * schemas, which the rest of Druid would not be able to support. 
+ */ +public class SchemaRegistryImpl implements SchemaRegistry +{ + // Mimics the definition in ExternalOperatorConvertion + // TODO: Change this when ExternalOperatorConvertion changes + private String EXTERNAL_RESOURCE = "EXTERNAL"; + + public static class SchemaDefnImpl implements SchemaDefn + { + private final String name; + private final String resource; + private final TableType tableType; + private Class acceptedClass; + + public SchemaDefnImpl( + String name, + String resource, + TableType tableType, + Class acceptedClass) + { + this.name = name; + this.resource = resource; + this.tableType = tableType; + this.acceptedClass = acceptedClass; + } + + @Override + public String name() + { + return name; + } + + @Override + public String securityResource() + { + return resource; + } + + @Override + public boolean writable() + { + return acceptedClass != null; + } + + @Override + public boolean accepts(TableDefn defn) + { + if (acceptedClass == null) { + return false; + } + if (defn == null) { + return false; + } + return acceptedClass.isAssignableFrom(defn.getClass()); + } + + @Override + public TableType tableType() + { + return tableType; + } + } + + private final Map builtIns; + + public SchemaRegistryImpl() + { + builtIns = new HashMap<>(); + register(new SchemaDefnImpl( + TableId.DRUID_SCHEMA, + ResourceType.DATASOURCE, + TableType.DATASOURCE, + DatasourceDefn.class)); + register(new SchemaDefnImpl( + TableId.LOOKUP_SCHEMA, + ResourceType.CONFIG, + null, // TODO + null)); // TODO + register(new SchemaDefnImpl( + TableId.CATALOG_SCHEMA, + ResourceType.SYSTEM_TABLE, + null, + null)); + register(new SchemaDefnImpl( + TableId.SYSTEM_SCHEMA, + ResourceType.SYSTEM_TABLE, + null, + null)); + register(new SchemaDefnImpl( + TableId.INPUT_SCHEMA, + EXTERNAL_RESOURCE, + TableType.INPUT, + InputSourceDefn.class)); + register(new SchemaDefnImpl( + TableId.VIEW_SCHEMA, + ResourceType.VIEW, + null, // TODO + null)); // TODO + } + + private void register(SchemaDefn 
schemaDefn) + { + builtIns.put(schemaDefn.name(), schemaDefn); + } + + @Override + public SchemaDefn schema(String name) + { + return builtIns.get(name); + } + + @Override + public Set names() + { + return new TreeSet(builtIns.keySet()); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/TableDefn.java b/server/src/main/java/org/apache/druid/catalog/TableDefn.java new file mode 100644 index 000000000000..700998c5103b --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/TableDefn.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonSubTypes.Type; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; + +import java.util.Map; + +/** + * Definition of a table "hint" in the metastore, between client and + * Druid, and between Druid nodes. 
+ */ +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") +@JsonSubTypes(value = { + @Type(name = "datasource", value = DatasourceDefn.class), + @Type(name = "input", value = InputSourceDefn.class), + @Type(name = "tombstone", value = TableDefn.Tombstone.class), +}) +public class TableDefn +{ + private final Map properties; + + public TableDefn(Map properties) + { + this.properties = properties == null ? ImmutableMap.of() : properties; + } + + @JsonProperty("properties") + @JsonInclude(Include.NON_NULL) + public Map properties() + { + return properties; + } + + public void validate() + { + } + + public byte[] toBytes(ObjectMapper jsonMapper) + { + return CatalogDefns.toBytes(jsonMapper, this); + } + + public static TableDefn fromBytes(ObjectMapper jsonMapper, byte[] bytes) + { + return CatalogDefns.fromBytes(jsonMapper, bytes, TableDefn.class); + } + + @Override + public String toString() + { + return CatalogDefns.toString(this); + } + + public String defaultSchema() + { + return null; + } + + public static class Tombstone extends TableDefn + { + public Tombstone() + { + super(null); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/TableId.java b/server/src/main/java/org/apache/druid/catalog/TableId.java new file mode 100644 index 000000000000..5dd18abc4a0a --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/TableId.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.catalog;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.base.Objects;
+import org.apache.druid.java.util.common.StringUtils;
+
+/**
+ * SQL-like compound table ID with schema and table name.
+ */
+public class TableId
+{
+  // Well-known Druid schemas
+  public static final String DRUID_SCHEMA = "druid";
+  public static final String LOOKUP_SCHEMA = "lookups";
+  public static final String SYSTEM_SCHEMA = "sys";
+  public static final String CATALOG_SCHEMA = "INFORMATION_SCHEMA";
+
+  // Extra for MSQE
+  public static final String INPUT_SCHEMA = "input";
+
+  // Extra for views
+  public static final String VIEW_SCHEMA = "view";
+
+  private final String schema;
+  private final String name;
+
+  @JsonCreator
+  public TableId(
+      @JsonProperty("schema") String schema,
+      @JsonProperty("name") String name)
+  {
+    this.schema = schema;
+    this.name = name;
+  }
+
+  public static TableId datasource(String name)
+  {
+    return new TableId(DRUID_SCHEMA, name);
+  }
+
+  public static TableId inputSource(String name)
+  {
+    return new TableId(INPUT_SCHEMA, name);
+  }
+
+  public static TableId of(String schema, String table)
+  {
+    return new TableId(schema, table);
+  }
+
+  @JsonProperty("schema")
+  public String schema()
+  {
+    return schema;
+  }
+
+  @JsonProperty("name")
+  public String name()
+  {
+    return name;
+  }
+
+  public String sqlName()
+  {
+    return StringUtils.format("\"%s\".\"%s\"", schema, name);
+  }
+
+  @Override
+  
public String toString() + { + return sqlName(); + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + TableId other = (TableId) o; + return Objects.equal(this.schema, other.schema) + && Objects.equal(this.name, other.name); + } + + @Override + public int hashCode() + { + return Objects.hashCode(schema, name); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/TableSpec.java b/server/src/main/java/org/apache/druid/catalog/TableSpec.java new file mode 100644 index 000000000000..1a4449398225 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/TableSpec.java @@ -0,0 +1,268 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import org.apache.druid.guice.annotations.PublicApi; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.metadata.catalog.CatalogManager.TableState; + +import java.util.Objects; + +/** + * REST API level description of a table. Tables have multiple types + * as described by subclasses. + */ +@PublicApi +public class TableSpec +{ + private final String dbSchema; + private final String name; + private final String owner; + private final long creationTime; + private final long updateTime; + private final TableState state; + private final TableDefn defn; + + public TableSpec( + @JsonProperty("dbSchema") String dbSchema, + @JsonProperty("name") String name, + @JsonProperty("owner") String owner, + @JsonProperty("creationTime") long creationTime, + @JsonProperty("updateTime") long updateTime, + @JsonProperty("state") TableState state, + @JsonProperty("defn") TableDefn defn) + { + this.dbSchema = dbSchema; + this.name = name; + this.owner = owner; + this.creationTime = creationTime; + this.updateTime = updateTime; + this.state = state; + this.defn = defn; + } + + public static TableSpec newTable( + TableId id, + TableDefn defn + ) + { + return newTable(id.schema(), id.name(), defn); + } + + public static TableSpec newTable( + String dbSchema, + String name, + TableDefn defn + ) + { + return new TableSpec( + dbSchema, + name, + null, + 0, + 0, + TableState.ACTIVE, + defn); + } + + public static TableSpec newSegmentTable( + String name, + TableDefn defn + ) + { + return newTable( + TableId.DRUID_SCHEMA, + name, + defn); + } + + public TableSpec fromInsert(String dbSchema, long updateTime) + { + return new TableSpec( + dbSchema, + name, + owner, + updateTime, 
+ updateTime, + state, + defn); + } + + public TableSpec asUpdate(long updateTime) + { + return new TableSpec( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } + + public TableSpec withSchema(String dbSchema) + { + if (dbSchema.equals(this.dbSchema)) { + return this; + } + return new TableSpec( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } + + public TableId id() + { + return new TableId(resolveDbSchema(), name); + } + + @JsonProperty("dbSchema") + public String dbSchema() + { + return dbSchema; + } + + @JsonProperty("name") + public String name() + { + return name; + } + + public String sqlName() + { + return StringUtils.format("\"%s\".\"%s\"", dbSchema, name); + } + + @JsonProperty("owner") + @JsonInclude(JsonInclude.Include.NON_NULL) + public String owner() + { + return owner; + } + + @JsonProperty("state") + public TableState state() + { + return state; + } + + @JsonProperty("creationTime") + public long creationTime() + { + return creationTime; + } + + @JsonProperty("updateTime") + public long updateTime() + { + return updateTime; + } + + @JsonProperty("defn") + public TableDefn defn() + { + return defn; + } + + /** + * Syntactic validation of a table object. Validates only that which + * can be checked from this table object. 
+ */ + public void validate() + { + if (Strings.isNullOrEmpty(dbSchema)) { + throw new IAE("Database schema is required"); + } + if (Strings.isNullOrEmpty(name)) { + throw new IAE("Table name is required"); + } + if (defn != null) { + defn.validate(); + } + } + + public byte[] toBytes(ObjectMapper jsonMapper) + { + return CatalogDefns.toBytes(jsonMapper, this); + } + + public static TableSpec fromBytes(ObjectMapper jsonMapper, byte[] bytes) + { + return CatalogDefns.fromBytes(jsonMapper, bytes, TableSpec.class); + } + + @Override + public String toString() + { + return CatalogDefns.toString(this); + } + + public String resolveDbSchema() + { + if (!Strings.isNullOrEmpty(dbSchema)) { + return dbSchema; + } else if (defn != null) { + return defn.defaultSchema(); + } else { + return null; + } + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + TableSpec other = (TableSpec) o; + return Objects.equals(dbSchema, other.dbSchema) + && Objects.equals(name, other.name) + && Objects.equals(owner, other.owner) + && creationTime == other.creationTime + && updateTime == other.updateTime + && state == other.state + && Objects.equals(defn, other.defn); + } + + @Override + public int hashCode() + { + return Objects.hash( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } +} diff --git a/server/src/main/java/org/apache/druid/discovery/DruidLeaderClient.java b/server/src/main/java/org/apache/druid/discovery/DruidLeaderClient.java index 0679ac39e4ac..063957547c8c 100644 --- a/server/src/main/java/org/apache/druid/discovery/DruidLeaderClient.java +++ b/server/src/main/java/org/apache/druid/discovery/DruidLeaderClient.java @@ -146,12 +146,12 @@ public > H go(Request request, HttpResponseHa // Unwrap IOExceptions and ChannelExceptions, re-throw others Throwables.propagateIfInstanceOf(e.getCause(), IOException.class); 
Throwables.propagateIfInstanceOf(e.getCause(), ChannelException.class); - throw new RE(e, "HTTP request to[%s] failed", request.getUrl()); + throw new RE(e, "HTTP request to [%s] failed", request.getUrl()); } } catch (IOException | ChannelException ex) { // can happen if the node is stopped. - log.warn(ex, "Request[%s] failed.", request.getUrl()); + log.warn(ex, "Request [%s] failed.", request.getUrl()); try { if (request.getUrl().getQuery() == null) { @@ -176,7 +176,7 @@ public > H go(Request request, HttpResponseHa // Not an IOException; this is our own fault. throw new ISE( e, - "failed to build url with path[%] and query string [%s].", + "failed to build url with path [%] and query string [%s].", request.getUrl().getPath(), request.getUrl().getQuery() ); @@ -186,10 +186,10 @@ public > H go(Request request, HttpResponseHa if (HttpResponseStatus.TEMPORARY_REDIRECT.equals(fullResponseHolder.getResponse().getStatus())) { String redirectUrlStr = fullResponseHolder.getResponse().headers().get("Location"); if (redirectUrlStr == null) { - throw new IOE("No redirect location is found in response from url[%s].", request.getUrl()); + throw new IOE("No redirect location is found in response from url [%s].", request.getUrl()); } - log.info("Request[%s] received redirect response to location [%s].", request.getUrl(), redirectUrlStr); + log.info("Request [%s] received redirect response to location [%s].", request.getUrl(), redirectUrlStr); final URL redirectUrl; try { @@ -245,7 +245,7 @@ public String findCurrentLeader() return validatedUrl.toString(); } catch (MalformedURLException ex) { - log.error(ex, "Received malformed leader url[%s].", leaderUrl); + log.error(ex, "Received malformed leader url [%s].", leaderUrl); } } diff --git a/server/src/main/java/org/apache/druid/guice/CatalogClientModule.java b/server/src/main/java/org/apache/druid/guice/CatalogClientModule.java new file mode 100644 index 000000000000..2305feeaa37c --- /dev/null +++ 
b/server/src/main/java/org/apache/druid/guice/CatalogClientModule.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.guice; + +import com.google.inject.Binder; +import com.google.inject.Module; +import org.apache.druid.catalog.CachedMetadataCatalog; +import org.apache.druid.catalog.CatalogClient; +import org.apache.druid.catalog.MetadataCatalog; +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.MetadataCatalog.CatalogSource; +import org.apache.druid.catalog.SchemaRegistry; +import org.apache.druid.catalog.SchemaRegistryImpl; +import org.apache.druid.server.http.CatalogListenerResource; + +/** + * Configures the metadata catalog on the Broker to use a cache + * and network communications for pull and push updates. + */ +public class CatalogClientModule implements Module +{ + @Override + public void configure(Binder binder) + { + // The Broker (client) uses a cached metadata catalog. + binder + .bind(CachedMetadataCatalog.class) + .in(LazySingleton.class); + + // Broker code accesses he catalog through the + // MetadataCatalog interface. 
+ binder + .bind(MetadataCatalog.class) + .to(CachedMetadataCatalog.class) + .in(LazySingleton.class); + + // The cached metadata catalog needs a "pull" source, + // which is the network client. + binder + .bind(CatalogSource.class) + .to(CatalogClient.class) + .in(LazySingleton.class); + + // The cached metadata catalog is the listener for + // "push" events. + binder + .bind(CatalogListener.class) + .to(CachedMetadataCatalog.class) + .in(LazySingleton.class); + + // At present, the set of schemas is fixed. + binder + .bind(SchemaRegistry.class) + .to(SchemaRegistryImpl.class) + .in(LazySingleton.class); + + // The listener resource sends to the catalog + // listener (the cached catalog.) + Jerseys.addResource(binder, CatalogListenerResource.class); + } +} diff --git a/server/src/main/java/org/apache/druid/guice/CatalogModule.java b/server/src/main/java/org/apache/druid/guice/CatalogModule.java new file mode 100644 index 000000000000..b0a77adbca27 --- /dev/null +++ b/server/src/main/java/org/apache/druid/guice/CatalogModule.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.guice; + +import com.google.inject.Binder; +import com.google.inject.Module; +import org.apache.druid.catalog.CatalogStorage; +import org.apache.druid.catalog.CatalogUpdateNotifier; +import org.apache.druid.catalog.MetastoreManager; +import org.apache.druid.catalog.MetastoreManagerImpl; +import org.apache.druid.catalog.SchemaRegistry; +import org.apache.druid.catalog.SchemaRegistryImpl; +import org.apache.druid.metadata.catalog.CatalogManager; +import org.apache.druid.metadata.catalog.SQLCatalogManager; +import org.apache.druid.server.http.CatalogResource; + +/** + * Configures the catalog database on the Coordinator, along + * with its REST resource for CRUD updates and the notifier + * for push updates. + */ +public class CatalogModule implements Module +{ + @Override + public void configure(Binder binder) + { + // Database layer: only the SQL version is supported at present. + binder + .bind(CatalogManager.class) + .to(SQLCatalogManager.class) + .in(LazySingleton.class); + + // Storage abstraction used by the REST API, sits on top of the + // database layer. + binder + .bind(CatalogStorage.class) + .in(LazySingleton.class); + binder + .bind(MetastoreManager.class) + .to(MetastoreManagerImpl.class) + .in(LazySingleton.class); + + // At present, the set of schemas is fixed. + binder + .bind(SchemaRegistry.class) + .to(SchemaRegistryImpl.class) + .in(LazySingleton.class); + + // Push update notifier, which is lifecycle managed. No references, + // so force Guice to create the instance. (Lifecycle will also, if + // Guice hasn't done so.) + binder + .bind(CatalogUpdateNotifier.class) + .in(ManageLifecycle.class); + LifecycleModule.register(binder, CatalogUpdateNotifier.class); + + // Public REST API and private cache sync API. 
+ Jerseys.addResource(binder, CatalogResource.class); + } +} diff --git a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java index 931ae81774cc..5575108bc091 100644 --- a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java +++ b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java @@ -1227,7 +1227,7 @@ protected DataStoreMetadataUpdateResult updateDataSourceMetadataWithHandle( } /** - * Mark segments as unsed in a transaction. This method is idempotent in that if + * Mark segments as unused in a transaction. This method is idempotent in that if * the segments was already marked unused, it will return true. * * @param handle database handle diff --git a/server/src/main/java/org/apache/druid/metadata/MetadataRuleManager.java b/server/src/main/java/org/apache/druid/metadata/MetadataRuleManager.java index ea2b6e7461f8..eb2bec6b136f 100644 --- a/server/src/main/java/org/apache/druid/metadata/MetadataRuleManager.java +++ b/server/src/main/java/org/apache/druid/metadata/MetadataRuleManager.java @@ -44,7 +44,8 @@ public interface MetadataRuleManager boolean overrideRule(String dataSource, List rulesConfig, AuditInfo auditInfo); /** - * Remove rules for non-existence datasource (datasource with no segment) created older than the given timestamp. + * Remove rules for a non-existent datasource (datasource with no segments) + * created before than the given timestamp. 
* * @param timestamp timestamp in milliseconds * @return number of rules removed diff --git a/server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java b/server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java index 781a4b99c8f9..f7cf24ae7707 100644 --- a/server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java +++ b/server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java @@ -193,14 +193,14 @@ public void createTable(final String tableName, final Iterable sql) public Void withHandle(Handle handle) { if (!tableExists(handle, tableName)) { - log.info("Creating table[%s]", tableName); + log.info("Creating table [%s]", tableName); final Batch batch = handle.createBatch(); for (String s : sql) { batch.add(s); } batch.execute(); } else { - log.info("Table[%s] already exists", tableName); + log.info("Table [%s] already exists", tableName); } return null; } @@ -794,6 +794,36 @@ public void createAuditTable() } } + private void createTableDefnTable(final String tableName) + { + createTable( + tableName, + ImmutableList.of( + StringUtils.format( + "CREATE TABLE %s (\n" + + " schemaName VARCHAR(255) NOT NULL,\n" + + " name VARCHAR(255) NOT NULL,\n" + + " owner VARCHAR(255),\n" + + " creationTime BIGINT NOT NULL,\n" + + " updateTime BIGINT NOT NULL,\n" + + " state CHAR(1) NOT NULL,\n" + + " payload %s,\n" + + " PRIMARY KEY(schemaName, name)\n" + + ")", + tableName, getPayloadType() + ) + ) + ); + } + + @Override + public void createTableDefnTable() + { + if (config.get().isCreateTables()) { + createTableDefnTable(tablesConfigSupplier.get().getTableDefnTable()); + } + } + @Override public void deleteAllRecords(final String tableName) { @@ -821,4 +851,11 @@ public Void withHandle(Handle handle) log.warn(e, "Exception while deleting records from table"); } } + + public boolean isDuplicateRecordException(UnableToExecuteStatementException e) + { + // TODO(paul): Track down how to figure this out for each supported + 
// DB. + return false; + } } diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java new file mode 100644 index 000000000000..f0ea2c9ffeb3 --- /dev/null +++ b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata.catalog; + +import org.apache.druid.java.util.common.ISE; + +/** + * Manages catalog data. Used in Coordinator, which will be in either + * a leader or standby state. The Coordinator calls the {@link #start()} + * method when it becomes the leader, and calls {@link #stop()} when + * it loses leadership, or shuts down. 
+ */ +public interface CatalogManager +{ + enum TableState + { + ACTIVE("A"), + DELETING("D"); + + private final String code; + + private TableState(String code) + { + this.code = code; + } + + public String code() + { + return code; + } + + public static TableState fromCode(String code) + { + for (TableState state : values()) { + if (state.code.equals(code)) { + return state; + } + } + throw new ISE("Unknown TableState code: " + code); + } + } + + /** + * Thrown when an "optimistic lock" fails: the version of a + * catalog object being updated is not the same as + * the expected version. + */ + public class OutOfDateException extends Exception + { + public OutOfDateException(String msg) + { + super(msg); + } + } + + public class NotFoundException extends Exception + { + public NotFoundException(String msg) + { + super(msg); + } + } + + /** + * Indicates an attempt to insert a duplicate key into a table. + * This could indicate a logic error, or a race condition. It is + * generally not retryable: it is unrealistic to expect the other + * thread to helpfully delete the record it just added. + */ + public class DuplicateKeyException extends Exception + { + public DuplicateKeyException(String msg, Exception e) + { + super(msg, e); + } + } + + void start(); + + void stop(); + + TableDefnManager tables(); +} diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java new file mode 100644 index 000000000000..dd150d21e54b --- /dev/null +++ b/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata.catalog; + +import com.google.inject.Inject; +import org.apache.druid.catalog.MetastoreManager; +import org.apache.druid.guice.ManageLifecycle; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.lifecycle.LifecycleStart; + +@ManageLifecycle +public class SQLCatalogManager implements CatalogManager +{ + private final TableDefnManager tableManager; + + @Inject + public SQLCatalogManager(MetastoreManager metastoreManager) + { + if (!metastoreManager.isSql()) { + throw new ISE("SQLCatalogManager only works with SQL based metadata store at this time"); + } + tableManager = new SQLTableManager(metastoreManager); + } + + @Override + @LifecycleStart + public void start() + { + tableManager.createTableDefnTable(); + } + + @Override + public void stop() + { + } + + @Override + public TableDefnManager tables() + { + return tableManager; + } +} diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java new file mode 100644 index 000000000000..0da8171b14c8 --- /dev/null +++ b/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java @@ -0,0 +1,446 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import net.thisptr.jackson.jq.internal.misc.Lists; +import org.apache.druid.catalog.MetastoreManager; +import org.apache.druid.catalog.TableDefn; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.metadata.SQLMetadataConnector; +import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; +import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; +import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; +import org.apache.druid.metadata.catalog.CatalogManager.TableState; +import org.skife.jdbi.v2.Handle; +import org.skife.jdbi.v2.IDBI; +import org.skife.jdbi.v2.Query; +import org.skife.jdbi.v2.ResultIterator; +import org.skife.jdbi.v2.Update; +import org.skife.jdbi.v2.exceptions.CallbackFailedException; +import org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; +import org.skife.jdbi.v2.tweak.HandleCallback; + +import java.util.Deque; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentLinkedDeque; + +public class SQLTableManager implements TableDefnManager 
+{ + private static final String INSERT_TABLE = + "INSERT INTO %s\n" + + " (schemaName, name, owner, creationTime, updateTime, state, payload)\n" + + " VALUES(:schemaName, :name, :owner, :creationTime, :updateTime, :state, :payload)"; + + private static final String UPDATE_HEAD = + "UPDATE %s\n SET\n"; + + private static final String WHERE_TABLE_ID = + "WHERE schemaName = :schemaName\n" + + " AND name = :name\n"; + + private static final String SAFETY_CHECK = + " AND updateTime = :oldVersion"; + + private static final String UPDATE_DEFN_UNSAFE = + UPDATE_HEAD + + " payload = :payload,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String UPDATE_DEFN_SAFE = + UPDATE_DEFN_UNSAFE + + SAFETY_CHECK; + + private static final String UPDATE_STATE = + UPDATE_HEAD + + " state = :state,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String SELECT_TABLE = + "SELECT owner, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_ALL_TABLES = + "SELECT schemaName, name\n" + + "FROM %s\n" + + "ORDER BY schemaName, name"; + + private static final String SELECT_TABLES_IN_SCHEMA = + "SELECT name\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String SELECT_TABLE_DETAILS_IN_SCHEMA = + "SELECT name, owner, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String DELETE_TABLE = + "DELETE FROM %s\n" + + WHERE_TABLE_ID; + + private final SQLMetadataConnector connector; + private final ObjectMapper jsonMapper; + private final IDBI dbi; + private final String tableName; + private final Deque listeners = new ConcurrentLinkedDeque<>(); + + public SQLTableManager( + MetastoreManager metastoreManager + ) + { + this.connector = metastoreManager.sqlConnector(); + this.dbi = connector.getDBI(); + this.jsonMapper = 
metastoreManager.jsonMapper(); + this.tableName = metastoreManager.tablesConfig().getTableDefnTable(); + } + + @Override + public void createTableDefnTable() + { + connector.createTableDefnTable(); + } + + @Override + public long create(TableSpec table) throws DuplicateKeyException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws Exception + { + long updateTime = System.currentTimeMillis(); + Update stmt = handle.createStatement( + StringUtils.format(INSERT_TABLE, tableName) + ) + .bind("schemaName", table.resolveDbSchema()) + .bind("name", table.name()) + .bind("owner", table.owner()) + .bind("creationTime", updateTime) + .bind("updateTime", updateTime) + .bind("state", TableState.ACTIVE.code()) + .bind("payload", table.defn().toBytes(jsonMapper)); + try { + stmt.execute(); + } + catch (UnableToExecuteStatementException e) { + if (connector.isDuplicateRecordException(e)) { + throw new DuplicateKeyException( + "Tried to insert a duplicate table: " + table.sqlName(), + e); + } else { + throw e; + } + } + sendAddition(table, updateTime); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof DuplicateKeyException) { + throw (DuplicateKeyException) e.getCause(); + } + throw e; + } + } + + @Override + public TableSpec read(TableId id) + { + return dbi.withHandle( + new HandleCallback() + { + @Override + public TableSpec withHandle(Handle handle) throws Exception + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_TABLE, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()) + .bind("schemaName", id.schema()) + .bind("name", id.name()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableSpec( + id.schema(), + id.name(), + r.getString(1), + r.getLong(2), + r.getLong(3), + TableState.fromCode(r.getString(4)), + TableDefn.fromBytes(jsonMapper, r.getBytes(5)) + )) + .iterator(); + if 
(resultIterator.hasNext()) { + return resultIterator.next(); + } + return null; + } + } + ); + } + + @Override + public long updateDefn(TableId id, TableDefn defn, long oldVersion) throws OutOfDateException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws Exception + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_SAFE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("payload", defn.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .bind("oldVersion", oldVersion) + .execute(); + if (updateCount == 0) { + throw new OutOfDateException( + StringUtils.format( + "Table %s: not found or update version does not match DB version", + id.sqlName())); + } + sendUpdate(id); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof OutOfDateException) { + throw (OutOfDateException) e.getCause(); + } + throw e; + } + } + + @Override + public long updateDefn(TableId id, TableDefn defn) throws NotFoundException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws Exception + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_UNSAFE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("payload", defn.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .execute(); + if (updateCount == 0) { + throw new NotFoundException( + StringUtils.format( + "Table %s: not found", + id.sqlName())); + } + sendUpdate(id); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof NotFoundException) { + throw (NotFoundException) e.getCause(); + } + throw e; + } + } + + @Override + public long markDeleting(TableId id) + { + return 
dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws Exception + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_STATE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("updateTime", updateTime) + .bind("state", TableState.DELETING.code()) + .execute(); + sendDeletion(id); + return updateCount == 1 ? updateTime : 0; + } + } + ); + } + + @Override + public boolean delete(TableId id) + { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Boolean withHandle(Handle handle) throws Exception + { + int updateCount = handle.createStatement( + StringUtils.format(DELETE_TABLE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .execute(); + sendDeletion(id); + return updateCount > 0; + } + } + ); + } + + @Override + public List list() + { + return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) throws Exception + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_ALL_TABLES, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableId(r.getString(1), r.getString(2))) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public List list(String dbSchema) + { + return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) throws Exception + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_TABLES_IN_SCHEMA, tableName) + ) + .bind("schemaName", dbSchema) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + r.getString(1)) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public List listDetails(String dbSchema) + { + 
return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) throws Exception + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_TABLE_DETAILS_IN_SCHEMA, tableName) + ) + .bind("schemaName", dbSchema) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableSpec( + dbSchema, + r.getString(1), + r.getString(2), + r.getLong(3), + r.getLong(4), + TableState.fromCode(r.getString(5)), + TableDefn.fromBytes(jsonMapper, r.getBytes(6)))) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public synchronized void register(Listener listener) + { + listeners.add(listener); + } + + protected synchronized void sendAddition(TableSpec table, long updateTime) + { + if (listeners.isEmpty()) { + return; + } + TableSpec newTable = table.fromInsert(table.dbSchema(), updateTime); + for (Listener listener : listeners) { + listener.added(newTable); + } + } + + protected synchronized void sendUpdate(TableId id) + { + if (listeners.isEmpty()) { + return; + } + TableSpec updatedTable = read(id); + for (Listener listener : listeners) { + listener.updated(updatedTable); + } + } + + protected synchronized void sendDeletion(TableId id) + { + for (Listener listener : listeners) { + listener.deleted(id); + } + } +} diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java new file mode 100644 index 000000000000..510c37a70717 --- /dev/null +++ b/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata.catalog; + +import org.apache.druid.catalog.TableDefn; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; +import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; +import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; + +import javax.annotation.Nullable; + +import java.util.List; + +/** + * The Table Manager performs detailed CRUD operations on the + * catalog tables table. Higher-level operations appear + * elsewhere. + */ +public interface TableDefnManager +{ + interface Listener + { + void added(TableSpec table); + void updated(TableSpec table); + void deleted(TableId id); + } + + void register(Listener listener); + void createTableDefnTable(); + + /** + * Create a table entry. + * + * @return the version of the newly created table. Call + * {@link TableDefnRecord#asUpdate(long)} if you want a new + * {@link TableDefnRecord} with the new version. + * @throws DuplicateKeyException if the row is a duplicate + * (schema, name) pair. This generally indicates a code error, + * or since our code is perfect, a race condition or a DB + * update outside of Druid. 
In any event, the error is not + * retryable: the user should pick another name, or update the + * existing table + */ + long create(TableSpec table) throws DuplicateKeyException; + + /** + * Update a table definition, but only if the database entry is at + * the given {@code oldVersion}. + */ + long updateDefn(TableId id, TableDefn defn, long oldVersion) throws OutOfDateException; + + /** + * Update a table definition, overwriting any current content. + * This is a potential race condition if this is a partial update + * because of the possibility of another user doing an update since the + * read. Fine when the goal is to replace the entire definition. + */ + long updateDefn(TableId id, TableDefn defn) throws NotFoundException; + + /** + * Move the table to the deleting state. No version check: fine + * if the table is already in the deleting state. Does nothing if the + * table does not exist. + * + * @return new table update timestamp, or 0 if the table does not + * exist + */ + long markDeleting(TableId id); + + /** + * Read the table record for the given ID. + * + * @return the table record, or {@code null} if the entry is not + * found in the DB. + */ + @Nullable TableSpec read(TableId id); + + /** + * Delete the table record for the given ID. Essentially does a + * "DELETE IF EXISTS". There is no version check. Delete should be + * called only when there are no segments left for the table: use + * {@link #markDeleting(TableId)} to indicate that the segments are + * being deleted. Call this method after deletion is complete. + *

+ * Does not cascade deletes yet. Eventually, should delete all entries + * for the table. + * + * @return {@code true} if the table exists and was deleted, + * {@code false} if the table did not exist. + */ + boolean delete(TableId id); + + List list(); + List list(String dbSchema); + List listDetails(String dbSchema); +} diff --git a/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyConnector.java b/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyConnector.java index 19d0c6b04f16..2f6398dcb5f2 100644 --- a/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyConnector.java +++ b/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyConnector.java @@ -33,6 +33,7 @@ import org.apache.druid.metadata.SQLMetadataConnector; import org.skife.jdbi.v2.DBI; import org.skife.jdbi.v2.Handle; +import org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; import org.skife.jdbi.v2.tweak.HandleCallback; import java.sql.DatabaseMetaData; @@ -181,4 +182,13 @@ public void stop() log.info("Stopping DerbyConnector..."); storage.stop(); } + + @Override + public boolean isDuplicateRecordException(UnableToExecuteStatementException e) + { + // Done using class names to avoid a dependency on Derby for this one + // simple thing. 
+ return e.getCause() != null && + e.getCause().getClass().getSimpleName().equals("DerbySQLIntegrityConstraintViolationException"); + } } diff --git a/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyMetadataStorage.java b/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyMetadataStorage.java index 725c531626ca..c79e62c75b5b 100644 --- a/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyMetadataStorage.java +++ b/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyMetadataStorage.java @@ -42,7 +42,6 @@ public DerbyMetadataStorage(MetadataStorageConnectorConfig config) catch (Exception e) { throw new RuntimeException(e); } - } @Override @@ -55,6 +54,26 @@ public void start() catch (Exception e) { throw new RuntimeException(e); } + + // It takes a while for the Derby server to start in another + // thread. Ping to ensure it is ready. Saves ugly failure/retry + // loops elsewhere in startup. Those loops look alarming in the + // log file. + while (true) { + try { + server.ping(); + break; + } + catch (Exception e) { + log.info("Derby server not yet ready, still trying..."); + try { + Thread.sleep(100); + } + catch (InterruptedException e1) { + // Ignore + } + } + } } @Override diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java new file mode 100644 index 000000000000..20542cdc123d --- /dev/null +++ b/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.server.http; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes; +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.TableDefn; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.guice.annotations.Json; +import org.apache.druid.guice.annotations.Smile; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.server.security.Access; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; +import org.apache.druid.server.security.ResourceType; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import java.io.IOException; +import java.io.InputStream; + +@Path(CatalogListenerResource.BASE_URL) +public class CatalogListenerResource +{ + public static final String BASE_URL = "/druid/broker/v1/catalog"; + public static final String SYNC_URL = "/sync"; + + private final CatalogListener listener; + private final AuthorizerMapper authorizerMapper; + private final ObjectMapper smileMapper; + private final ObjectMapper jsonMapper; + + @Inject + public 
CatalogListenerResource( + final CatalogListener listener, + @Smile final ObjectMapper smileMapper, + @Json final ObjectMapper jsonMapper, + final AuthorizerMapper authorizerMapper) + { + this.listener = listener; + this.authorizerMapper = authorizerMapper; + this.smileMapper = smileMapper; + this.jsonMapper = jsonMapper; + } + + @POST + @Path(SYNC_URL) + @Consumes({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE}) + public Response syncTable( + final InputStream inputStream, + @Context final HttpServletRequest req) + { + Response resp = checkAuth(req); + if (resp != null) { + return resp; + } + final String reqContentType = req.getContentType(); + final boolean isSmile = SmileMediaTypes.APPLICATION_JACKSON_SMILE.equals(reqContentType); + final ObjectMapper mapper = isSmile ? smileMapper : jsonMapper; + TableSpec tableSpec; + try { + tableSpec = mapper.readValue(inputStream, TableSpec.class); + } + catch (IOException e) { + return Response.serverError().entity(e.getMessage()).build(); + } + TableDefn defn = tableSpec.defn(); + if (defn instanceof TableDefn.Tombstone) { + listener.deleted(tableSpec.id()); + } else { + listener.updated(tableSpec); + } + return Response.status(Response.Status.ACCEPTED).build(); + } + + private Response checkAuth(final HttpServletRequest request) + { + final ResourceAction resourceAction = new ResourceAction( + new Resource("CONFIG", ResourceType.CONFIG), + Action.WRITE + ); + + final Access authResult = AuthorizationUtils.authorizeResourceAction( + request, + resourceAction, + authorizerMapper + ); + + if (authResult.isAllowed()) { + return null; + } + return Response.status(Response.Status.FORBIDDEN) + .type(MediaType.TEXT_PLAIN) + .entity(StringUtils.format("Access-Check-Result: %s", authResult.toString())) + .build(); + } +} diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java new file mode 100644 index 
000000000000..5f3b441e51fd --- /dev/null +++ b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java @@ -0,0 +1,490 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.server.http; + +import com.google.common.base.Strings; +import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.apache.druid.catalog.Actions; +import org.apache.druid.catalog.CatalogStorage; +import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.catalog.TableDefn; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; +import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; +import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; +import org.apache.druid.metadata.catalog.TableDefnManager; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import 
org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.server.security.ResourceType; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * REST endpoint for user and internal catalog actions. Catalog actions + * occur at the global level (all schemas), the schema level, or the + * table level. + * + * @see {@link CatalogListenerResource} for the client-side API. + */ +@Path(CatalogResource.ROOT_PATH) +public class CatalogResource +{ + public static final String ROOT_PATH = "/druid/coordinator/v1/catalog"; + + private final CatalogStorage catalog; + + @Inject + public CatalogResource(CatalogStorage catalog) + { + this.catalog = catalog; + } + + /** + * Create a new table within the indicated schema. + * + * @param dbSchema Druid schema. Must be a valid, writable schema + * for which the user has write access. + * @param table The table definition to create. + * @param ifNew Whether to skip the action if the table already exists. + * This is the same as the SQL IF NOT EXISTS clause. If {@code false}, + * then an error is raised if the table exists. If {@code true}, then + * the action silently does nothing if the table exists. Primarily for + * use in scripts. + * @param req the HTTP request used for authorization. 
+ * @return the version number of the table + */ + @POST + @Path("/tables") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response createTable( + TableSpec table, + @QueryParam("ifnew") boolean ifNew, + @Context final HttpServletRequest req) + { + String dbSchema = table.resolveDbSchema(); + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + SchemaDefn schema = result.rhs; + if (!schema.writable()) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format("Cannot create tables in schema %s", dbSchema)); + } + table = table.withSchema(dbSchema); + try { + table.validate(); + } + catch (IAE e) { + return Actions.badRequest(Actions.INVALID, e.getMessage()); + } + TableDefn defn = table.defn(); + if (!schema.accepts(defn)) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Cannot create tables of type %s in schema %s", + defn == null ? "null" : defn.getClass().getSimpleName(), + dbSchema)); + } + try { + catalog.authorizer().authorizeTable(schema, table.name(), Action.WRITE, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + try { + long createVersion = catalog.tables().create(table); + return Actions.okWithVersion(createVersion); + } + catch (DuplicateKeyException e) { + if (!ifNew) { + return Actions.badRequest( + Actions.DUPLICATE_ERROR, + StringUtils.format( + "A table of name %s.%s aleady exists", + table.dbSchema(), + table.name())); + } else { + return Actions.okWithVersion(0); + } + } + catch (Exception e) { + return Actions.exception(e); + } + } + + /** + * Update a table within the given schema. + * + * @param dbSchema The name of the Druid schema, which must be writable + * and the user must have at least read access. + * @param name The name of the table definition to modify. The user must + * have write access to the table. + * @param defn The new table definition. 
+   * @param version An optional table version. If provided, the metadata DB
+   *        entry for the table must be at this exact version or the update
+   *        will fail. (Provides "optimistic locking.") If omitted (that is,
+   *        if zero), then no update conflict check is done.
+   * @param req the HTTP request used for authorization.
+   * @return the new version number of the table
+   */
+  @POST
+  @Path("/tables/{dbSchema}/{name}")
+  @Consumes(MediaType.APPLICATION_JSON)
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response updateTableDefn(
+      @PathParam("dbSchema") String dbSchema,
+      @PathParam("name") String name,
+      TableDefn defn,
+      @QueryParam("version") long version,
+      @Context final HttpServletRequest req)
+  {
+    try {
+      if (defn != null) {
+        defn.validate();
+      }
+    }
+    catch (IAE e) {
+      return Actions.badRequest(Actions.INVALID, e.getMessage());
+    }
+    Pair result = validateSchema(dbSchema);
+    if (result.lhs != null) {
+      return result.lhs;
+    }
+    if (Strings.isNullOrEmpty(name)) {
+      return Actions.badRequest(Actions.INVALID, "Table name is required");
+    }
+    SchemaDefn schema = result.rhs;
+    if (!schema.writable()) {
+      return Actions.badRequest(
+          Actions.INVALID,
+          StringUtils.format("Cannot update tables in schema %s", dbSchema));
+    }
+    if (!schema.accepts(defn)) {
+      return Actions.badRequest(
+          Actions.INVALID,
+          StringUtils.format(
+              "Cannot update tables to type %s in schema %s",
+              defn == null ?
"null" : defn.getClass().getSimpleName(),
+              dbSchema));
+    }
+    try {
+      catalog.authorizer().authorizeTable(schema, name, Action.WRITE, req);
+    }
+    catch (ForbiddenException e) {
+      return Actions.forbidden(e);
+    }
+    try {
+      TableDefnManager tableMgr = catalog.tables();
+      TableId tableId = new TableId(dbSchema, name);
+      long newVersion;
+      if (version == 0) {
+        newVersion = tableMgr.updateDefn(tableId, defn);
+      } else {
+        newVersion = tableMgr.updateDefn(tableId, defn, version);
+      }
+      return Actions.okWithVersion(newVersion);
+    }
+    catch (NotFoundException e) {
+      return Response.status(Response.Status.NOT_FOUND).build();
+    }
+    catch (OutOfDateException e) {
+      return Response
+          .status(Response.Status.BAD_REQUEST)
+          .entity(
+              Actions.error(
+                  Actions.DUPLICATE_ERROR,
+                  "The table entry was not found or is older than the given version: reload and retry"))
+          .build();
+    }
+    catch (Exception e) {
+      return Actions.exception(e);
+    }
+  }
+
+  /**
+   * Retrieves the definition of the given table.
+   *

+ * Returns a 404 (NOT FOUND) error if the table definition does not exist. + * Note that this check is only for the definition; the table (or + * datasource) itself may exist. Similarly, this call may return a definition + * even if there is no datasource of the same name (typically occurs when + * the definition is created before the datasource itself.) + * + * @param dbSchema The Druid schema. The user must have read access. + * @param name The name of the table within the schema. The user must have + * read access. + * @param req the HTTP request used for authorization. + * @return the definition for the table, if any. + */ + @GET + @Path("/tables/{dbSchema}/{name}") + @Produces(MediaType.APPLICATION_JSON) + public Response getTable( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + @Context final HttpServletRequest req) + { + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + if (Strings.isNullOrEmpty(name)) { + return Actions.badRequest(Actions.INVALID, "Table name is required"); + } + try { + catalog.authorizer().authorizeTable(result.rhs, name, Action.READ, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + try { + TableId tableId = new TableId(dbSchema, name); + TableSpec table = catalog.tables().read(tableId); + if (table == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + return Response.ok().entity(table).build(); + } + catch (Exception e) { + return Actions.exception(e); + } + } + + /** + * Retrieves the list of all Druid schema names. At present, Druid does + * not impose security on schemas, only tables within schemas. + */ + @GET + @Path("/schemas") + @Produces(MediaType.APPLICATION_JSON) + public Response listSchemas( + @Context final HttpServletRequest req) + { + // No good resource to use: we really need finer-grain control. 
+ catalog.authorizer().authorizeAccess(ResourceType.STATE, "schemas", Action.READ, req); + return Response.ok().entity(catalog.schemaRegistry().names()).build(); + } + + /** + * Retrieves the list of all Druid table names for which the user has at + * least read access. + */ + @GET + @Path("/tables") + @Produces(MediaType.APPLICATION_JSON) + public Response listTables( + @Context final HttpServletRequest req) + { + List tables = catalog.tables().list(); + Iterable filtered = AuthorizationUtils.filterAuthorizedResources( + req, + tables, + tableId -> { + SchemaDefn schema = catalog.resolveSchema(tableId.schema()); + if (schema == null) { + // Should never occur. + return null; + } + return Collections.singletonList( + catalog.authorizer().resourceAction(schema, tableId.name(), Action.READ)); + }, + catalog.authorizer().mapper()); + List filteredList = new ArrayList<>(); + for (TableId tableId : filtered) { + filteredList.add(tableId); + } + return Response.ok().entity(filteredList).build(); + } + + /** + * Retrieves the list of table names within the given schema for which the + * user has at least read access. This returns the list of table definitions + * which will probably differ from the list of actual tables. For example, for + * the read-only schemas, there will be no table definitions. + * + * @param dbSchema The Druid schema to query. The user must have read access. 
+ */ + @GET + @Path("/tables/{dbSchema}") + @Produces(MediaType.APPLICATION_JSON) + public Response listTables( + @PathParam("dbSchema") String dbSchema, + @Context final HttpServletRequest req) + { + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + SchemaDefn schema = result.rhs; + List tables = catalog.tables().list(dbSchema); + Iterable filtered = AuthorizationUtils.filterAuthorizedResources( + req, + tables, + name -> + Collections.singletonList( + catalog.authorizer().resourceAction(schema, name, Action.READ)), + catalog.authorizer().mapper()); + return Response.ok().entity(Lists.newArrayList(filtered)).build(); + } + + /** + * Deletes the table definition (but not the underlying table or datasource) + * for the given schema and table. + * + * @param dbSchema The name of the schema that holds the table. + * @param name The name of the table definition to delete. The user must have + * write access. + * @param ifExists Optional flag. If {@code false} (the default), 404 (NOT FOUND) + * error is returned if the table does not exist. 
If {@code true},
+   *        then acts like the SQL IF EXISTS clause and does not return an
+   *        error if the table does not exist.
+   */
+  @DELETE
+  @Path("/tables/{dbSchema}/{name}")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response deleteTable(
+      @PathParam("dbSchema") String dbSchema,
+      @PathParam("name") String name,
+      @QueryParam("ifExists") boolean ifExists,
+      @Context final HttpServletRequest req)
+  {
+    TableId tableId = new TableId(dbSchema, name);
+    Pair result = validateSchema(tableId.schema());
+    if (result.lhs != null) {
+      return result.lhs;
+    }
+    SchemaDefn schema = result.rhs;
+    if (!schema.writable()) {
+      return Actions.badRequest(
+          Actions.INVALID,
+          StringUtils.format("Cannot delete tables from schema %s", tableId.schema()));
+    }
+    if (Strings.isNullOrEmpty(name)) {
+      return Actions.badRequest(Actions.INVALID, "Table name is required");
+    }
+    try {
+      catalog.authorizer().authorizeTable(schema, tableId.name(), Action.WRITE, req);
+    }
+    catch (ForbiddenException e) {
+      return Actions.forbidden(e);
+    }
+    try {
+      if (!catalog.tables().delete(tableId) && !ifExists) {
+        return Actions.notFound(tableId.sqlName());
+      }
+    }
+    catch (Exception e) {
+      return Actions.exception(e);
+    }
+    return Actions.ok();
+  }
+
+  public static final String SCHEMA_SYNC = "/sync/{dbSchema}";
+
+  /**
+   * Synchronization request from the Broker for a database schema. Requests all
+   * table definitions known to the catalog. Used to prime a cache on first access.
+   * After that, the Coordinator will push updates to Brokers. Returns the full
+   * list of table details.
+   *
+   * It is expected that the number of table definitions will be of small or moderate
+   * size, so no provision is made to handle very large lists.
+ */ + @GET + @Path(SCHEMA_SYNC) + @Produces(MediaType.APPLICATION_JSON) + public Response syncSchema( + @PathParam("dbSchema") String dbSchema, + @Context final HttpServletRequest req + ) + { + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + SchemaDefn schema = result.rhs; + List tables = catalog.tables().listDetails(dbSchema); + Iterable filtered = AuthorizationUtils.filterAuthorizedResources( + req, + tables, + table -> + Collections.singletonList( + catalog.authorizer().resourceAction(schema, table.name(), Action.READ)), + catalog.authorizer().mapper()); + return Response.ok().entity(Lists.newArrayList(filtered)).build(); + } + + public static final String TABLE_SYNC = "/sync/{dbSchema}/{name}"; + + /** + * Synchronization request from the Broker for information about a specific table + * (datasource). Done on first access to the table by any query. After that, the + * Coordinator pushes updates to the Broker on any changes. + */ + @GET + @Path(TABLE_SYNC) + @Produces(MediaType.APPLICATION_JSON) + public Response syncTable( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + @Context final HttpServletRequest req) + { + return getTable(dbSchema, name, req); + } + + private Pair validateSchema(String dbSchema) + { + if (Strings.isNullOrEmpty(dbSchema)) { + return Pair.of(Actions.badRequest(Actions.INVALID, "Schema name is required"), null); + } + SchemaDefn schema = catalog.resolveSchema(dbSchema); + if (schema == null) { + return Pair.of(Actions.notFound( + StringUtils.format("Unknown schema %s", dbSchema)), + null); + } + return Pair.of(null, schema); + } +} diff --git a/server/src/main/java/org/apache/druid/server/security/Access.java b/server/src/main/java/org/apache/druid/server/security/Access.java index 0c86a42a1a21..8686e853238d 100644 --- a/server/src/main/java/org/apache/druid/server/security/Access.java +++ b/server/src/main/java/org/apache/druid/server/security/Access.java @@ 
-24,6 +24,7 @@ public class Access { public static final Access OK = new Access(true); + public static final Access DENIED = new Access(false); private final boolean allowed; private final String message; @@ -52,6 +53,6 @@ public String getMessage() @Override public String toString() { - return StringUtils.format("Allowed:%s, Message:%s", allowed, message); + return StringUtils.format("Access{Allowed: %s, Message: %s}", allowed, message); } } diff --git a/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java b/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java index 361cb434a7c6..d41cebfe44c8 100644 --- a/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java +++ b/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java @@ -29,26 +29,26 @@ public class AuthenticationResult { /** - * the identity of the requester + * Identity of the requester. */ private final String identity; /** - * the name of the Authorizer that should handle the authenticated request. + * Name of the Authorizer that should handle the authenticated request. */ private final String authorizerName; - /** - * Name of authenticator whom created the results + * Name of authenticator whom created the results. * - * If you found your self asking why the authenticatedBy field can be null please read this + * If you found yourself asking why the authenticatedBy field can be null please read this * https://github.com/apache/druid/pull/5706#discussion_r185940889 */ @Nullable private final String authenticatedBy; + /** - * parameter containing additional context information from an Authenticator + * Additional context information from an Authenticator. 
*/ @Nullable private final Map context; diff --git a/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java b/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java index 2b4449826476..2d862f794274 100644 --- a/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java +++ b/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java @@ -240,7 +240,7 @@ public static Iterable filterAuthorizedResources( * If every resource-action in the iterable is authorized, the resource will be added to the filtered resources. * * If there is an authorization failure for one of the resource-actions, the resource will not be - * added to the returned filtered resources.. + * added to the returned filtered resources. * * If the resourceActionGenerator returns null for a resource, that resource will not be added to the filtered * resources. @@ -322,7 +322,6 @@ public static Map> filterAuthorizedRes final AuthorizerMapper authorizerMapper ) { - if (request.getAttribute(AuthConfig.DRUID_ALLOW_UNSECURED_PATH) != null) { return unfilteredResources; } diff --git a/server/src/main/java/org/apache/druid/server/security/ForbiddenException.java b/server/src/main/java/org/apache/druid/server/security/ForbiddenException.java index 7de37d677d84..e10c722be5ac 100644 --- a/server/src/main/java/org/apache/druid/server/security/ForbiddenException.java +++ b/server/src/main/java/org/apache/druid/server/security/ForbiddenException.java @@ -27,8 +27,8 @@ import java.util.function.Function; /** - * Throw this when a request is unauthorized and we want to send a 403 response back, Jersey exception mapper will - * take care of sending the response. + * Throw this when a request is unauthorized and we want to send a 403 response back, + * Jersey exception mapper will take care of sending the response. 
*/ public class ForbiddenException extends RuntimeException implements SanitizableException { @@ -48,7 +48,7 @@ public ForbiddenException(@JsonProperty("errorMessage") String msg) @JsonProperty public String getErrorMessage() { - return super.getMessage(); + return getMessage(); } @Override diff --git a/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java b/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java new file mode 100644 index 000000000000..4b02c5baa12a --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.catalog.RestUpdateSender.RestSender; +import org.apache.druid.java.util.http.client.response.StatusResponseHolder; +import org.apache.druid.server.DruidNode; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.junit.Test; + +import java.net.URL; +import java.util.Arrays; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.junit.Assert.assertEquals; + +public class CacheNotifierTest +{ + private static class MockSender implements Consumer + { + int sendCount; + + @Override + public void accept(byte[] update) + { + assertEquals(sendCount++, update[0]); + } + } + + @Test + public void testNotifier() + { + MockSender sender = new MockSender(); + CommonCacheNotifierEx notifier = new CommonCacheNotifierEx("test", sender); + notifier.start(); + for (int i = 0; i < 100; i++) { + byte[] msg = new byte[] {(byte) i}; + notifier.send(msg); + } + notifier.stopGracefully(); + assertEquals(100, sender.sendCount); + } + + private static class MockRestSender implements RestSender + { + int sendCount; + + @Override + public ListenableFuture send(URL listenerURL, byte[] serializedEntity) + { + sendCount++; + StatusResponseHolder holder = new StatusResponseHolder(HttpResponseStatus.ACCEPTED, new StringBuilder()); + return Futures.immediateFuture(holder); + } + } + + @Test + public void testRestUpdateSender() + { + DruidNode node1 = new DruidNode("service", "host1", true, 1000, 0, true, false); + DruidNode node2 = new DruidNode("service", "host2", true, 1000, 0, true, false); + List nodes = Arrays.asList(node1, node2); + Supplier> nodeSupplier = () -> nodes; + MockRestSender restSender = new MockRestSender(); + RestUpdateSender updateSender = new RestUpdateSender( + "test", + nodeSupplier, + restSender, + "/test/foo", + 
1000); + for (int i = 0; i < 100; i++) { + byte[] msg = new byte[] {(byte) i}; + updateSender.accept(msg); + } + assertEquals(200, restSender.sendCount); + } + + @Test + public void testStack() + { + DruidNode node1 = new DruidNode("service", "host1", true, 1000, 0, true, false); + DruidNode node2 = new DruidNode("service", "host2", true, 1000, 0, true, false); + List nodes = Arrays.asList(node1, node2); + Supplier> nodeSupplier = () -> nodes; + MockRestSender restSender = new MockRestSender(); + RestUpdateSender updateSender = new RestUpdateSender( + "test", + nodeSupplier, + restSender, + "/test/foo", + 1000); + CommonCacheNotifierEx notifier = new CommonCacheNotifierEx("test", updateSender); + notifier.start(); + for (int i = 0; i < 100; i++) { + byte[] msg = new byte[] {(byte) i}; + notifier.send(msg); + } + notifier.stopGracefully(); + assertEquals(200, restSender.sendCount); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java b/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java new file mode 100644 index 000000000000..5f6b70bb6b9d --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.metadata.catalog.CatalogManager.TableState; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +public class CatalogObjectTest +{ + @Test + public void testMinimalTable() + { + TableSpec table = new TableSpec( + TableId.DRUID_SCHEMA, + "foo", + "bob", + 10, + 20, + TableState.ACTIVE, + null); + table.validate(); + assertEquals(TableId.DRUID_SCHEMA, table.dbSchema()); + assertEquals("foo", table.name()); + assertEquals("bob", table.owner()); + assertEquals(10, table.creationTime()); + assertEquals(20, table.updateTime()); + assertEquals(TableState.ACTIVE, table.state()); + assertNull(table.defn()); + + try { + table = new TableSpec( + null, + "foo", + "bob", + 10, + 20, + TableState.ACTIVE, + null); + table.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + try { + table = new TableSpec( + TableId.DRUID_SCHEMA, + null, + "bob", + 10, + 20, + TableState.ACTIVE, + null); + table.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + } + + @Test + public void testDefn() + { + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .build(); + TableSpec table = new TableSpec( + TableId.DRUID_SCHEMA, + "foo", + "bob", + 10, + 20, + TableState.ACTIVE, + defn); + table.validate(); + assertSame(defn, table.defn()); + + try { + table = new TableSpec( + "wrong", + "foo", + "bob", + 10, + 20, + TableState.ACTIVE, + defn); + table.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + } + + @Test + public void testConversions() + { + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + 
.build(); + TableSpec table = TableSpec.newSegmentTable( + "ds", + defn); + assertEquals(TableId.datasource("ds"), table.id()); + assertEquals(TableState.ACTIVE, table.state()); + assertEquals(0, table.updateTime()); + assertSame(defn, table.defn()); + + TableSpec table2 = TableSpec.newSegmentTable("ds", defn); + assertEquals(table, table2); + + TableSpec table3 = table2.asUpdate(20); + assertEquals(20, table3.updateTime()); + } + + @Test + public void testEquals() + { + EqualsVerifier.forClass(TableSpec.class) + .usingGetClass() + .verify(); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java b/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java new file mode 100644 index 000000000000..7ecb3ab0ab14 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java @@ -0,0 +1,491 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.metadata.TestDerbyConnector; +import org.apache.druid.server.http.CatalogResource; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import javax.ws.rs.core.Response; + +import java.util.List; +import java.util.Map; + +import static org.apache.druid.catalog.DummyRequest.deleteBy; +import static org.apache.druid.catalog.DummyRequest.getBy; +import static org.apache.druid.catalog.DummyRequest.postBy; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Test of REST API operations for the table catalog. + */ +public class CatalogResourceTest +{ + @Rule + public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule(); + + private CatalogTests.DbFixture dbFixture; + private CatalogResource resource; + + @Before + public void setUp() + { + dbFixture = new CatalogTests.DbFixture(derbyConnectorRule); + resource = new CatalogResource(dbFixture.storage); + } + + @After + public void tearDown() + { + CatalogTests.tearDown(dbFixture); + } + + private static long getVersion(Response resp) + { + @SuppressWarnings("unchecked") + Map result = (Map) resp.getEntity(); + return (Long) result.get("version"); + } + + @Test + public void testCreate() + { + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .build(); + + // Missing schema name: infer the schema. + String tableName = "create"; + TableSpec table = TableSpec.newTable( + null, + "create1", + defn); + Response resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // Blank schema name: infer the schema. 
+ table = TableSpec.newTable( + "", + "create2", + defn); + resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // Missing table name + table = TableSpec.newTable(TableId.DRUID_SCHEMA, null, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Unknown schema + table = TableSpec.newTable("bogus", tableName, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Immutable schema + table = TableSpec.newTable(TableId.CATALOG_SCHEMA, tableName, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Wrong definition type. + table = TableSpec.newTable(TableId.INPUT_SCHEMA, tableName, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // No permissions + table = TableSpec.newTable(TableId.DRUID_SCHEMA, tableName, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Read permission + resp = resource.createTable(table, false, postBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Write permission + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getVersion(resp) > 0); + + // Duplicate + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), 
resp.getStatus()); + + // Duplicate, "if not exists" + resp = resource.createTable(table, true, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertEquals(0, getVersion(resp)); + + // Input source + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + InputFormat inputFormat = CatalogTests.csvFormat(); + InputSourceDefn inputDefn = InputSourceDefn + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .build(); + table = TableSpec.newTable(TableId.INPUT_SCHEMA, "input", inputDefn); + resp = resource.createTable(table, true, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + } + + @Test + public void testUpdate() + { + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .build(); + + // Missing schema name + String tableName = "update"; + Response resp = resource.updateTableDefn("", tableName, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Missing table name + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, null, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Unknown schema + resp = resource.updateTableDefn("bogus", tableName, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Immutable schema + resp = resource.updateTableDefn(TableId.CATALOG_SCHEMA, tableName, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // TODO: Wrong definition type. 
+ + // Does not exist + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Create the table + TableSpec table = TableSpec.newTable( + TableId.DRUID_SCHEMA, + "update", + defn); + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + long version = getVersion(resp); + + // No update permission + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, 0, postBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Out-of-date version + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, 10, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Valid version + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, version, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getVersion(resp) > version); + version = getVersion(resp); + + // Overwrite + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, 0, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getVersion(resp) > version); + } + + @Test + public void testRead() + { + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .build(); + + // Missing schema name + String tableName = "read"; + Response resp = resource.getTable("", tableName, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Missing table name + resp = resource.getTable(TableId.DRUID_SCHEMA, null, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), 
resp.getStatus()); + + // Unknown schema + resp = resource.getTable("bogus", tableName, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Does not exist + resp = resource.getTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Create the table + TableSpec table = TableSpec.newTable( + TableId.DRUID_SCHEMA, + tableName, + defn); + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + table = table.fromInsert(TableId.DRUID_SCHEMA, getVersion(resp)); + + // No read permission + resp = resource.getTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Valid + resp = resource.getTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + TableSpec read = (TableSpec) resp.getEntity(); + assertEquals(table, read); + + // Internal sync API + resp = resource.syncTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + read = (TableSpec) resp.getEntity(); + assertEquals(table, read); + } + + @SuppressWarnings("unchecked") + private List getTableIdList(Response resp) + { + return (List) resp.getEntity(); + } + + @SuppressWarnings("unchecked") + private List getTableList(Response resp) + { + return (List) resp.getEntity(); + } + + @SuppressWarnings("unchecked") + private List getDetailsList(Response resp) + { + return (List) resp.getEntity(); + } + + @Test + public void testList() + { + // No entries + Response resp = resource.listTables(getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List 
tableIds = getTableIdList(resp); + assertTrue(tableIds.isEmpty()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List tables = getTableList(resp); + assertTrue(tables.isEmpty()); + + // Missing schema + resp = resource.listTables(null, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Invalid schema + resp = resource.listTables("bogus", getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Create a table + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .build(); + TableSpec table = TableSpec.newTable(TableId.DRUID_SCHEMA, "list", defn); + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // No read access + resp = resource.listTables(getBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tableIds = getTableIdList(resp); + assertTrue(tableIds.isEmpty()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertTrue(tables.isEmpty()); + + // Read access + resp = resource.listTables(getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tableIds = getTableIdList(resp); + assertEquals(1, tableIds.size()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertEquals(1, tables.size()); + + resp = resource.listTables(TableId.SYSTEM_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), 
resp.getStatus()); + tables = getTableList(resp); + assertTrue(tables.isEmpty()); + + // Internal sync schema API + resp = resource.syncSchema(TableId.SYSTEM_SCHEMA, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getDetailsList(resp).isEmpty()); + + resp = resource.syncSchema(TableId.DRUID_SCHEMA, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List details = getDetailsList(resp); + assertEquals(1, details.size()); + } + + @Test + public void testDelete() + { + // Missing schema name + String tableName = "delete"; + Response resp = resource.deleteTable("", tableName, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Missing table name + resp = resource.deleteTable(TableId.DRUID_SCHEMA, null, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Unknown schema + resp = resource.deleteTable("bogus", tableName, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Immutable schema + resp = resource.deleteTable(TableId.CATALOG_SCHEMA, tableName, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Does not exist + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, true, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // Create the table + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .build(); + TableSpec table = TableSpec.newTable( + TableId.DRUID_SCHEMA, + 
tableName, + defn); + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // No write permission + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, false, deleteBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Write permission + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, false, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, false, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, true, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + } + + @Test + public void testLifecycle() + { + // Operations for one table - create + String table1Name = "lifecycle1"; + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .build(); + TableSpec table = TableSpec.newTable(TableId.DRUID_SCHEMA, table1Name, defn); + Response resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + long version = getVersion(resp); + table = table.fromInsert(TableId.DRUID_SCHEMA, version); + + // read + resp = resource.getTable(TableId.DRUID_SCHEMA, table1Name, postBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + TableSpec read = (TableSpec) resp.getEntity(); + assertEquals(table, read); + + // list + resp = resource.listTables(getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List tableIds = getTableIdList(resp); + assertEquals(1, tableIds.size()); + assertEquals(table.id(), 
tableIds.get(0)); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List tables = getTableList(resp); + assertEquals(1, tables.size()); + assertEquals(table.name(), tables.get(0)); + + // update + DatasourceDefn defn2 = DatasourceDefn.builder() + .segmentGranularity("PT1H") + .build(); + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, table1Name, defn2, version, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getVersion(resp) > version); + version = getVersion(resp); + + // verify update + resp = resource.getTable(TableId.DRUID_SCHEMA, table1Name, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + read = (TableSpec) resp.getEntity(); + assertEquals(table.creationTime(), read.creationTime()); + assertEquals(version, read.updateTime()); + assertEquals(defn2, read.defn()); + + // add second table + String table2Name = "lifecycle2"; + TableSpec table2 = TableSpec.newTable(TableId.DRUID_SCHEMA, table2Name, defn); + resp = resource.createTable(table2, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // verify lists + resp = resource.listTables(getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tableIds = getTableIdList(resp); + assertEquals(2, tableIds.size()); + assertEquals(table.id(), tableIds.get(0)); + assertEquals(table2.id(), tableIds.get(1)); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertEquals(2, tables.size()); + assertEquals(table.name(), tables.get(0)); + assertEquals(table2.name(), tables.get(1)); + + // delete and verify + resp = 
resource.deleteTable(TableId.DRUID_SCHEMA, table1Name, false, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertEquals(1, tables.size()); + + resp = resource.deleteTable(TableId.DRUID_SCHEMA, table2Name, false, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertEquals(0, tables.size()); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogTests.java b/server/src/test/java/org/apache/druid/catalog/CatalogTests.java new file mode 100644 index 000000000000..83599667e7c5 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/CatalogTests.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.impl.CsvInputFormat; +import org.apache.druid.jackson.DefaultObjectMapper; +import org.apache.druid.metadata.TestDerbyConnector.DerbyConnectorRule; +import org.apache.druid.metadata.catalog.CatalogManager; +import org.apache.druid.metadata.catalog.SQLCatalogManager; + +import java.util.Arrays; + +public class CatalogTests +{ + public static InputFormat csvFormat() + { + return new CsvInputFormat( + Arrays.asList("x", "y", "z"), + null, // listDelimiter + false, // hasHeaderRow + false, // findColumnsFromHeader + 0 // skipHeaderRows + ); + } + + public static final ObjectMapper JSON_MAPPER = new DefaultObjectMapper(); + + public static class DbFixture + { + public CatalogManager manager; + public CatalogStorage storage; + + public DbFixture(DerbyConnectorRule derbyConnectorRule) + { + MetastoreManager metastoreMgr = new MetastoreManagerImpl( + JSON_MAPPER, + derbyConnectorRule.getConnector(), + () -> derbyConnectorRule.getMetadataConnectorConfig(), + derbyConnectorRule.metadataTablesConfigSupplier() + ); + manager = new SQLCatalogManager(metastoreMgr); + manager.start(); + storage = new CatalogStorage(manager, DummyRequest.AUTH_MAPPER); + } + + public void tearDown() + { + if (manager != null) { + manager.stop(); + manager = null; + } + } + } + + public static void tearDown(DbFixture fixture) + { + if (fixture != null) { + fixture.tearDown(); + } + } + +} diff --git a/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java b/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java new file mode 100644 index 000000000000..1c277395c9ba --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.java.util.common.IAE; +import org.junit.Test; + +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Test of validation and serialization of the catalog table definitions. 
+ */ +public class DatasourceDefnTest +{ + @Test + public void testMinimalBuilder() + { + // Minimum possible definition + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .build(); + + defn.validate(); + assertEquals("PT1D", defn.segmentGranularity()); + assertNull(defn.rollupGranularity()); + assertEquals(0, defn.targetSegmentRows()); + + DatasourceDefn copy = defn.toBuilder().build(); + assertEquals(defn, copy); + } + + @Test + public void testFullBuilder() + { + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + + defn.validate(); + assertEquals("PT1H", defn.segmentGranularity()); + assertEquals("PT1M", defn.rollupGranularity()); + assertEquals(1_000_000, defn.targetSegmentRows()); + + DatasourceDefn copy = defn.toBuilder().build(); + assertEquals(defn, copy); + } + + @Test + public void testProperties() + { + Map props = ImmutableMap.of( + "foo", 10, "bar", "mumble"); + DatasourceDefn defn = DatasourceDefn.builder() + .rollupGranularity("PT1M") + .properties(props) + .build(); + + defn.validate(); + assertEquals(props, defn.properties()); + + DatasourceDefn copy = defn.toBuilder().build(); + assertEquals(defn, copy); + } + + @Test + public void testColumns() + { + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .rollupGranularity("PT1M") + .column(DatasourceColumnDefn.builder("a").build()) + .column(DatasourceColumnDefn.builder("b").sqlType("VARCHAR").build()) + .column(DatasourceColumnDefn.builder("c").sqlType("BIGINT").measure("SUM").build()) + .build(); + + defn.validate(); + List columns = defn.columns(); + assertEquals(3, columns.size()); + assertTrue(columns.get(0) instanceof DatasourceColumnDefn); + assertEquals("a", columns.get(0).name()); + assertNull(columns.get(0).sqlType()); + assertTrue(columns.get(1) instanceof DatasourceColumnDefn); + assertEquals("b", columns.get(1).name()); + 
assertEquals("VARCHAR", columns.get(1).sqlType()); + assertTrue(columns.get(2) instanceof MeasureColumnDefn); + assertEquals("c", columns.get(2).name()); + assertEquals("BIGINT", columns.get(2).sqlType()); + assertEquals("SUM", ((MeasureColumnDefn) columns.get(2)).aggregateFn()); + + DatasourceDefn copy = defn.toBuilder().build(); + assertEquals(defn, copy); + + try { + defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .column("c", "FOO") + .build(); + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + try { + defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .column(DatasourceColumnDefn.builder("c").sqlType("BIGINT").measure("SUM").build()) + .build(); + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + try { + defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .column(DatasourceColumnDefn.builder("a").build()) + .column(DatasourceColumnDefn.builder("a").build()) + .build(); + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + } + + @Test + public void testValidation() + { + // Ignore rollup grain for detail table + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1H") + .build(); + + assertNull(defn.rollupGranularity()); + assertEquals("PT1H", defn.segmentGranularity()); + + // Negative segment size mapped to 0 + defn = DatasourceDefn.builder() + .segmentGranularity("PT1H") + .targetSegmentRows(-1) + .build(); + assertEquals(0, defn.targetSegmentRows()); + } + + @Test + public void testSerialization() + { + ObjectMapper mapper = new ObjectMapper(); + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + + // Round-trip + TableDefn defn2 = TableDefn.fromBytes(mapper, defn.toBytes(mapper)); + assertEquals(defn, defn2); + + // Sanity check of toString, which uses JSON + assertNotNull(defn.toString()); + } + + @Test + public void testEquals() + { + 
EqualsVerifier.forClass(DatasourceDefn.class) + .usingGetClass() + .verify(); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/DummyRequest.java b/server/src/test/java/org/apache/druid/catalog/DummyRequest.java new file mode 100644 index 000000000000..b2e26f63b3b5 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/DummyRequest.java @@ -0,0 +1,540 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.google.common.collect.ImmutableMap; +import org.apache.druid.server.security.Access; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthConfig; +import org.apache.druid.server.security.AuthenticationResult; +import org.apache.druid.server.security.Authorizer; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceType; + +import javax.servlet.AsyncContext; +import javax.servlet.DispatcherType; +import javax.servlet.RequestDispatcher; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.ServletInputStream; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.Cookie; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; +import javax.servlet.http.HttpUpgradeHandler; +import javax.servlet.http.Part; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.security.Principal; +import java.util.Collection; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * Test-only implementation of an HTTP request. Allows us to control + * aspects of the request without resorting to mocks. 
+ */ +public class DummyRequest implements HttpServletRequest +{ + protected static final String SUPER_USER = "super"; + protected static final String READER_USER = "reader"; + protected static final String WRITER_USER = "writer"; + protected static final String DENY_USER = "denyAll"; + + protected static final String TEST_AUTHORITY = "test"; + + protected static final String GET = "GET"; + protected static final String POST = "POST"; + protected static final String DELETE = "DELETE"; + + private static class TestAuthorizer implements Authorizer + { + @Override + public Access authorize(AuthenticationResult authenticationResult, Resource resource, Action action) + { + final String userName = authenticationResult.getIdentity(); + if (DummyRequest.SUPER_USER.equals(userName)) { + return Access.OK; + } + if (ResourceType.DATASOURCE.equals(resource.getType())) { + if ("forbidden".equals(resource.getName())) { + return Access.DENIED; + } + return new Access( + DummyRequest.WRITER_USER.equals(userName) || + DummyRequest.READER_USER.equals(userName) && action == Action.READ); + } + return Access.OK; + } + } + + protected static final AuthorizerMapper AUTH_MAPPER = new AuthorizerMapper( + ImmutableMap.of(DummyRequest.TEST_AUTHORITY, new TestAuthorizer())); + + private final String method; + private final Map attribs = new HashMap<>(); + private final String contentType; + + public DummyRequest(String method, String userName) + { + this(method, userName, null); + } + + public DummyRequest(String method, String userName, String contentType) + { + this.method = method; + AuthenticationResult authResult = new AuthenticationResult(userName, TEST_AUTHORITY, null, null); + attribs.put(AuthConfig.DRUID_AUTHENTICATION_RESULT, authResult); + this.contentType = contentType; + } + + public static HttpServletRequest postBy(String user) + { + return new DummyRequest(DummyRequest.POST, user); + } + + public static HttpServletRequest getBy(String user) + { + return new 
DummyRequest(DummyRequest.GET, user); + } + + public static HttpServletRequest deleteBy(String user) + { + return new DummyRequest(DummyRequest.DELETE, user); + } + + @Override + public Object getAttribute(String name) + { + return attribs.get(name); + } + + @Override + public Enumeration getAttributeNames() + { + return null; + } + + @Override + public String getCharacterEncoding() + { + return null; + } + + @Override + public void setCharacterEncoding(String env) throws UnsupportedEncodingException + { + } + + @Override + public int getContentLength() + { + return 0; + } + + @Override + public long getContentLengthLong() + { + return 0; + } + + @Override + public String getContentType() + { + return contentType; + } + + @Override + public ServletInputStream getInputStream() throws IOException + { + return null; + } + + @Override + public String getParameter(String name) + { + return null; + } + + @Override + public Enumeration getParameterNames() + { + return null; + } + + @Override + public String[] getParameterValues(String name) + { + return null; + } + + @Override + public Map getParameterMap() + { + return null; + } + + @Override + public String getProtocol() + { + return null; + } + + @Override + public String getScheme() + { + return null; + } + + @Override + public String getServerName() + { + return null; + } + + @Override + public int getServerPort() + { + return 0; + } + + @Override + public BufferedReader getReader() throws IOException + { + return null; + } + + @Override + public String getRemoteAddr() + { + return null; + } + + @Override + public String getRemoteHost() + { + return null; + } + + @Override + public void setAttribute(String name, Object o) + { + attribs.put(name, o); + } + + @Override + public void removeAttribute(String name) + { + } + + @Override + public Locale getLocale() + { + return null; + } + + @Override + public Enumeration getLocales() + { + return null; + } + + @Override + public boolean isSecure() + { + return false; + } + 
+ @Override + public RequestDispatcher getRequestDispatcher(String path) + { + return null; + } + + @Override + public String getRealPath(String path) + { + return null; + } + + @Override + public int getRemotePort() + { + return 0; + } + + @Override + public String getLocalName() + { + return null; + } + + @Override + public String getLocalAddr() + { + return null; + } + + @Override + public int getLocalPort() + { + return 0; + } + + @Override + public ServletContext getServletContext() + { + return null; + } + + @Override + public AsyncContext startAsync() throws IllegalStateException + { + return null; + } + + @Override + public AsyncContext startAsync(ServletRequest servletRequest, ServletResponse servletResponse) + throws IllegalStateException + { + return null; + } + + @Override + public boolean isAsyncStarted() + { + return false; + } + + @Override + public boolean isAsyncSupported() + { + return false; + } + + @Override + public AsyncContext getAsyncContext() + { + return null; + } + + @Override + public DispatcherType getDispatcherType() + { + return null; + } + + @Override + public String getAuthType() + { + return null; + } + + @Override + public Cookie[] getCookies() + { + return null; + } + + @Override + public long getDateHeader(String name) + { + return 0; + } + + @Override + public String getHeader(String name) + { + return null; + } + + @Override + public Enumeration getHeaders(String name) + { + return null; + } + + @Override + public Enumeration getHeaderNames() + { + return null; + } + + @Override + public int getIntHeader(String name) + { + return 0; + } + + @Override + public String getMethod() + { + return method; + } + + @Override + public String getPathInfo() + { + return null; + } + + @Override + public String getPathTranslated() + { + return null; + } + + @Override + public String getContextPath() + { + return null; + } + + @Override + public String getQueryString() + { + return null; + } + + @Override + public String getRemoteUser() + { 
+ return null; + } + + @Override + public boolean isUserInRole(String role) + { + return false; + } + + @Override + public Principal getUserPrincipal() + { + return null; + } + + @Override + public String getRequestedSessionId() + { + return null; + } + + @Override + public String getRequestURI() + { + return null; + } + + @Override + public StringBuffer getRequestURL() + { + return null; + } + + @Override + public String getServletPath() + { + return null; + } + + @Override + public HttpSession getSession(boolean create) + { + return null; + } + + @Override + public HttpSession getSession() + { + return null; + } + + @Override + public String changeSessionId() + { + return null; + } + + @Override + public boolean isRequestedSessionIdValid() + { + return false; + } + + @Override + public boolean isRequestedSessionIdFromCookie() + { + return false; + } + + @Override + public boolean isRequestedSessionIdFromURL() + { + return false; + } + + @Override + public boolean isRequestedSessionIdFromUrl() + { + return false; + } + + @Override + public boolean authenticate(HttpServletResponse response) throws IOException, ServletException + { + return false; + } + + @Override + public void login(String username, String password) throws ServletException + { + } + + @Override + public void logout() throws ServletException + { + } + + @Override + public Collection getParts() throws IOException, ServletException + { + return null; + } + + @Override + public Part getPart(String name) throws IOException, ServletException + { + return null; + } + + @Override + public T upgrade(Class handlerClass) throws IOException, ServletException + { + return null; + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/InputSourceDefnTest.java b/server/src/test/java/org/apache/druid/catalog/InputSourceDefnTest.java new file mode 100644 index 000000000000..2880e0112337 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/InputSourceDefnTest.java @@ -0,0 +1,175 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.java.util.common.IAE; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +public class InputSourceDefnTest +{ + @Test + public void testMinimalBuilder() + { + // Minimum possible definition + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + InputFormat inputFormat = CatalogTests.csvFormat(); + InputSourceDefn defn = InputSourceDefn + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .build(); + + defn.validate(); + assertSame(inputSource, defn.inputSource()); + assertSame(inputFormat, defn.format()); + List columns = defn.columns(); + assertEquals(1, columns.size()); + assertEquals("a", columns.get(0).name()); + 
assertEquals("varchar", columns.get(0).sqlType()); + + InputSourceDefn copy = defn.toBuilder().build(); + assertEquals(defn, copy); + } + + @Test + public void testValidation() + { + InputSourceDefn defn = InputSourceDefn.builder().build(); + try { + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + defn = InputSourceDefn + .builder() + .source(inputSource) + .build(); + try { + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + InputFormat inputFormat = CatalogTests.csvFormat(); + defn = InputSourceDefn + .builder() + .source(inputSource) + .format(inputFormat) + .build(); + try { + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + try { + defn = InputSourceDefn + .builder() + .source(inputSource) + .format(inputFormat) + .column(null, "VARCHAR") + .build(); + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + defn = InputSourceDefn + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", null) + .build(); + try { + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + defn = InputSourceDefn + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .column("a", "varchar") + .build(); + try { + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + } + + @Test + public void testSerialization() + { + ObjectMapper mapper = new ObjectMapper(); + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + InputFormat inputFormat = CatalogTests.csvFormat(); + InputSourceDefn defn = InputSourceDefn + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .build(); + + // Round-trip + TableDefn defn2 = TableDefn.fromBytes(mapper, defn.toBytes(mapper)); + assertEquals(defn, defn2); + + // Sanity check of toString, which uses JSON + assertNotNull(defn.toString()); + } + + @Test + public void testEquals() + { + 
EqualsVerifier.forClass(InputSourceDefn.class) + .usingGetClass() + .verify(); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java b/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java new file mode 100644 index 000000000000..4bb1a02cee31 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java @@ -0,0 +1,330 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.smile.SmileFactory; +import org.apache.druid.catalog.AbstractColumnMetadata.InputColumn; +import org.apache.druid.catalog.AbstractColumnMetadata.MeasureColumn; +import org.apache.druid.catalog.AbstractTableMetadata.DatasourceTable; +import org.apache.druid.catalog.AbstractTableMetadata.InputSourceTable; +import org.apache.druid.catalog.MetadataCatalog.ColumnKind; +import org.apache.druid.catalog.MetadataCatalog.ColumnMetadata; +import org.apache.druid.catalog.MetadataCatalog.TableMetadata; +import org.apache.druid.catalog.MetadataCatalog.TableType; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.metadata.TestDerbyConnector; +import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; +import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; +import org.apache.druid.segment.column.ColumnType; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class MetadataCatalogTest +{ + @Rule + public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule(); + + private CatalogTests.DbFixture dbFixture; + private CatalogStorage storage; + private ObjectMapper jsonMapper; + private ObjectMapper smileMapper; + + @Before + public void setUp() + { + dbFixture = new CatalogTests.DbFixture(derbyConnectorRule); + storage = dbFixture.storage; + jsonMapper = new ObjectMapper(); + 
smileMapper = new ObjectMapper(new SmileFactory()); + } + + @After + public void tearDown() + { + CatalogTests.tearDown(dbFixture); + } + + @Test + public void testDirect() throws DuplicateKeyException, OutOfDateException + { + populateCatalog(); + MetadataCatalog catalog = new LocalMetadataCatalog(storage, storage.schemaRegistry); + verifyInitial(catalog); + alterCatalog(); + verifyAltered(catalog); + } + + @Test + public void testCached() throws DuplicateKeyException, OutOfDateException + { + populateCatalog(); + CachedMetadataCatalog catalog = new CachedMetadataCatalog(storage, storage.schemaRegistry); + storage.register(catalog); + verifyInitial(catalog); + alterCatalog(); + verifyAltered(catalog); + + // Also test the deletion case + TableId table2 = TableId.datasource("table2"); + storage.tables().delete(table2); + assertNull(storage.tables().read(table2)); + + List tables = catalog.tables(TableId.DRUID_SCHEMA); + assertEquals(2, tables.size()); + assertEquals("table1", tables.get(0).id().name()); + assertEquals("table3", tables.get(1).id().name()); + } + + @Test + public void testRemoteWithJson() throws DuplicateKeyException, OutOfDateException + { + doTestRemote(false); + } + + @Test + public void testRemoteWithSmile() throws DuplicateKeyException, OutOfDateException + { + doTestRemote(true); + } + + private void doTestRemote(boolean useSmile) throws DuplicateKeyException, OutOfDateException + { + populateCatalog(); + MockCatalogSync sync = new MockCatalogSync(storage, jsonMapper, smileMapper, useSmile); + MetadataCatalog catalog = sync.catalog(); + storage.register(sync); + verifyInitial(catalog); + alterCatalog(); + verifyAltered(catalog); + + // Also test the deletion case + TableId table2 = TableId.datasource("table2"); + storage.tables().delete(table2); + assertNull(storage.tables().read(table2)); + + List tables = catalog.tables(TableId.DRUID_SCHEMA); + assertEquals(2, tables.size()); + assertEquals("table1", tables.get(0).id().name()); + 
assertEquals("table3", tables.get(1).id().name()); + } + + /** + * Populate the catalog with a few items via the catalog storage layer. + * @throws DuplicateKeyException + */ + private void populateCatalog() throws DuplicateKeyException + { + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .timeColumn() + .column("a", "VARCHAR") + .build(); + TableSpec table = TableSpec.newTable( + TableId.DRUID_SCHEMA, + "table1", + defn); + storage.tables().create(table); + + defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .rollupGranularity("PT1H") + .timeColumn() + .column("dim", "VARCHAR") + .measure("measure", "BIGINT", "SUM") + .build(); + table = TableSpec.newTable( + TableId.DRUID_SCHEMA, + "table2", + defn); + storage.tables().create(table); + + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + InputFormat inputFormat = CatalogTests.csvFormat(); + InputSourceDefn inputDefn = InputSourceDefn + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .build(); + table = TableSpec.newTable( + TableId.INPUT_SCHEMA, + "input", + inputDefn); + storage.tables().create(table); + } + + private void verifyInitial(MetadataCatalog catalog) + { + { + TableId id = TableId.datasource("table1"); + TableMetadata table = catalog.resolveTable(id); + assertEquals(id, table.id()); + assertTrue(table.updateTime() > 0); + assertEquals(TableType.DATASOURCE, table.type()); + + List cols = table.columns(); + assertEquals(2, cols.size()); + assertEquals("__time", cols.get(0).name()); + assertEquals("TIMESTAMP", cols.get(0).sqlType()); + assertEquals(ColumnKind.SIMPLE, cols.get(0).kind()); + assertEquals("a", cols.get(1).name()); + assertEquals("VARCHAR", cols.get(1).sqlType()); + assertEquals(ColumnKind.SIMPLE, cols.get(1).kind()); + assertSame(cols.get(0), table.column("__time")); + assertSame(cols.get(1), table.column("a")); + assertNull(table.column("b")); + + DatasourceTable dsTable = (DatasourceTable) 
table; + assertEquals("PT1D", dsTable.segmentGranularity()); + assertTrue(dsTable.isDetail()); + assertFalse(dsTable.isRollup()); + assertNull(dsTable.rollupGranularity()); + } + { + TableId id = TableId.datasource("table2"); + TableMetadata table = catalog.resolveTable(id); + assertEquals(id, table.id()); + assertTrue(table.updateTime() > 0); + assertEquals(TableType.DATASOURCE, table.type()); + + List cols = table.columns(); + assertEquals(3, cols.size()); + assertEquals("__time", cols.get(0).name()); + assertEquals("TIMESTAMP", cols.get(0).sqlType()); + assertEquals(ColumnKind.DIMENSION, cols.get(0).kind()); + assertEquals("dim", cols.get(1).name()); + assertEquals("VARCHAR", cols.get(1).sqlType()); + assertEquals(ColumnKind.DIMENSION, cols.get(1).kind()); + assertEquals("measure", cols.get(2).name()); + assertEquals("BIGINT", cols.get(2).sqlType()); + assertEquals(ColumnKind.MEASURE, cols.get(2).kind()); + assertEquals("SUM", ((MeasureColumn) cols.get(2)).aggFn()); + assertSame(cols.get(0), table.column("__time")); + assertSame(cols.get(1), table.column("dim")); + assertSame(cols.get(2), table.column("measure")); + + DatasourceTable dsTable = (DatasourceTable) table; + assertEquals("PT1D", dsTable.segmentGranularity()); + assertFalse(dsTable.isDetail()); + assertTrue(dsTable.isRollup()); + assertEquals("PT1H", dsTable.rollupGranularity()); + } + assertNull(catalog.resolveTable(TableId.datasource("table3"))); + { + TableId id = TableId.inputSource("input"); + TableMetadata table = catalog.resolveTable(id); + assertEquals(id, table.id()); + assertTrue(table.updateTime() > 0); + assertEquals(TableType.INPUT, table.type()); + + List cols = table.columns(); + assertEquals(1, cols.size()); + assertEquals("a", cols.get(0).name()); + assertEquals("varchar", cols.get(0).sqlType()); + assertEquals(ColumnKind.INPUT, cols.get(0).kind()); + assertEquals(ColumnType.STRING, ((InputColumn) cols.get(0)).druidType()); + + InputSourceTable inputTable = (InputSourceTable) table; + 
assertNotNull(inputTable.inputSource()); + assertNotNull(inputTable.format()); + } + + List tables = catalog.tables(TableId.DRUID_SCHEMA); + assertEquals(2, tables.size()); + assertEquals("table1", tables.get(0).id().name()); + assertEquals("table2", tables.get(1).id().name()); + + tables = catalog.tables(TableId.INPUT_SCHEMA); + assertEquals(1, tables.size()); + assertEquals("input", tables.get(0).id().name()); + } + + private void alterCatalog() throws DuplicateKeyException, OutOfDateException + { + // Add a column to table 1 + TableId id1 = TableId.datasource("table1"); + TableSpec table1 = storage.tables().read(id1); + assertNotNull(table1); + + DatasourceDefn defn = (DatasourceDefn) table1.defn(); + defn = defn.toBuilder() + .column("b", "DOUBLE") + .build(); + storage.tables().updateDefn(id1, defn, table1.updateTime()); + + // Create a table 3 + defn = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .timeColumn() + .column("x", "FLOAT") + .build(); + TableSpec table = TableSpec.newTable( + TableId.DRUID_SCHEMA, + "table3", + defn); + storage.tables().create(table); + } + + private void verifyAltered(MetadataCatalog catalog) + { + { + TableId id = TableId.datasource("table1"); + TableMetadata table = catalog.resolveTable(id); + + List cols = table.columns(); + assertEquals(3, cols.size()); + assertEquals("__time", cols.get(0).name()); + assertEquals("a", cols.get(1).name()); + assertEquals("b", cols.get(2).name()); + assertEquals("DOUBLE", cols.get(2).sqlType()); + assertEquals(ColumnKind.SIMPLE, cols.get(2).kind()); + assertSame(cols.get(2), table.column("b")); + } + { + TableId id = TableId.datasource("table3"); + TableMetadata table = catalog.resolveTable(id); + + List cols = table.columns(); + assertEquals(2, cols.size()); + assertEquals("__time", cols.get(0).name()); + assertEquals("x", cols.get(1).name()); + } + + List tables = catalog.tables(TableId.DRUID_SCHEMA); + assertEquals(3, tables.size()); + assertEquals("table1", 
tables.get(0).id().name()); + assertEquals("table2", tables.get(1).id().name()); + assertEquals("table3", tables.get(2).id().name()); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java b/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java new file mode 100644 index 000000000000..e61dd6dbb95d --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes; +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.server.http.CatalogListenerResource; + +import javax.ws.rs.core.MediaType; + +import java.io.ByteArrayInputStream; + +/** + * Simulates a network sync from catalog (Coordinator) to consumer (Broker). 
+ */ +public class MockCatalogSync implements CatalogListener +{ + private final CatalogListenerResource listenerResource; + private final CachedMetadataCatalog catalog; + private final boolean useSmile; + private final ObjectMapper smileMapper; + private final ObjectMapper jsonMapper; + + public MockCatalogSync( + CatalogStorage storage, + final ObjectMapper jsonMapper, + final ObjectMapper smileMapper, + boolean useSmile) + { + this.catalog = new CachedMetadataCatalog(storage, storage.schemaRegistry); + this.listenerResource = new CatalogListenerResource( + catalog, + smileMapper, + jsonMapper, + storage.authorizer().mapper()); + this.useSmile = useSmile; + this.smileMapper = smileMapper; + this.jsonMapper = jsonMapper; + } + + @Override + public void updated(TableSpec update) + { + doSync(update); + } + + private void doSync(TableSpec update) + { + byte[] encoded = update.toBytes(useSmile ? smileMapper : jsonMapper); + listenerResource.syncTable( + new ByteArrayInputStream(encoded), + new DummyRequest( + DummyRequest.POST, + DummyRequest.SUPER_USER, + useSmile ? 
SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON)); + } + + @Override + public void deleted(TableId tableId) + { + TableSpec spec = TableSpec.newTable( + tableId.schema(), + tableId.name(), + new TableDefn.Tombstone()); + doSync(spec); + } + + public MetadataCatalog catalog() + { + return catalog; + } +} diff --git a/server/src/test/java/org/apache/druid/metadata/SQLMetadataConnectorTest.java b/server/src/test/java/org/apache/druid/metadata/SQLMetadataConnectorTest.java index 1c192da475d5..ddc5fc321d8d 100644 --- a/server/src/test/java/org/apache/druid/metadata/SQLMetadataConnectorTest.java +++ b/server/src/test/java/org/apache/druid/metadata/SQLMetadataConnectorTest.java @@ -69,6 +69,26 @@ public void testCreateTables() tables.add(tablesConfig.getEntryTable(entryType)); tables.add(tablesConfig.getAuditTable()); tables.add(tablesConfig.getSupervisorTable()); + tables.add(tablesConfig.getPendingSegmentsTable()); + tables.add(tablesConfig.getDataSourceTable()); + + connector.getDBI().withHandle( + new HandleCallback() + { + @Override + public Void withHandle(Handle handle) + { + for (String table : tables) { + Assert.assertFalse( + StringUtils.format("table %s already created!", table), + connector.tableExists(handle, table) + ); + } + + return null; + } + } + ); connector.createSegmentTable(); connector.createConfigTable(); @@ -76,6 +96,8 @@ public void testCreateTables() connector.createTaskTables(); connector.createAuditTable(); connector.createSupervisorsTable(); + connector.createPendingSegmentsTable(); + connector.createDataSourceTable(); connector.getDBI().withHandle( new HandleCallback() diff --git a/server/src/test/java/org/apache/druid/metadata/catalog/TableIdTest.java b/server/src/test/java/org/apache/druid/metadata/catalog/TableIdTest.java new file mode 100644 index 000000000000..61f0c9c48035 --- /dev/null +++ b/server/src/test/java/org/apache/druid/metadata/catalog/TableIdTest.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata.catalog; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.catalog.TableId; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +/** + * Tests the various catalog table record objects. These are mostly + * just "data objects" that do nothing other than hold data. 
+ */ +public class TableIdTest +{ + @Test + public void testId() + { + TableId id1 = new TableId("schema", "table"); + assertEquals(id1, id1); + assertEquals("schema", id1.schema()); + assertEquals("table", id1.name()); + assertEquals("\"schema\".\"table\"", id1.sqlName()); + assertEquals(id1.sqlName(), id1.toString()); + + TableId id2 = TableId.datasource("ds"); + assertEquals(TableId.DRUID_SCHEMA, id2.schema()); + assertEquals("ds", id2.name()); + } + + @Test + public void testEquals() + { + EqualsVerifier.forClass(TableId.class) + .usingGetClass() + .verify(); + } +} diff --git a/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java new file mode 100644 index 000000000000..826a3f6e153f --- /dev/null +++ b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.metadata.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.catalog.DatasourceDefn; +import org.apache.druid.catalog.MetastoreManager; +import org.apache.druid.catalog.MetastoreManagerImpl; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.jackson.DefaultObjectMapper; +import org.apache.druid.metadata.TestDerbyConnector; +import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; +import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; +import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; +import org.apache.druid.metadata.catalog.CatalogManager.TableState; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class TableManagerTest +{ + private static final ObjectMapper JSON_MAPPER = new DefaultObjectMapper(); + @Rule + public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule(); + private CatalogManager manager; + + @Before + public void setUp() + { + MetastoreManager metastoreMgr = new MetastoreManagerImpl( + JSON_MAPPER, + derbyConnectorRule.getConnector(), + () -> derbyConnectorRule.getMetadataConnectorConfig(), + derbyConnectorRule.metadataTablesConfigSupplier() + ); + manager = new SQLCatalogManager(metastoreMgr); + manager.start(); + } + + @After + public void tearDown() + { + if (manager != null) { + manager.stop(); + manager = null; + } + } + + @Test + public void testCreate() throws DuplicateKeyException + { + TableDefnManager tableMgr = manager.tables(); + + DatasourceDefn defn = 
DatasourceDefn.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + TableSpec table = TableSpec.newSegmentTable("table1", defn); + + // Table does not exist, read returns nothing. + assertNull(tableMgr.read(table.id())); + + // Create the table + long version = tableMgr.create(table); + TableSpec created = table.fromInsert(table.dbSchema(), version); + + // Read the record + TableSpec read = tableMgr.read(table.id()); + assertEquals(created, read); + + // Try to create a second time + try { + tableMgr.create(table); + fail(); + } + catch (DuplicateKeyException e) { + // Expected + } + } + + @Test + public void testUpdate() throws DuplicateKeyException, OutOfDateException, NotFoundException + { + TableDefnManager tableMgr = manager.tables(); + + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + TableSpec table = TableSpec.newSegmentTable("table1", defn); + long version = tableMgr.create(table); + + // Change the definition + DatasourceDefn defn2 = DatasourceDefn.builder() + .segmentGranularity("PT1D") + .rollupGranularity("PT1H") + .targetSegmentRows(2_000_000) + .build(); + + try { + tableMgr.updateDefn(table.id(), defn2, 3); + fail(); + } + catch (OutOfDateException e) { + // expected + } + + assertEquals(version, tableMgr.read(table.id()).updateTime()); + long newVersion = tableMgr.updateDefn(table.id(), defn2, version); + TableSpec table3 = tableMgr.read(table.id()); + assertEquals(defn2, table3.defn()); + assertEquals(newVersion, table3.updateTime()); + + // Changing the state requires no version check + assertEquals(TableState.ACTIVE, table3.state()); + newVersion = tableMgr.markDeleting(table.id()); + TableSpec table4 = tableMgr.read(table.id()); + assertEquals(TableState.DELETING, table4.state()); + assertEquals(newVersion, table4.updateTime()); + + // Update: no version check + long newerVersion 
= tableMgr.updateDefn(table.id(), defn2); + assertTrue(newerVersion > newVersion); + } + + @Test + public void testDelete() throws DuplicateKeyException + { + TableDefnManager tableMgr = manager.tables(); + + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + TableSpec table = TableSpec.newSegmentTable("table1", defn); + + assertFalse(tableMgr.delete(table.id())); + tableMgr.create(table); + assertTrue(tableMgr.delete(table.id())); + assertFalse(tableMgr.delete(table.id())); + } + + @Test + public void testList() throws DuplicateKeyException + { + TableDefnManager tableMgr = manager.tables(); + + List list = tableMgr.list(); + assertTrue(list.isEmpty()); + + DatasourceDefn defn = DatasourceDefn.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + + // Create tables in inverse order + TableSpec table2 = TableSpec.newSegmentTable("table2", defn); + long version = tableMgr.create(table2); + table2 = table2.fromInsert(TableId.DRUID_SCHEMA, version); + TableSpec table1 = TableSpec.newSegmentTable("table1", defn); + long version1 = tableMgr.create(table1); + table1 = table1.fromInsert(TableId.DRUID_SCHEMA, version1); + + list = tableMgr.list(); + assertEquals(2, list.size()); + TableId id = list.get(0); + assertEquals(TableId.DRUID_SCHEMA, id.schema()); + assertEquals("table1", id.name()); + id = list.get(1); + assertEquals(TableId.DRUID_SCHEMA, id.schema()); + assertEquals("table2", id.name()); + + List names = tableMgr.list(TableId.DRUID_SCHEMA); + assertEquals(2, names.size()); + + names = tableMgr.list(TableId.SYSTEM_SCHEMA); + assertEquals(0, names.size()); + + List details = tableMgr.listDetails(TableId.DRUID_SCHEMA); + assertEquals(Arrays.asList(table1, table2), details); + } +} From cdc339bd197fabe01f517eea4ecd5a886ad5e7a8 Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Tue, 14 Jun 2022 19:41:49 -0700 Subject: [PATCH 
2/8] Build fixes --- .../metadata/MetadataStorageTablesConfig.java | 2 +- .../MetadataStorageTablesConfigTest.java | 48 +++++++++++++++++++ .../MetadataStorageUpdaterJobSpec.java | 4 +- .../MetadataStorageUpdaterJobSpecTest.java | 8 ++++ .../apache/druid/catalog/CatalogClient.java | 8 ++-- .../apache/druid/catalog/CatalogStorage.java | 2 +- .../druid/catalog/CatalogUpdateNotifier.java | 4 +- .../org/apache/druid/catalog/ColumnDefn.java | 4 +- .../druid/catalog/DatasourceColumnDefn.java | 2 +- .../apache/druid/catalog/InputColumnDefn.java | 2 +- .../druid/catalog/LocalMetadataCatalog.java | 2 +- .../apache/druid/catalog/MetadataCatalog.java | 4 +- .../apache/druid/catalog/SchemaRegistry.java | 2 +- .../org/apache/druid/catalog/TableId.java | 2 +- .../metadata/catalog/CatalogManager.java | 6 +-- .../metadata/catalog/SQLTableManager.java | 20 ++++---- .../metadata/catalog/TableDefnManager.java | 6 +-- .../druid/server/http/CatalogResource.java | 2 - .../apache/druid/catalog/DummyRequest.java | 33 ++++++------- .../metadata/catalog/TableManagerTest.java | 6 ++- 20 files changed, 112 insertions(+), 55 deletions(-) create mode 100644 core/src/test/java/org/apache/druid/metadata/MetadataStorageTablesConfigTest.java diff --git a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java index dc24734a5ba0..16e2ff1c0fc4 100644 --- a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java +++ b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java @@ -39,7 +39,7 @@ public static MetadataStorageTablesConfig fromBase(String base) public static final String TASK_ENTRY_TYPE = "task"; - private static final String DEFAULT_BASE = "druid"; + public static final String DEFAULT_BASE = "druid"; private final Map entryTables = new HashMap<>(); private final Map logTables = new HashMap<>(); diff --git 
a/core/src/test/java/org/apache/druid/metadata/MetadataStorageTablesConfigTest.java b/core/src/test/java/org/apache/druid/metadata/MetadataStorageTablesConfigTest.java new file mode 100644 index 000000000000..39a9c16268bc --- /dev/null +++ b/core/src/test/java/org/apache/druid/metadata/MetadataStorageTablesConfigTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class MetadataStorageTablesConfigTest +{ + /** + * Pretty lame test: mostly to get the static checks to not complain. 
+ */ + @Test + public void testDefaults() + { + MetadataStorageTablesConfig config = MetadataStorageTablesConfig.fromBase(null); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE, config.getBase()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_dataSource", config.getDataSourceTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_pendingSegments", config.getPendingSegmentsTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_segments", config.getSegmentsTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_rules", config.getRulesTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_config", config.getConfigTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_tasks", config.getTasksTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_tasklogs", config.getTaskLogTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_tasklocks", config.getTaskLockTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_audit", config.getAuditTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_supervisors", config.getSupervisorTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_tableDefn", config.getTableDefnTable()); + } +} diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java index 7805374d964d..39ffcc4aa23c 100644 --- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java +++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java @@ -82,8 +82,8 @@ public String getPassword() }; } - //Note: Currently it only supports configured segmentTable, other tables should be added if needed - //by the code using this + // Note: Currently it only supports configured 
segmentTable, other tables + // should be added if needed by the code using this + public MetadataStorageTablesConfig getMetadataStorageTablesConfig() + { + return MetadataStorageTablesConfig.fromBase(null); diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpecTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpecTest.java index 83484d0060d0..70974f38fa64 100644 --- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpecTest.java +++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpecTest.java @@ -53,6 +53,14 @@ public void testMetadaStorageConnectionConfigWithDefaultProviderPassword() throw ); } + @Test + public void testMetadataConfig() + { + // Fairly lame test: just makes the static checks happy. + MetadataStorageUpdaterJobSpec spec = new MetadataStorageUpdaterJobSpec(); + Assert.assertNotNull(spec.getMetadataStorageTablesConfig()); + } + private void testMetadataStorageUpdaterJobSpec( String segmentTable, String type, diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogClient.java b/server/src/main/java/org/apache/druid/catalog/CatalogClient.java index 49200ca07214..67a37b57dba4 100644 --- a/server/src/main/java/org/apache/druid/catalog/CatalogClient.java +++ b/server/src/main/java/org/apache/druid/catalog/CatalogClient.java @@ -21,18 +21,18 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; -import io.netty.handler.codec.http.HttpResponseStatus; import org.apache.druid.catalog.MetadataCatalog.CatalogSource; import org.apache.druid.client.coordinator.Coordinator; import org.apache.druid.discovery.DruidLeaderClient; import org.apache.druid.guice.annotations.Smile; import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.http.client.Request; import 
org.apache.druid.java.util.http.client.response.StringFullResponseHolder; import org.apache.druid.server.http.CatalogResource; -import org.codehaus.plexus.util.StringUtils; import org.jboss.netty.handler.codec.http.HttpHeaders; import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; import javax.inject.Inject; import javax.ws.rs.core.MediaType; @@ -122,11 +122,11 @@ private T send(String url, TypeReference typeRef) // is during shutdown. return null; } - if (responseHolder.getStatus().getCode() == HttpResponseStatus.NOT_FOUND.code()) { + if (responseHolder.getStatus().getCode() == HttpResponseStatus.NOT_FOUND.getCode()) { // Not found means the item disappeared. Returning null means "not found". return null; } - if (responseHolder.getStatus().getCode() != HttpResponseStatus.OK.code()) { + if (responseHolder.getStatus().getCode() != HttpResponseStatus.OK.getCode()) { throw new ISE("Unexpected status from catalog sync: " + responseHolder.getStatus()); } try { diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java index a45733679b55..4e8d74f1e9e2 100644 --- a/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java +++ b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java @@ -37,7 +37,7 @@ */ public class CatalogStorage implements CatalogUpdateProvider, CatalogSource { - public class ListenerAdapter implements TableDefnManager.Listener + public static class ListenerAdapter implements TableDefnManager.Listener { private final CatalogListener dest; diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java b/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java index f463335455c7..333b277dcb19 100644 --- a/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java +++ 
b/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java @@ -36,7 +36,7 @@ import javax.inject.Inject; -import java.util.Arrays; +import java.util.Collections; import java.util.function.Supplier; /** @@ -69,7 +69,7 @@ public CatalogUpdateNotifier( long timeoutMs = TIMEOUT_MS; this.smileMapper = smileMapper; Supplier> nodeSupplier = new ListeningNodeSupplier( - Arrays.asList(NodeRole.BROKER), + Collections.singletonList(NodeRole.BROKER), discoveryProvider); RestSender restSender = RestUpdateSender.httpClientSender(httpClient, Duration.millis(timeoutMs)); RestUpdateSender sender = new RestUpdateSender( diff --git a/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java index 2c948d8370c1..6af7ed14bd9f 100644 --- a/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java +++ b/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java @@ -24,8 +24,8 @@ import com.fasterxml.jackson.annotation.JsonSubTypes.Type; import com.fasterxml.jackson.annotation.JsonTypeInfo; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; -import org.apache.commons.lang.StringUtils; import org.apache.druid.guice.annotations.PublicApi; import org.apache.druid.java.util.common.IAE; import org.apache.druid.segment.column.ColumnType; @@ -78,7 +78,7 @@ public String sqlType() public void validate() { - if (StringUtils.isBlank(name)) { + if (Strings.isNullOrEmpty(name)) { throw new IAE("Column name is required"); } } diff --git a/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java index a60620a22d97..274fff8bea3b 100644 --- a/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java +++ b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java @@ -30,7 +30,7 @@ */ public class 
DatasourceColumnDefn extends ColumnDefn { - private final String TIME_COLUMN = "__time"; + private static final String TIME_COLUMN = "__time"; @JsonCreator public DatasourceColumnDefn( diff --git a/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java index 66a6f76e434c..fb09206a1277 100644 --- a/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java +++ b/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.curator.shaded.com.google.common.base.Strings; +import com.google.common.base.Strings; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; diff --git a/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java index f7966b36d884..06714ebc9bb5 100644 --- a/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java +++ b/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java @@ -31,7 +31,7 @@ /** * Metadata catalog which reads from the catalog storage. No caching. - * For testing, and as the Coodinator-side implementation of the remote + * For testing, and as the Coordinator-side implementation of the remote * synchronization protocol. 
*/ public class LocalMetadataCatalog implements MetadataCatalog diff --git a/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java index a558a3d8c8e1..e1c73bda234f 100644 --- a/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java +++ b/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java @@ -103,13 +103,13 @@ interface InputSourceMetadata extends TableMetadata { } - public interface CatalogSource + interface CatalogSource { List tablesForSchema(String dbSchema); TableSpec table(TableId id); } - public interface CatalogListener + interface CatalogListener { void updated(TableSpec update); void deleted(TableId tableId); diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java index ac6456b136a5..0c26476e9849 100644 --- a/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java @@ -31,7 +31,7 @@ */ public interface SchemaRegistry { - public interface SchemaDefn + interface SchemaDefn { String name(); String securityResource(); diff --git a/server/src/main/java/org/apache/druid/catalog/TableId.java b/server/src/main/java/org/apache/druid/catalog/TableId.java index 5dd18abc4a0a..33103835410a 100644 --- a/server/src/main/java/org/apache/druid/catalog/TableId.java +++ b/server/src/main/java/org/apache/druid/catalog/TableId.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.curator.shaded.com.google.common.base.Objects; +import com.google.common.base.Objects; import org.apache.druid.java.util.common.StringUtils; /** diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java index 
f0ea2c9ffeb3..f0319f2a755c 100644 --- a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java +++ b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java @@ -62,7 +62,7 @@ public static TableState fromCode(String code) * catalog object being updated is not the same as that of * the expected version. */ - public class OutOfDateException extends Exception + class OutOfDateException extends Exception { public OutOfDateException(String msg) { @@ -70,7 +70,7 @@ public OutOfDateException(String msg) } } - public class NotFoundException extends Exception + class NotFoundException extends Exception { public NotFoundException(String msg) { @@ -84,7 +84,7 @@ public NotFoundException(String msg) * generally not retryable: it us unrealistic to expect the other * thread to helpfully delete the record it just added. */ - public class DuplicateKeyException extends Exception + class DuplicateKeyException extends Exception { public DuplicateKeyException(String msg, Exception e) { diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java index 0da8171b14c8..6faf88c6df7d 100644 --- a/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java +++ b/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java @@ -20,7 +20,7 @@ package org.apache.druid.metadata.catalog; import com.fasterxml.jackson.databind.ObjectMapper; -import net.thisptr.jackson.jq.internal.misc.Lists; +import com.google.common.collect.Lists; import org.apache.druid.catalog.MetastoreManager; import org.apache.druid.catalog.TableDefn; import org.apache.druid.catalog.TableId; @@ -134,7 +134,7 @@ public long create(TableSpec table) throws DuplicateKeyException new HandleCallback() { @Override - public Long withHandle(Handle handle) throws Exception + public Long withHandle(Handle handle) throws DuplicateKeyException { long updateTime = 
System.currentTimeMillis(); Update stmt = handle.createStatement( @@ -180,7 +180,7 @@ public TableSpec read(TableId id) new HandleCallback() { @Override - public TableSpec withHandle(Handle handle) throws Exception + public TableSpec withHandle(Handle handle) { Query> query = handle.createQuery( StringUtils.format(SELECT_TABLE, tableName) @@ -217,7 +217,7 @@ public long updateDefn(TableId id, TableDefn defn, long oldVersion) throws OutOf new HandleCallback() { @Override - public Long withHandle(Handle handle) throws Exception + public Long withHandle(Handle handle) throws OutOfDateException { long updateTime = System.currentTimeMillis(); int updateCount = handle.createStatement( @@ -256,7 +256,7 @@ public long updateDefn(TableId id, TableDefn defn) throws NotFoundException new HandleCallback() { @Override - public Long withHandle(Handle handle) throws Exception + public Long withHandle(Handle handle) throws NotFoundException { long updateTime = System.currentTimeMillis(); int updateCount = handle.createStatement( @@ -293,7 +293,7 @@ public long markDeleting(TableId id) new HandleCallback() { @Override - public Long withHandle(Handle handle) throws Exception + public Long withHandle(Handle handle) { long updateTime = System.currentTimeMillis(); int updateCount = handle.createStatement( @@ -317,7 +317,7 @@ public boolean delete(TableId id) new HandleCallback() { @Override - public Boolean withHandle(Handle handle) throws Exception + public Boolean withHandle(Handle handle) { int updateCount = handle.createStatement( StringUtils.format(DELETE_TABLE, tableName)) @@ -338,7 +338,7 @@ public List list() new HandleCallback>() { @Override - public List withHandle(Handle handle) throws Exception + public List withHandle(Handle handle) { Query> query = handle.createQuery( StringUtils.format(SELECT_ALL_TABLES, tableName) @@ -361,7 +361,7 @@ public List list(String dbSchema) new HandleCallback>() { @Override - public List withHandle(Handle handle) throws Exception + public List 
withHandle(Handle handle) { Query> query = handle.createQuery( StringUtils.format(SELECT_TABLES_IN_SCHEMA, tableName) @@ -385,7 +385,7 @@ public List listDetails(String dbSchema) new HandleCallback>() { @Override - public List withHandle(Handle handle) throws Exception + public List withHandle(Handle handle) { Query> query = handle.createQuery( StringUtils.format(SELECT_TABLE_DETAILS_IN_SCHEMA, tableName) diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java index 510c37a70717..c2fcb2598e47 100644 --- a/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java +++ b/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java @@ -51,9 +51,9 @@ interface Listener * Create a table entry. * * @return the version of the newly created table. Call - * {@link TableDefnRecord#asUpdate(long)} if you want a new - * {@link TableDefnRecord} with the new version. - * @throws DuplicateKeyException if the row is a duplicate + * {@link TableSpec#asUpdate(long)} if you want a new + * {@link TableSpec} with the new version. + * @throws {@link DuplicateKeyException} if the row is a duplicate * (schema, name) pair. This generally indicates a code error, * or since our code is perfect, a race condition or a DB * update outside of Druid. In any event, the error is not diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java index 5f3b441e51fd..8f2478167032 100644 --- a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java +++ b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java @@ -80,8 +80,6 @@ public CatalogResource(CatalogStorage catalog) /** * Create a new table within the indicated schema. * - * @param dbSchema Druid schema. Must be a valid, writable schema - * for which the user has write access. 
* @param table The table definition to create. * @param ifNew Whether to skip the action if the table already exists. * This is the same as the SQL IF NOT EXISTS clause. If {@code false}, diff --git a/server/src/test/java/org/apache/druid/catalog/DummyRequest.java b/server/src/test/java/org/apache/druid/catalog/DummyRequest.java index b2e26f63b3b5..5866b16528f4 100644 --- a/server/src/test/java/org/apache/druid/catalog/DummyRequest.java +++ b/server/src/test/java/org/apache/druid/catalog/DummyRequest.java @@ -33,7 +33,6 @@ import javax.servlet.DispatcherType; import javax.servlet.RequestDispatcher; import javax.servlet.ServletContext; -import javax.servlet.ServletException; import javax.servlet.ServletInputStream; import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; @@ -45,8 +44,6 @@ import javax.servlet.http.Part; import java.io.BufferedReader; -import java.io.IOException; -import java.io.UnsupportedEncodingException; import java.security.Principal; import java.util.Collection; import java.util.Enumeration; @@ -74,7 +71,11 @@ public class DummyRequest implements HttpServletRequest private static class TestAuthorizer implements Authorizer { @Override - public Access authorize(AuthenticationResult authenticationResult, Resource resource, Action action) + public Access authorize( + AuthenticationResult authenticationResult, + Resource resource, + Action action + ) { final String userName = authenticationResult.getIdentity(); if (DummyRequest.SUPER_USER.equals(userName)) { @@ -107,7 +108,8 @@ public DummyRequest(String method, String userName) public DummyRequest(String method, String userName, String contentType) { this.method = method; - AuthenticationResult authResult = new AuthenticationResult(userName, TEST_AUTHORITY, null, null); + AuthenticationResult authResult = + new AuthenticationResult(userName, TEST_AUTHORITY, null, null); attribs.put(AuthConfig.DRUID_AUTHENTICATION_RESULT, authResult); this.contentType = contentType; } @@ -146,7 
+148,7 @@ public String getCharacterEncoding() } @Override - public void setCharacterEncoding(String env) throws UnsupportedEncodingException + public void setCharacterEncoding(String env) { } @@ -169,7 +171,7 @@ public String getContentType() } @Override - public ServletInputStream getInputStream() throws IOException + public ServletInputStream getInputStream() { return null; } @@ -223,7 +225,7 @@ public int getServerPort() } @Override - public BufferedReader getReader() throws IOException + public BufferedReader getReader() { return null; } @@ -312,14 +314,13 @@ public ServletContext getServletContext() } @Override - public AsyncContext startAsync() throws IllegalStateException + public AsyncContext startAsync() { return null; } @Override public AsyncContext startAsync(ServletRequest servletRequest, ServletResponse servletResponse) - throws IllegalStateException { return null; } @@ -505,35 +506,35 @@ public boolean isRequestedSessionIdFromUrl() } @Override - public boolean authenticate(HttpServletResponse response) throws IOException, ServletException + public boolean authenticate(HttpServletResponse response) { return false; } @Override - public void login(String username, String password) throws ServletException + public void login(String username, String password) { } @Override - public void logout() throws ServletException + public void logout() { } @Override - public Collection getParts() throws IOException, ServletException + public Collection getParts() { return null; } @Override - public Part getPart(String name) throws IOException, ServletException + public Part getPart(String name) { return null; } @Override - public T upgrade(Class handlerClass) throws IOException, ServletException + public T upgrade(Class handlerClass) { return null; } diff --git a/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java index 826a3f6e153f..70667ec63414 100644 --- 
a/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java +++ b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java @@ -48,8 +48,10 @@ public class TableManagerTest { private static final ObjectMapper JSON_MAPPER = new DefaultObjectMapper(); + @Rule - public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule(); + public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = + new TestDerbyConnector.DerbyConnectorRule(); private CatalogManager manager; @Before @@ -190,7 +192,7 @@ public void testList() throws DuplicateKeyException long version = tableMgr.create(table2); table2 = table2.fromInsert(TableId.DRUID_SCHEMA, version); TableSpec table1 = TableSpec.newSegmentTable("table1", defn); - tableMgr.create(table1); + version = tableMgr.create(table1); table1 = table1.fromInsert(TableId.DRUID_SCHEMA, version); list = tableMgr.list(); From 04ec4148a6f912196079592e834f277501e28b5e Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Wed, 15 Jun 2022 22:06:33 -0700 Subject: [PATCH 3/8] Build fixes --- .../metadata/MetadataStorageTablesConfig.java | 31 +++++++++++++++++++ .../MetadataStorageUpdaterJobSpec.java | 18 +++++++++-- .../MetadataStorageUpdaterJobSpecTest.java | 8 ----- .../metadata/catalog/CatalogManager.java | 2 +- .../druid/catalog/DatasourceDefnTest.java | 2 +- 5 files changed, 48 insertions(+), 13 deletions(-) diff --git a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java index 16e2ff1c0fc4..2fba303ce39b 100644 --- a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java +++ b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java @@ -115,6 +115,37 @@ public MetadataStorageTablesConfig( this.tableDefnTable = makeTableName(tablesTable, "tableDefn"); } + /** + * Shim constructor for backwards 
compatibility with code that + * cannot be changed due to missing unit tests. + */ + public MetadataStorageTablesConfig( + String base, + String dataSourceTable, + String pendingSegmentsTable, + String segmentsTable, + String rulesTable, + String configTable, + String tasksTable, + String taskLogTable, + String taskLockTable, + String auditTable, + String supervisorTable + ) + { + this( + base, + dataSourceTable, + pendingSegmentsTable, + segmentsTable, rulesTable, + configTable, + tasksTable, + taskLogTable, + taskLockTable, + auditTable, + supervisorTable, + null); + } private String makeTableName(String explicitTableName, String defaultSuffix) { if (explicitTableName == null) { diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java index 39ffcc4aa23c..0f8ca0acfccc 100644 --- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java +++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpec.java @@ -82,10 +82,22 @@ public String getPassword() }; } - // Note: Currently it only supports configured segmentTable, other tables - // should be added if needed by the code using this + //Note: Currently it only supports configured segmentTable, other tables should be added if needed + //by the code using this public MetadataStorageTablesConfig getMetadataStorageTablesConfig() { - return MetadataStorageTablesConfig.fromBase(null); + return new MetadataStorageTablesConfig( + null, + null, + null, + segmentTable, + null, + null, + null, + null, + null, + null, + null + ); } } diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpecTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpecTest.java index 70974f38fa64..83484d0060d0 100644 --- 
a/indexing-hadoop/src/test/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpecTest.java +++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/updater/MetadataStorageUpdaterJobSpecTest.java @@ -53,14 +53,6 @@ public void testMetadaStorageConnectionConfigWithDefaultProviderPassword() throw ); } - @Test - void testMetadataConfig() - { - // Fairy lame test: just makes the static checks happy. - MetadataStorageUpdaterJobSpec spec = new MetadataStorageUpdaterJobSpec(); - Assert.assertNotNull(spec.getMetadataStorageTablesConfig()); - } - private void testMetadataStorageUpdaterJobSpec( String segmentTable, String type, diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java index f0319f2a755c..88efbdc97e80 100644 --- a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java +++ b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java @@ -36,7 +36,7 @@ enum TableState private final String code; - private TableState(String code) + TableState(String code) { this.code = code; } diff --git a/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java b/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java index 1c277395c9ba..fc5ff65bf919 100644 --- a/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java +++ b/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java @@ -80,7 +80,7 @@ public void testProperties() Map props = ImmutableMap.of( "foo", 10, "bar", "mumble"); DatasourceDefn defn = DatasourceDefn.builder() - .rollupGranularity("PT1M") + .segmentGranularity("PT1D") .properties(props) .build(); From 50c267beabcb8d95b95d14859344c97bfd66f17a Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Wed, 15 Jun 2022 23:54:14 -0700 Subject: [PATCH 4/8] And there was much renaming --- .../metadata/MetadataStorageTablesConfig.java | 1 + 
.../druid/catalog/AbstractColumnMetadata.java | 122 ----- .../druid/catalog/AbstractTableMetadata.java | 281 ----------- ...acheNotifierEx.java => CacheNotifier.java} | 6 +- .../druid/catalog/CachedMetadataCatalog.java | 16 +- .../apache/druid/catalog/CatalogClient.java | 10 +- .../{CatalogDefns.java => CatalogSpecs.java} | 2 +- .../apache/druid/catalog/CatalogStorage.java | 15 +- .../druid/catalog/CatalogUpdateNotifier.java | 10 +- .../{ColumnDefn.java => ColumnSpec.java} | 30 +- .../druid/catalog/DatasourceColumnDefn.java | 104 ---- .../druid/catalog/DatasourceColumnSpec.java | 135 ++++++ ...atasourceDefn.java => DatasourceSpec.java} | 78 +-- ...utColumnDefn.java => InputColumnSpec.java} | 12 +- ...putSourceDefn.java => InputTableSpec.java} | 35 +- .../druid/catalog/LocalMetadataCatalog.java | 19 +- .../druid/catalog/MeasureColumnDefn.java | 48 -- .../apache/druid/catalog/MetadataCatalog.java | 76 +-- .../apache/druid/catalog/SchemaRegistry.java | 4 +- .../druid/catalog/SchemaRegistryImpl.java | 12 +- .../org/apache/druid/catalog/TableDefn.java | 91 ---- .../apache/druid/catalog/TableMetadata.java | 281 +++++++++++ .../org/apache/druid/catalog/TableSpec.java | 244 ++-------- .../metadata/catalog/CatalogManager.java | 89 +++- .../metadata/catalog/SQLCatalogManager.java | 412 +++++++++++++++- .../metadata/catalog/SQLTableManager.java | 446 ------------------ .../metadata/catalog/TableDefnManager.java | 115 ----- .../server/http/CatalogListenerResource.java | 10 +- .../druid/server/http/CatalogResource.java | 18 +- .../druid/catalog/CacheNotifierTest.java | 4 +- .../druid/catalog/CatalogObjectTest.java | 22 +- .../druid/catalog/CatalogResourceTest.java | 58 +-- ...eDefnTest.java => DatasourceSpecTest.java} | 59 +-- ...eDefnTest.java => InputTableSpecTest.java} | 26 +- .../druid/catalog/MetadataCatalogTest.java | 91 ++-- .../apache/druid/catalog/MockCatalogSync.java | 8 +- .../metadata/catalog/TableManagerTest.java | 80 ++-- 37 files changed, 1260 insertions(+), 1810 
deletions(-) delete mode 100644 server/src/main/java/org/apache/druid/catalog/AbstractColumnMetadata.java delete mode 100644 server/src/main/java/org/apache/druid/catalog/AbstractTableMetadata.java rename server/src/main/java/org/apache/druid/catalog/{CommonCacheNotifierEx.java => CacheNotifier.java} (97%) rename server/src/main/java/org/apache/druid/catalog/{CatalogDefns.java => CatalogSpecs.java} (98%) rename server/src/main/java/org/apache/druid/catalog/{ColumnDefn.java => ColumnSpec.java} (77%) delete mode 100644 server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java create mode 100644 server/src/main/java/org/apache/druid/catalog/DatasourceColumnSpec.java rename server/src/main/java/org/apache/druid/catalog/{DatasourceDefn.java => DatasourceSpec.java} (83%) rename server/src/main/java/org/apache/druid/catalog/{InputColumnDefn.java => InputColumnSpec.java} (92%) rename server/src/main/java/org/apache/druid/catalog/{InputSourceDefn.java => InputTableSpec.java} (86%) delete mode 100644 server/src/main/java/org/apache/druid/catalog/MeasureColumnDefn.java delete mode 100644 server/src/main/java/org/apache/druid/catalog/TableDefn.java create mode 100644 server/src/main/java/org/apache/druid/catalog/TableMetadata.java delete mode 100644 server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java delete mode 100644 server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java rename server/src/test/java/org/apache/druid/catalog/{DatasourceDefnTest.java => DatasourceSpecTest.java} (72%) rename server/src/test/java/org/apache/druid/catalog/{InputSourceDefnTest.java => InputTableSpecTest.java} (87%) diff --git a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java index 2fba303ce39b..7f4e4d7c037c 100644 --- a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java +++ 
b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java @@ -146,6 +146,7 @@ public MetadataStorageTablesConfig( supervisorTable, null); } + private String makeTableName(String explicitTableName, String defaultSuffix) { if (explicitTableName == null) { diff --git a/server/src/main/java/org/apache/druid/catalog/AbstractColumnMetadata.java b/server/src/main/java/org/apache/druid/catalog/AbstractColumnMetadata.java deleted file mode 100644 index ea58f609dba0..000000000000 --- a/server/src/main/java/org/apache/druid/catalog/AbstractColumnMetadata.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.druid.catalog; - -import org.apache.druid.catalog.MetadataCatalog.ColumnKind; -import org.apache.druid.catalog.MetadataCatalog.ColumnMetadata; -import org.apache.druid.catalog.MetadataCatalog.InputColumnMetadata; -import org.apache.druid.catalog.MetadataCatalog.MeasureMetadata; -import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.segment.column.ColumnType; - -public abstract class AbstractColumnMetadata implements ColumnMetadata -{ - public static class SimpleColumn extends AbstractColumnMetadata - { - public SimpleColumn(String name, String sqlType) - { - super(name, sqlType); - } - - @Override - public ColumnKind kind() - { - return ColumnKind.SIMPLE; - } - } - - public static class DimensionColumn extends AbstractColumnMetadata - { - public DimensionColumn(String name, String sqlType) - { - super(name, sqlType); - } - - @Override - public ColumnKind kind() - { - return ColumnKind.DIMENSION; - } - } - - public static class MeasureColumn extends AbstractColumnMetadata implements MeasureMetadata - { - private final String aggFn; - - public MeasureColumn(String name, String sqlType, String aggFn) - { - super(name, sqlType); - this.aggFn = aggFn; - } - - @Override - public ColumnKind kind() - { - return ColumnKind.MEASURE; - } - - @Override - public String aggFn() - { - return aggFn; - } - } - - public static class InputColumn extends SimpleColumn implements InputColumnMetadata - { - public InputColumn(String name, String sqlType) - { - super(name, sqlType); - } - - @Override - public ColumnKind kind() - { - return ColumnKind.INPUT; - } - - @Override - public ColumnType druidType() - { - return ColumnDefn.VALID_SQL_TYPES.get(StringUtils.toUpperCase(sqlType)); - } - } - - protected final String name; - protected final String sqlType; - - public AbstractColumnMetadata(String name, String sqlType) - { - this.name = name; - this.sqlType = sqlType; - } - - @Override - public String name() - { - return name; - } - - 
@Override - public String sqlType() - { - return sqlType; - } -} diff --git a/server/src/main/java/org/apache/druid/catalog/AbstractTableMetadata.java b/server/src/main/java/org/apache/druid/catalog/AbstractTableMetadata.java deleted file mode 100644 index ccfb58afea94..000000000000 --- a/server/src/main/java/org/apache/druid/catalog/AbstractTableMetadata.java +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.druid.catalog; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.druid.catalog.AbstractColumnMetadata.DimensionColumn; -import org.apache.druid.catalog.AbstractColumnMetadata.InputColumn; -import org.apache.druid.catalog.AbstractColumnMetadata.MeasureColumn; -import org.apache.druid.catalog.AbstractColumnMetadata.SimpleColumn; -import org.apache.druid.catalog.MetadataCatalog.ColumnMetadata; -import org.apache.druid.catalog.MetadataCatalog.DatasourceMetadata; -import org.apache.druid.catalog.MetadataCatalog.InputSourceMetadata; -import org.apache.druid.catalog.MetadataCatalog.TableMetadata; -import org.apache.druid.catalog.MetadataCatalog.TableType; -import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; -import org.apache.druid.data.input.InputFormat; -import org.apache.druid.data.input.InputSource; -import org.apache.druid.segment.column.RowSignature; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public abstract class AbstractTableMetadata implements TableMetadata -{ - public static class DatasourceTable extends AbstractTableMetadata implements DatasourceMetadata - { - private final String segmentGranularity; - private final String rollupGranularity; - - @JsonCreator - public DatasourceTable( - @JsonProperty("id") TableId id, - @JsonProperty("updateTime") long updateTime, - @JsonProperty("segmentGranularity") String segmentGranularity, - @JsonProperty("rollupGranularity") String rollupGranularity, - @JsonProperty("columns") List columns) - { - super(id, updateTime, columns); - this.segmentGranularity = segmentGranularity; - this.rollupGranularity = rollupGranularity; - } - - public DatasourceTable(TableId id, long updateTime, DatasourceDefn defn) - { - super(id, updateTime, convertColums(defn)); - this.segmentGranularity = 
defn.segmentGranularity(); - this.rollupGranularity = defn.rollupGranularity(); - } - - private static List convertColums(DatasourceDefn defn) - { - boolean isRollup = defn.isRollupTable(); - List converted = new ArrayList<>(); - for (ColumnDefn col : defn.columns()) { - ColumnMetadata mdCol; - if (col instanceof MeasureColumnDefn) { - MeasureColumnDefn measureDefn = (MeasureColumnDefn) col; - mdCol = new MeasureColumn(col.name(), col.sqlType(), measureDefn.aggregateFn()); - } else if (isRollup) { - mdCol = new DimensionColumn(col.name(), col.sqlType()); - } else { - mdCol = new SimpleColumn(col.name(), col.sqlType()); - } - converted.add(mdCol); - } - return converted; - } - - @Override - public TableType type() - { - return TableType.DATASOURCE; - } - - @Override - @JsonProperty("segmentGranularity") - public String segmentGranularity() - { - return segmentGranularity; - } - - @Override - @JsonIgnore - public boolean isRollup() - { - return !isDetail(); - } - - @Override - @JsonIgnore - public boolean isDetail() - { - return rollupGranularity == null; - } - - @Override - @JsonProperty("rollupGranularity") - public String rollupGranularity() - { - return rollupGranularity; - } - } - - public static class InputSourceTable extends AbstractTableMetadata implements InputSourceMetadata - { - private final InputSource inputSource; - private final InputFormat format; - - @JsonCreator - public InputSourceTable( - @JsonProperty("id") TableId id, - @JsonProperty("updateTime") long updateTime, - @JsonProperty("inputSource") InputSource inputSource, - @JsonProperty("format") InputFormat format, - @JsonProperty("columns") List columns - ) - { - super(id, updateTime, columns); - this.inputSource = inputSource; - this.format = format; - } - - public InputSourceTable(TableId id, long updateTime, InputSourceDefn defn) - { - super(id, updateTime, convertColums(defn)); - this.inputSource = defn.inputSource(); - this.format = defn.format(); - } - - private static List 
convertColums(InputSourceDefn defn) - { - List converted = new ArrayList<>(); - for (ColumnDefn col : defn.columns()) { - converted.add(new InputColumn(col.name(), col.sqlType())); - } - return converted; - } - - @Override - public TableType type() - { - return TableType.INPUT; - } - - @JsonProperty("inputSource") - public InputSource inputSource() - { - return inputSource; - } - - @JsonProperty("format") - public InputFormat format() - { - return format; - } - - public RowSignature rowSignature() - { - RowSignature.Builder builder = RowSignature.builder(); - for (ColumnMetadata col : columns) { - builder.add(col.name(), ((InputColumn) col).druidType()); - } - return builder.build(); - } - } - - protected final TableId id; - private final long updateTime; - protected final List columns; - private final Map columnIndex = new HashMap<>(); - - public AbstractTableMetadata( - TableId id, - long updateTime, - List columns - ) - { - this.id = id; - this.updateTime = updateTime; - this.columns = columns; - for (ColumnMetadata col : columns) { - columnIndex.put(col.name(), col); - } - } - - @Override - @JsonProperty("id") - public TableId id() - { - return id; - } - - @Override - @JsonProperty("updateTime") - public long updateTime() - { - return updateTime; - } - - @Override - @JsonProperty("columns") - public List columns() - { - return columns; - } - - @Override - public ColumnMetadata column(String name) - { - return columnIndex.get(name); - } - - public static TableMetadata fromCatalogTable(SchemaDefn schema, TableSpec table) - { - return create(schema, table.id(), table.updateTime(), table.defn()); - } - - public static TableMetadata create( - SchemaDefn schema, - TableId id, - long updateTime, - TableDefn defn) - { - if (defn == null) { - // Useless metadata: adds no information. Should not occur. 
- return null; - } - TableType tableType = schema.tableType(); - if (tableType == null) { - if (defn instanceof DatasourceDefn) { - tableType = TableType.DATASOURCE; - } else if (defn instanceof InputSourceDefn) { - tableType = TableType.INPUT; - } else { - // TODO: other types - return null; - } - } - switch (tableType) { - case DATASOURCE: - if (!(defn instanceof DatasourceDefn)) { - // Wrong type. Too late to fix it now. Ignore it. - return null; - } - return new DatasourceTable(id, updateTime, (DatasourceDefn) defn); - case INPUT: - if (!(defn instanceof InputSourceDefn)) { - return null; - } - return new InputSourceTable(id, updateTime, (InputSourceDefn) defn); - case VIEW: - // Not yet - return null; - default: - // Don't know what this is, so we don't know how to use it. - // Ignore it. - return null; - } - } -} diff --git a/server/src/main/java/org/apache/druid/catalog/CommonCacheNotifierEx.java b/server/src/main/java/org/apache/druid/catalog/CacheNotifier.java similarity index 97% rename from server/src/main/java/org/apache/druid/catalog/CommonCacheNotifierEx.java rename to server/src/main/java/org/apache/druid/catalog/CacheNotifier.java index 75fd2e84ccb5..d220dc23461c 100644 --- a/server/src/main/java/org/apache/druid/catalog/CommonCacheNotifierEx.java +++ b/server/src/main/java/org/apache/druid/catalog/CacheNotifier.java @@ -51,16 +51,16 @@ * Defined by composition so it can be tested and reused in other * contexts. 
*/ -public class CommonCacheNotifierEx +public class CacheNotifier { - private static final EmittingLogger LOG = new EmittingLogger(CommonCacheNotifierEx.class); + private static final EmittingLogger LOG = new EmittingLogger(CacheNotifier.class); private final ExecutorService exec; private final String callerName; private final BlockingQueue updates = new LinkedBlockingQueue<>(); private final Consumer sender; - public CommonCacheNotifierEx( + public CacheNotifier( final String callerName, final Consumer sender ) diff --git a/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java index ee0d5a49ca2c..4fe6f703bcb4 100644 --- a/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java +++ b/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java @@ -60,13 +60,9 @@ private static class TableEntry { private final TableMetadata table; - protected TableEntry(SchemaDefn schema, TableSpec table) + protected TableEntry(SchemaDefn schema, TableMetadata table) { - this.table = table == null - ? 
null - : AbstractTableMetadata.fromCatalogTable( - schema, - table); + this.table = table; } protected long version() @@ -101,8 +97,8 @@ public synchronized List tables() return Collections.emptyList(); } if (version == NOT_FETCHED) { - List catalogTables = base.tablesForSchema(schema.name()); - for (TableSpec table : catalogTables) { + List catalogTables = base.tablesForSchema(schema.name()); + for (TableMetadata table : catalogTables) { update(table); } } @@ -118,7 +114,7 @@ public synchronized List tables() return orderedTables; } - public synchronized void update(TableSpec table) + public synchronized void update(TableMetadata table) { cache.compute( table.name(), @@ -175,7 +171,7 @@ public List tables(String schemaName) } @Override - public void updated(TableSpec table) + public void updated(TableMetadata table) { SchemaEntry schemaEntry = entryFor(table.dbSchema()); if (schemaEntry != null) { diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogClient.java b/server/src/main/java/org/apache/druid/catalog/CatalogClient.java index 67a37b57dba4..f2fc929273c4 100644 --- a/server/src/main/java/org/apache/druid/catalog/CatalogClient.java +++ b/server/src/main/java/org/apache/druid/catalog/CatalogClient.java @@ -54,11 +54,11 @@ public class CatalogClient implements CatalogSource { public static final String SCHEMA_SYNC_PATH = CatalogResource.ROOT_PATH + CatalogResource.SCHEMA_SYNC; public static final String TABLE_SYNC_PATH = CatalogResource.ROOT_PATH + CatalogResource.TABLE_SYNC; - private static final TypeReference> LIST_OF_TABLE_SPECS_TYPE = new TypeReference>() + private static final TypeReference> LIST_OF_TABLE_SPECS_TYPE = new TypeReference>() { }; // Not strictly needed as a TypeReference, but doing so makes the code simpler. 
- private static final TypeReference TABLE_SPEC_TYPE = new TypeReference() + private static final TypeReference TABLE_SPEC_TYPE = new TypeReference() { }; @@ -76,17 +76,17 @@ public CatalogClient( } @Override - public List tablesForSchema(String dbSchema) + public List tablesForSchema(String dbSchema) { String url = StringUtils.replace(SCHEMA_SYNC_PATH, "{dbSchema}", dbSchema); - List results = send(url, LIST_OF_TABLE_SPECS_TYPE); + List results = send(url, LIST_OF_TABLE_SPECS_TYPE); // Not found for a list is an empty list. return results == null ? Collections.emptyList() : results; } @Override - public TableSpec table(TableId id) + public TableMetadata table(TableId id) { String url = StringUtils.replace(SCHEMA_SYNC_PATH, "{dbSchema}", id.schema()); url = StringUtils.replace(url, "{table}", id.name()); diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogDefns.java b/server/src/main/java/org/apache/druid/catalog/CatalogSpecs.java similarity index 98% rename from server/src/main/java/org/apache/druid/catalog/CatalogDefns.java rename to server/src/main/java/org/apache/druid/catalog/CatalogSpecs.java index 0e54b6c82cf2..b914e34d1966 100644 --- a/server/src/main/java/org/apache/druid/catalog/CatalogDefns.java +++ b/server/src/main/java/org/apache/druid/catalog/CatalogSpecs.java @@ -25,7 +25,7 @@ import java.io.IOException; -public class CatalogDefns +public class CatalogSpecs { public static byte[] toBytes(ObjectMapper jsonMapper, Object obj) { diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java index 4e8d74f1e9e2..25e50813a447 100644 --- a/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java +++ b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java @@ -24,7 +24,6 @@ import org.apache.druid.catalog.MetadataCatalog.CatalogUpdateProvider; import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; import 
org.apache.druid.metadata.catalog.CatalogManager; -import org.apache.druid.metadata.catalog.TableDefnManager; import org.apache.druid.server.security.AuthorizerMapper; import javax.inject.Inject; @@ -37,7 +36,7 @@ */ public class CatalogStorage implements CatalogUpdateProvider, CatalogSource { - public static class ListenerAdapter implements TableDefnManager.Listener + public static class ListenerAdapter implements CatalogManager.Listener { private final CatalogListener dest; @@ -47,13 +46,13 @@ public ListenerAdapter(CatalogListener dest) } @Override - public void added(TableSpec table) + public void added(TableMetadata table) { dest.updated(table); } @Override - public void updated(TableSpec table) + public void updated(TableMetadata table) { dest.updated(table); } @@ -85,9 +84,9 @@ public CatalogAuthorizer authorizer() return authorizer; } - public TableDefnManager tables() + public CatalogManager tables() { - return catalogMgr.tables(); + return catalogMgr; } public SchemaRegistry schemaRegistry() @@ -107,13 +106,13 @@ public void register(CatalogListener listener) } @Override - public List tablesForSchema(String dbSchema) + public List tablesForSchema(String dbSchema) { return tables().listDetails(dbSchema); } @Override - public TableSpec table(TableId id) + public TableMetadata table(TableId id) { return tables().read(id); } diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java b/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java index 333b277dcb19..3e095987fef7 100644 --- a/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java +++ b/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java @@ -53,9 +53,9 @@ public class CatalogUpdateNotifier implements CatalogListener { private final String CALLER_NAME = "Catalog Sync"; private final long TIMEOUT_MS = 5000; - private final TableDefn TABLE_TOMBSTONE = new TableDefn.Tombstone(); + private final TableSpec TABLE_TOMBSTONE = 
new TableSpec.Tombstone(); - private final CommonCacheNotifierEx notifier; + private final CacheNotifier notifier; private final ObjectMapper smileMapper; @Inject @@ -78,7 +78,7 @@ public CatalogUpdateNotifier( restSender, CatalogListenerResource.BASE_URL + CatalogListenerResource.SYNC_URL, timeoutMs); - this.notifier = new CommonCacheNotifierEx( + this.notifier = new CacheNotifier( CALLER_NAME, sender); catalog.register(this); @@ -97,7 +97,7 @@ public void stop() } @Override - public void updated(TableSpec update) + public void updated(TableMetadata update) { notifier.send(update.toBytes(smileMapper)); } @@ -105,7 +105,7 @@ public void updated(TableSpec update) @Override public void deleted(TableId tableId) { - TableSpec spec = TableSpec.newTable(tableId, TABLE_TOMBSTONE); + TableMetadata spec = TableMetadata.newTable(tableId, TABLE_TOMBSTONE); notifier.send(spec.toBytes(smileMapper)); } } diff --git a/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/ColumnSpec.java similarity index 77% rename from server/src/main/java/org/apache/druid/catalog/ColumnDefn.java rename to server/src/main/java/org/apache/druid/catalog/ColumnSpec.java index 6af7ed14bd9f..5f26275fa267 100644 --- a/server/src/main/java/org/apache/druid/catalog/ColumnDefn.java +++ b/server/src/main/java/org/apache/druid/catalog/ColumnSpec.java @@ -20,9 +20,6 @@ package org.apache.druid.catalog; import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonSubTypes; -import com.fasterxml.jackson.annotation.JsonSubTypes.Type; -import com.fasterxml.jackson.annotation.JsonTypeInfo; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; @@ -37,13 +34,16 @@ * represented as subclasses. 
*/ @PublicApi -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") -@JsonSubTypes(value = { - @Type(name = "column", value = DatasourceColumnDefn.class), - @Type(name = "measure", value = MeasureColumnDefn.class), -}) -public abstract class ColumnDefn +public abstract class ColumnSpec { + enum ColumnKind + { + DETAIL, + DIMENSION, + MEASURE, + INPUT + } + public static final Map VALID_SQL_TYPES = new ImmutableMap.Builder() .put("BIGINT", ColumnType.LONG) @@ -55,7 +55,7 @@ public abstract class ColumnDefn protected final String name; protected final String sqlType; - public ColumnDefn( + public ColumnSpec( String name, String sqlType ) @@ -64,6 +64,8 @@ public ColumnDefn( this.sqlType = sqlType; } + protected abstract ColumnKind kind(); + @JsonProperty("name") public String name() { @@ -85,17 +87,17 @@ public void validate() public byte[] toBytes(ObjectMapper jsonMapper) { - return CatalogDefns.toBytes(jsonMapper, this); + return CatalogSpecs.toBytes(jsonMapper, this); } - public static ColumnDefn fromBytes(ObjectMapper jsonMapper, byte[] bytes) + public static ColumnSpec fromBytes(ObjectMapper jsonMapper, byte[] bytes) { - return CatalogDefns.fromBytes(jsonMapper, bytes, ColumnDefn.class); + return CatalogSpecs.fromBytes(jsonMapper, bytes, ColumnSpec.class); } @Override public String toString() { - return CatalogDefns.toString(this); + return CatalogSpecs.toString(this); } } diff --git a/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java deleted file mode 100644 index 274fff8bea3b..000000000000 --- a/server/src/main/java/org/apache/druid/catalog/DatasourceColumnDefn.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.catalog; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.druid.java.util.common.IAE; -import org.apache.druid.java.util.common.StringUtils; - -/** - * Description of a detail datasource column and a rollup - * dimension column. - */ -public class DatasourceColumnDefn extends ColumnDefn -{ - private static final String TIME_COLUMN = "__time"; - - @JsonCreator - public DatasourceColumnDefn( - @JsonProperty("name") String name, - @JsonProperty("sqlType") String sqlType - ) - { - super(name, sqlType); - } - - public static Builder builder(String name) - { - return new Builder(name); - } - - @Override - public void validate() - { - super.validate(); - if (sqlType == null) { - return; - } - if (TIME_COLUMN.equals(name)) { - if (!"TIMESTAMP".equalsIgnoreCase(sqlType)) { - throw new IAE("__time column must have type TIMESTAMP"); - } - } else if (!VALID_SQL_TYPES.containsKey(StringUtils.toUpperCase(sqlType))) { - throw new IAE("Not a supported SQL type: " + sqlType); - } - } - - public static class Builder - { - private final String name; - private String sqlType; - private String aggFn; - - public Builder(String name) - { - this.name = name; - } - - public Builder sqlType(String type) - { - this.sqlType = type; - return this; - } - - public Builder measure(String aggFn) - { - 
this.aggFn = aggFn; - return this; - } - - public DatasourceColumnDefn build() - { - if (aggFn == null) { - return new DatasourceColumnDefn( - name, - sqlType - ); - } else { - return new MeasureColumnDefn( - name, - sqlType, - aggFn - ); - } - } - } -} diff --git a/server/src/main/java/org/apache/druid/catalog/DatasourceColumnSpec.java b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnSpec.java new file mode 100644 index 000000000000..7aa875980f6a --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnSpec.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonSubTypes.Type; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; + +/** + * Description of a detail datasource column and a rollup + * dimension column. 
+ */ +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") +@JsonSubTypes(value = { + @Type(name = "detail", value = DatasourceColumnSpec.DetailColumnSpec.class), + @Type(name = "dimension", value = DatasourceColumnSpec.DimensionSpec.class), + @Type(name = "measure", value = DatasourceColumnSpec.MeasureSpec.class), +}) +public abstract class DatasourceColumnSpec extends ColumnSpec +{ + private static final String TIME_COLUMN = "__time"; + + @JsonCreator + public DatasourceColumnSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType + ) + { + super(name, sqlType); + } + + @Override + public void validate() + { + super.validate(); + if (sqlType == null) { + return; + } + if (TIME_COLUMN.equals(name)) { + if (!"TIMESTAMP".equalsIgnoreCase(sqlType)) { + throw new IAE("__time column must have type TIMESTAMP"); + } + } else if (!VALID_SQL_TYPES.containsKey(StringUtils.toUpperCase(sqlType))) { + throw new IAE("Not a supported SQL type: " + sqlType); + } + } + + public static class DetailColumnSpec extends DatasourceColumnSpec + { + @JsonCreator + public DetailColumnSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType + ) + { + super(name, sqlType); + } + + @Override + protected ColumnKind kind() + { + return ColumnKind.DETAIL; + } + } + + public static class DimensionSpec extends DatasourceColumnSpec + { + @JsonCreator + public DimensionSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType + ) + { + super(name, sqlType); + } + + @Override + protected ColumnKind kind() + { + return ColumnKind.DIMENSION; + } + } + + /** + * Catalog definition of a measure (metric) column. 
+ */ + public static class MeasureSpec extends DatasourceColumnSpec + { + private final String aggregateFn; + + @JsonCreator + public MeasureSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType, + @JsonProperty("aggregateFn") String aggregateFn + ) + { + super(name, sqlType); + this.aggregateFn = aggregateFn; + } + + @Override + protected ColumnKind kind() + { + return ColumnKind.MEASURE; + } + + @JsonProperty("aggregateFn") + public String aggregateFn() + { + return aggregateFn; + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/DatasourceDefn.java b/server/src/main/java/org/apache/druid/catalog/DatasourceSpec.java similarity index 83% rename from server/src/main/java/org/apache/druid/catalog/DatasourceDefn.java rename to server/src/main/java/org/apache/druid/catalog/DatasourceSpec.java index 588fd245ff51..2249a0ffab6e 100644 --- a/server/src/main/java/org/apache/druid/catalog/DatasourceDefn.java +++ b/server/src/main/java/org/apache/druid/catalog/DatasourceSpec.java @@ -24,6 +24,10 @@ import com.fasterxml.jackson.annotation.JsonInclude.Include; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Strings; +import org.apache.druid.catalog.DatasourceColumnSpec.DetailColumnSpec; +import org.apache.druid.catalog.DatasourceColumnSpec.DimensionSpec; +import org.apache.druid.catalog.DatasourceColumnSpec.MeasureSpec; +import org.apache.druid.catalog.TableMetadata.TableType; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.StringUtils; @@ -40,7 +44,7 @@ * Datasource metadata exchanged via the REST API and stored * in the catalog. */ -public class DatasourceDefn extends TableDefn +public class DatasourceSpec extends TableSpec { /** * Segment grain at ingestion and initial compaction. 
Aging rules @@ -77,16 +81,16 @@ public class DatasourceDefn extends TableDefn */ private final String autoCompactionDelay; - private final List columns; + private final List columns; - public DatasourceDefn( + public DatasourceSpec( @JsonProperty("segmentGranularity") String segmentGranularity, @JsonProperty("rollupGranularity") String rollupGranularity, @JsonProperty("targetSegmentRows") int targetSegmentRows, @JsonProperty("enableAutoCompaction") boolean enableAutoCompaction, @JsonProperty("autoCompactionDelay") String autoCompactionDelay, @JsonProperty("properties") Map properties, - @JsonProperty("columns") List columns + @JsonProperty("columns") List columns ) { super(properties); @@ -102,6 +106,12 @@ public DatasourceDefn( this.columns = columns == null ? Collections.emptyList() : columns; } + @Override + public TableType type() + { + return TableType.DATASOURCE; + } + @JsonProperty("rollupGranularity") @JsonInclude(Include.NON_NULL) public String rollupGranularity() @@ -139,7 +149,7 @@ public String autoCompactionDelay() @JsonProperty("columns") @JsonInclude(Include.NON_EMPTY) - public List columns() + public List columns() { return columns; } @@ -155,15 +165,15 @@ public Builder toBuilder() } @JsonIgnore - public boolean isDetailTable() + public boolean isDetail() { return Strings.isNullOrEmpty(rollupGranularity); } @JsonIgnore - public boolean isRollupTable() + public boolean isRollup() { - return !isDetailTable(); + return !isDetail(); } @Override @@ -173,18 +183,24 @@ public void validate() if (Strings.isNullOrEmpty(segmentGranularity)) { throw new IAE("Segment granularity is required."); } - boolean isDetail = isDetailTable(); + boolean isDetail = isDetail(); Set names = new HashSet<>(); - for (ColumnDefn col : columns) { - if (!(col instanceof DatasourceColumnDefn)) { - throw new IAE( - StringUtils.format("Column %s is not a segment column", col.name())); - } - if (isDetail && col instanceof MeasureColumnDefn) { + for (ColumnSpec col : columns) { + if 
(isDetail && col instanceof MeasureSpec) { throw new IAE(StringUtils.format( "Measure column %s not allowed for a detail table", col.name())); } + if (isDetail && col instanceof DimensionSpec) { + throw new IAE(StringUtils.format( + "Dimension column %s not allowed for a detail table", + col.name())); + } + if (!isDetail && col instanceof DetailColumnSpec) { + throw new IAE(StringUtils.format( + "Detail column %s not allowed for a rollup table", + col.name())); + } col.validate(); if (!names.add(col.name())) { throw new IAE("Duplicate column name: " + col.name()); @@ -207,7 +223,7 @@ public boolean equals(Object o) if (o == null || o.getClass() != getClass()) { return false; } - DatasourceDefn other = (DatasourceDefn) o; + DatasourceSpec other = (DatasourceSpec) o; return Objects.equals(this.segmentGranularity, other.segmentGranularity) && Objects.equals(this.rollupGranularity, other.rollupGranularity) && this.targetSegmentRows == other.targetSegmentRows @@ -237,7 +253,7 @@ public static class Builder private int targetSegmentRows; private boolean enableAutoCompaction; private String autoCompactionDelay; - private List columns; + private List columns; private Map properties; public Builder() @@ -246,7 +262,7 @@ public Builder() this.properties = new HashMap<>(); } - public Builder(DatasourceDefn defn) + public Builder(DatasourceSpec defn) { this.segmentGranularity = defn.segmentGranularity; this.rollupGranularity = defn.rollupGranularity; @@ -287,12 +303,12 @@ public Builder autoCompactionDelay(String autoCompactionDelay) return this; } - public List columns() + public List columns() { return columns; } - public Builder column(DatasourceColumnDefn column) + public Builder column(DatasourceColumnSpec column) { if (Strings.isNullOrEmpty(column.name())) { throw new IAE("Column name is required"); @@ -308,21 +324,17 @@ public Builder timeColumn() public Builder column(String name, String sqlType) { - return column( - DatasourceColumnDefn - .builder(name) - 
.sqlType(sqlType) - .build()); + if (rollupGranularity == null) { + column(new DetailColumnSpec(name, sqlType)); + } else { + column(new DimensionSpec(name, sqlType)); + } + return this; } public Builder measure(String name, String sqlType, String aggFn) { - return column( - DatasourceColumnDefn - .builder(name) - .sqlType(sqlType) - .measure(aggFn) - .build()); + return column(new MeasureSpec(name, sqlType, aggFn)); } public Builder properties(Map properties) @@ -345,13 +357,13 @@ public Map properties() return properties; } - public DatasourceDefn build() + public DatasourceSpec build() { if (targetSegmentRows < 0) { targetSegmentRows = 0; } // TODO(paul): validate upper bound - return new DatasourceDefn( + return new DatasourceSpec( segmentGranularity, rollupGranularity, targetSegmentRows, diff --git a/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/InputColumnSpec.java similarity index 92% rename from server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java rename to server/src/main/java/org/apache/druid/catalog/InputColumnSpec.java index fb09206a1277..4b7f439cefeb 100644 --- a/server/src/main/java/org/apache/druid/catalog/InputColumnDefn.java +++ b/server/src/main/java/org/apache/druid/catalog/InputColumnSpec.java @@ -34,16 +34,22 @@ * proscriptive (of the columns we'd like to have, since Druid does not * create input columns.) 
*/ -public class InputColumnDefn extends ColumnDefn +public class InputColumnSpec extends ColumnSpec { @JsonCreator - public InputColumnDefn( + public InputColumnSpec( @JsonProperty("name") String name, @JsonProperty("sqlType") String sqlType) { super(name, sqlType); } + @Override + protected ColumnKind kind() + { + return ColumnKind.INPUT; + } + @Override public void validate() { @@ -68,7 +74,7 @@ public boolean equals(Object o) if (o == null || getClass() != o.getClass()) { return false; } - InputColumnDefn other = (InputColumnDefn) o; + InputColumnSpec other = (InputColumnSpec) o; return Objects.equals(this.name, other.name) && Objects.equals(this.sqlType, other.sqlType); } diff --git a/server/src/main/java/org/apache/druid/catalog/InputSourceDefn.java b/server/src/main/java/org/apache/druid/catalog/InputTableSpec.java similarity index 86% rename from server/src/main/java/org/apache/druid/catalog/InputSourceDefn.java rename to server/src/main/java/org/apache/druid/catalog/InputTableSpec.java index 12da74c4bdf1..9cbc7b2b2684 100644 --- a/server/src/main/java/org/apache/druid/catalog/InputSourceDefn.java +++ b/server/src/main/java/org/apache/druid/catalog/InputTableSpec.java @@ -21,6 +21,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Strings; +import org.apache.druid.catalog.TableMetadata.TableType; import org.apache.druid.data.input.InputFormat; import org.apache.druid.data.input.InputSource; import org.apache.druid.java.util.common.IAE; @@ -39,16 +40,16 @@ * input source, a format and a set of columns. Also provides * properties, as do all table definitions. 
*/ -public class InputSourceDefn extends TableDefn +public class InputTableSpec extends TableSpec { private final InputSource inputSource; private final InputFormat format; - private final List columns; + private final List columns; - public InputSourceDefn( + public InputTableSpec( @JsonProperty("inputSource") InputSource inputSource, @JsonProperty("format") InputFormat format, - @JsonProperty("columns") List columns, + @JsonProperty("columns") List columns, @JsonProperty("properties") Map properties ) { @@ -58,6 +59,12 @@ public InputSourceDefn( this.columns = columns; } + @Override + public TableType type() + { + return TableType.INPUT; + } + @JsonProperty("inputSource") public InputSource inputSource() { @@ -71,7 +78,7 @@ public InputFormat format() } @JsonProperty("columns") - public List columns() + public List columns() { return columns; } @@ -90,7 +97,7 @@ public void validate() throw new IAE("An input source must specify one or more columns"); } Set names = new HashSet<>(); - for (ColumnDefn col : columns) { + for (ColumnSpec col : columns) { if (!names.add(col.name())) { throw new IAE("Duplicate column name: " + col.name()); } @@ -123,7 +130,7 @@ public boolean equals(Object o) if (o == null || o.getClass() != getClass()) { return false; } - InputSourceDefn other = (InputSourceDefn) o; + InputTableSpec other = (InputTableSpec) o; return Objects.equals(this.inputSource, other.inputSource) && Objects.equals(this.format, other.format) && Objects.equals(this.columns, other.columns) @@ -144,7 +151,7 @@ public static class Builder { private InputSource inputSource; private InputFormat format; - private List columns; + private List columns; private Map properties; public Builder() @@ -153,7 +160,7 @@ public Builder() this.properties = new HashMap<>(); } - public Builder(InputSourceDefn defn) + public Builder(InputTableSpec defn) { this.inputSource = defn.inputSource; this.format = defn.format; @@ -173,12 +180,12 @@ public Builder format(InputFormat format) 
return this; } - public List columns() + public List columns() { return columns; } - public Builder column(InputColumnDefn column) + public Builder column(InputColumnSpec column) { if (Strings.isNullOrEmpty(column.name())) { throw new IAE("Column name is required"); @@ -189,7 +196,7 @@ public Builder column(InputColumnDefn column) public Builder column(String name, String sqlType) { - return column(new InputColumnDefn(name, sqlType)); + return column(new InputColumnSpec(name, sqlType)); } public Builder properties(Map properties) @@ -212,9 +219,9 @@ public Map properties() return properties; } - public InputSourceDefn build() + public InputTableSpec build() { - return new InputSourceDefn( + return new InputTableSpec( inputSource, format, columns, diff --git a/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java index 06714ebc9bb5..e2a9f23fa61e 100644 --- a/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java +++ b/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java @@ -23,7 +23,6 @@ import javax.inject.Inject; -import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -52,12 +51,7 @@ public LocalMetadataCatalog( @Override public TableMetadata resolveTable(TableId tableId) { - TableSpec table = catalog.table(tableId); - if (table == null) { - return null; - } - SchemaDefn schema = schemaRegistry.schema(table.dbSchema()); - return AbstractTableMetadata.fromCatalogTable(schema, table); + return catalog.table(tableId); } @Override @@ -67,12 +61,7 @@ public List tables(String schemaName) if (schema == null || !schema.writable()) { return Collections.emptyList(); } - List catalogTables = catalog.tablesForSchema(schemaName); - List tables = new ArrayList<>(); - for (TableSpec table : catalogTables) { - tables.add(AbstractTableMetadata.fromCatalogTable(schema, table)); - } - return tables; + 
return catalog.tablesForSchema(schemaName); } @Override @@ -82,9 +71,9 @@ public Set tableNames(String schemaName) if (schema == null || !schema.writable()) { return Collections.emptySet(); } - List catalogTables = catalog.tablesForSchema(schemaName); + List catalogTables = catalog.tablesForSchema(schemaName); Set tables = new HashSet<>(); - for (TableSpec table : catalogTables) { + for (TableMetadata table : catalogTables) { tables.add(table.name()); } return tables; diff --git a/server/src/main/java/org/apache/druid/catalog/MeasureColumnDefn.java b/server/src/main/java/org/apache/druid/catalog/MeasureColumnDefn.java deleted file mode 100644 index 71bca316b9ec..000000000000 --- a/server/src/main/java/org/apache/druid/catalog/MeasureColumnDefn.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.catalog; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Catalog definition of a measure (metric) column. 
- */ -public class MeasureColumnDefn extends DatasourceColumnDefn -{ - private final String aggregateFn; - - @JsonCreator - public MeasureColumnDefn( - @JsonProperty("name") String name, - @JsonProperty("sqlType") String sqlType, - @JsonProperty("aggregateFn") String aggregateFn - ) - { - super(name, sqlType); - this.aggregateFn = aggregateFn; - } - - @JsonProperty("aggregateFn") - public String aggregateFn() - { - return aggregateFn; - } -} diff --git a/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java index e1c73bda234f..f6f942bad9b3 100644 --- a/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java +++ b/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java @@ -19,8 +19,6 @@ package org.apache.druid.catalog; -import org.apache.druid.segment.column.ColumnType; - import java.util.List; import java.util.Set; @@ -35,83 +33,15 @@ */ public interface MetadataCatalog { - enum TableType - { - DATASOURCE, - INPUT, - VIEW - } - - enum ColumnKind - { - SIMPLE, - DIMENSION, - MEASURE, - INPUT - } - - /** - * Facade over a column definition for the convenience of Broker clients. - */ - interface ColumnMetadata - { - String name(); - ColumnKind kind(); - String sqlType(); - } - - interface MeasureMetadata extends ColumnMetadata - { - String aggFn(); - } - - interface InputColumnMetadata extends ColumnMetadata - { - ColumnType druidType(); - } - - /** - * Facade over a table definition for the convenience of Broker clients. - */ - interface TableMetadata - { - TableId id(); - TableType type(); - long updateTime(); - - /** - * List of known columns, in user-defined order. - */ - List columns(); - - /** - * Look up a column by name. Returns null if no such column - * exists in metadata. 
- */ - ColumnMetadata column(String name); - } - - interface DatasourceMetadata extends TableMetadata - { - String segmentGranularity(); - String rollupGranularity(); - boolean isRollup(); - boolean isDetail(); - } - - interface InputSourceMetadata extends TableMetadata - { - } - interface CatalogSource { - List tablesForSchema(String dbSchema); - TableSpec table(TableId id); + List tablesForSchema(String dbSchema); + TableMetadata table(TableId id); } interface CatalogListener { - void updated(TableSpec update); + void updated(TableMetadata update); void deleted(TableId tableId); } diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java index 0c26476e9849..0d41c003ff34 100644 --- a/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java @@ -19,7 +19,7 @@ package org.apache.druid.catalog; -import org.apache.druid.catalog.MetadataCatalog.TableType; +import org.apache.druid.catalog.TableMetadata.TableType; import java.util.Set; @@ -36,7 +36,7 @@ interface SchemaDefn String name(); String securityResource(); boolean writable(); - boolean accepts(TableDefn defn); + boolean accepts(TableSpec defn); TableType tableType(); } diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java index 27adb4c5d5e3..5f18ec7900e0 100644 --- a/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java @@ -19,7 +19,7 @@ package org.apache.druid.catalog; -import org.apache.druid.catalog.MetadataCatalog.TableType; +import org.apache.druid.catalog.TableMetadata.TableType; import org.apache.druid.server.security.ResourceType; import java.util.HashMap; @@ -43,13 +43,13 @@ public static class SchemaDefnImpl implements SchemaDefn private final 
String name; private final String resource; private final TableType tableType; - private Class acceptedClass; + private Class acceptedClass; public SchemaDefnImpl( String name, String resource, TableType tableType, - Class acceptedClass) + Class acceptedClass) { this.name = name; this.resource = resource; @@ -76,7 +76,7 @@ public boolean writable() } @Override - public boolean accepts(TableDefn defn) + public boolean accepts(TableSpec defn) { if (acceptedClass == null) { return false; @@ -103,7 +103,7 @@ public SchemaRegistryImpl() TableId.DRUID_SCHEMA, ResourceType.DATASOURCE, TableType.DATASOURCE, - DatasourceDefn.class)); + DatasourceSpec.class)); register(new SchemaDefnImpl( TableId.LOOKUP_SCHEMA, ResourceType.CONFIG, @@ -123,7 +123,7 @@ public SchemaRegistryImpl() TableId.INPUT_SCHEMA, EXTERNAL_RESOURCE, TableType.INPUT, - InputSourceDefn.class)); + InputTableSpec.class)); register(new SchemaDefnImpl( TableId.VIEW_SCHEMA, ResourceType.VIEW, diff --git a/server/src/main/java/org/apache/druid/catalog/TableDefn.java b/server/src/main/java/org/apache/druid/catalog/TableDefn.java deleted file mode 100644 index 700998c5103b..000000000000 --- a/server/src/main/java/org/apache/druid/catalog/TableDefn.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.catalog; - -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonSubTypes; -import com.fasterxml.jackson.annotation.JsonSubTypes.Type; -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableMap; - -import java.util.Map; - -/** - * Definition of a table "hint" in the metastore, between client and - * Druid, and between Druid nodes. - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") -@JsonSubTypes(value = { - @Type(name = "datasource", value = DatasourceDefn.class), - @Type(name = "input", value = InputSourceDefn.class), - @Type(name = "tombstone", value = TableDefn.Tombstone.class), -}) -public class TableDefn -{ - private final Map properties; - - public TableDefn(Map properties) - { - this.properties = properties == null ? 
ImmutableMap.of() : properties; - } - - @JsonProperty("properties") - @JsonInclude(Include.NON_NULL) - public Map properties() - { - return properties; - } - - public void validate() - { - } - - public byte[] toBytes(ObjectMapper jsonMapper) - { - return CatalogDefns.toBytes(jsonMapper, this); - } - - public static TableDefn fromBytes(ObjectMapper jsonMapper, byte[] bytes) - { - return CatalogDefns.fromBytes(jsonMapper, bytes, TableDefn.class); - } - - @Override - public String toString() - { - return CatalogDefns.toString(this); - } - - public String defaultSchema() - { - return null; - } - - public static class Tombstone extends TableDefn - { - public Tombstone() - { - super(null); - } - } -} diff --git a/server/src/main/java/org/apache/druid/catalog/TableMetadata.java b/server/src/main/java/org/apache/druid/catalog/TableMetadata.java new file mode 100644 index 000000000000..03857474847a --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/TableMetadata.java @@ -0,0 +1,281 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import org.apache.druid.guice.annotations.PublicApi; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.metadata.catalog.CatalogManager.TableState; + +import java.util.Objects; + +/** + * REST API level description of a table. Tables have multiple types + * as described by subclasses. + */ +@PublicApi +public class TableMetadata +{ + public enum TableType + { + DATASOURCE, + INPUT, + VIEW, + TOMBSTONE + } + + private final String dbSchema; + private final String name; + private final String owner; + private final long creationTime; + private final long updateTime; + private final TableState state; + private final TableSpec defn; + + public TableMetadata( + @JsonProperty("dbSchema") String dbSchema, + @JsonProperty("name") String name, + @JsonProperty("owner") String owner, + @JsonProperty("creationTime") long creationTime, + @JsonProperty("updateTime") long updateTime, + @JsonProperty("state") TableState state, + @JsonProperty("defn") TableSpec defn) + { + this.dbSchema = dbSchema; + this.name = name; + this.owner = owner; + this.creationTime = creationTime; + this.updateTime = updateTime; + this.state = state; + this.defn = defn; + } + + public static TableMetadata newTable( + TableId id, + TableSpec defn + ) + { + return newTable(id.schema(), id.name(), defn); + } + + public static TableMetadata newTable( + String dbSchema, + String name, + TableSpec defn + ) + { + return new TableMetadata( + dbSchema, + name, + null, + 0, + 0, + TableState.ACTIVE, + defn); + } + + public static TableMetadata newSegmentTable( + String name, + TableSpec defn + ) + { + return newTable( + TableId.DRUID_SCHEMA, + name, + defn); + } + + public TableMetadata 
fromInsert(String dbSchema, long updateTime) + { + return new TableMetadata( + dbSchema, + name, + owner, + updateTime, + updateTime, + state, + defn); + } + + public TableMetadata asUpdate(long updateTime) + { + return new TableMetadata( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } + + public TableMetadata withSchema(String dbSchema) + { + if (dbSchema.equals(this.dbSchema)) { + return this; + } + return new TableMetadata( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } + + public TableId id() + { + return new TableId(resolveDbSchema(), name); + } + + @JsonProperty("dbSchema") + public String dbSchema() + { + return dbSchema; + } + + @JsonProperty("name") + public String name() + { + return name; + } + + public String sqlName() + { + return StringUtils.format("\"%s\".\"%s\"", dbSchema, name); + } + + @JsonProperty("owner") + @JsonInclude(JsonInclude.Include.NON_NULL) + public String owner() + { + return owner; + } + + @JsonProperty("state") + public TableState state() + { + return state; + } + + @JsonProperty("creationTime") + public long creationTime() + { + return creationTime; + } + + @JsonProperty("updateTime") + public long updateTime() + { + return updateTime; + } + + @JsonProperty("defn") + public TableSpec defn() + { + return defn; + } + + /** + * Syntactic validation of a table object. Validates only that which + * can be checked from this table object. 
+ */ + public void validate() + { + if (Strings.isNullOrEmpty(dbSchema)) { + throw new IAE("Database schema is required"); + } + if (Strings.isNullOrEmpty(name)) { + throw new IAE("Table name is required"); + } + if (defn != null) { + defn.validate(); + } + } + + public byte[] toBytes(ObjectMapper jsonMapper) + { + return CatalogSpecs.toBytes(jsonMapper, this); + } + + public static TableMetadata fromBytes(ObjectMapper jsonMapper, byte[] bytes) + { + return CatalogSpecs.fromBytes(jsonMapper, bytes, TableMetadata.class); + } + + @Override + public String toString() + { + return CatalogSpecs.toString(this); + } + + public String resolveDbSchema() + { + if (!Strings.isNullOrEmpty(dbSchema)) { + return dbSchema; + } else if (defn != null) { + return defn.defaultSchema(); + } else { + return null; + } + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + TableMetadata other = (TableMetadata) o; + return Objects.equals(dbSchema, other.dbSchema) + && Objects.equals(name, other.name) + && Objects.equals(owner, other.owner) + && creationTime == other.creationTime + && updateTime == other.updateTime + && state == other.state + && Objects.equals(defn, other.defn); + } + + @Override + public int hashCode() + { + return Objects.hash( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } + + public TableType type() + { + return defn == null ? 
null : defn.type(); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/TableSpec.java b/server/src/main/java/org/apache/druid/catalog/TableSpec.java index 1a4449398225..c3a82bd76154 100644 --- a/server/src/main/java/org/apache/druid/catalog/TableSpec.java +++ b/server/src/main/java/org/apache/druid/catalog/TableSpec.java @@ -20,249 +20,81 @@ package org.apache.druid.catalog; import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonSubTypes.Type; +import com.fasterxml.jackson.annotation.JsonTypeInfo; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Strings; -import org.apache.druid.guice.annotations.PublicApi; -import org.apache.druid.java.util.common.IAE; -import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.metadata.catalog.CatalogManager.TableState; +import com.google.common.collect.ImmutableMap; +import org.apache.druid.catalog.TableMetadata.TableType; -import java.util.Objects; +import java.util.Map; /** - * REST API level description of a table. Tables have multiple types - * as described by subclasses. + * Definition of a table "hint" in the metastore, between client and + * Druid, and between Druid nodes. 
*/ -@PublicApi -public class TableSpec +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") +@JsonSubTypes(value = { + @Type(name = "datasource", value = DatasourceSpec.class), + @Type(name = "input", value = InputTableSpec.class), + @Type(name = "tombstone", value = TableSpec.Tombstone.class), +}) +public abstract class TableSpec { - private final String dbSchema; - private final String name; - private final String owner; - private final long creationTime; - private final long updateTime; - private final TableState state; - private final TableDefn defn; + private final Map properties; - public TableSpec( - @JsonProperty("dbSchema") String dbSchema, - @JsonProperty("name") String name, - @JsonProperty("owner") String owner, - @JsonProperty("creationTime") long creationTime, - @JsonProperty("updateTime") long updateTime, - @JsonProperty("state") TableState state, - @JsonProperty("defn") TableDefn defn) + public TableSpec(Map properties) { - this.dbSchema = dbSchema; - this.name = name; - this.owner = owner; - this.creationTime = creationTime; - this.updateTime = updateTime; - this.state = state; - this.defn = defn; + this.properties = properties == null ? 
ImmutableMap.of() : properties; } - public static TableSpec newTable( - TableId id, - TableDefn defn - ) + @JsonProperty("properties") + @JsonInclude(Include.NON_NULL) + public Map properties() { - return newTable(id.schema(), id.name(), defn); + return properties; } - public static TableSpec newTable( - String dbSchema, - String name, - TableDefn defn - ) - { - return new TableSpec( - dbSchema, - name, - null, - 0, - 0, - TableState.ACTIVE, - defn); - } - - public static TableSpec newSegmentTable( - String name, - TableDefn defn - ) - { - return newTable( - TableId.DRUID_SCHEMA, - name, - defn); - } - - public TableSpec fromInsert(String dbSchema, long updateTime) - { - return new TableSpec( - dbSchema, - name, - owner, - updateTime, - updateTime, - state, - defn); - } - - public TableSpec asUpdate(long updateTime) - { - return new TableSpec( - dbSchema, - name, - owner, - creationTime, - updateTime, - state, - defn); - } - - public TableSpec withSchema(String dbSchema) - { - if (dbSchema.equals(this.dbSchema)) { - return this; - } - return new TableSpec( - dbSchema, - name, - owner, - creationTime, - updateTime, - state, - defn); - } - - public TableId id() - { - return new TableId(resolveDbSchema(), name); - } - - @JsonProperty("dbSchema") - public String dbSchema() - { - return dbSchema; - } - - @JsonProperty("name") - public String name() - { - return name; - } - - public String sqlName() - { - return StringUtils.format("\"%s\".\"%s\"", dbSchema, name); - } - - @JsonProperty("owner") - @JsonInclude(JsonInclude.Include.NON_NULL) - public String owner() - { - return owner; - } - - @JsonProperty("state") - public TableState state() - { - return state; - } - - @JsonProperty("creationTime") - public long creationTime() - { - return creationTime; - } - - @JsonProperty("updateTime") - public long updateTime() - { - return updateTime; - } - - @JsonProperty("defn") - public TableDefn defn() - { - return defn; - } - - /** - * Syntactic validation of a table object. 
Validates only that which - * can be checked from this table object. - */ public void validate() { - if (Strings.isNullOrEmpty(dbSchema)) { - throw new IAE("Database schema is required"); - } - if (Strings.isNullOrEmpty(name)) { - throw new IAE("Table name is required"); - } - if (defn != null) { - defn.validate(); - } } public byte[] toBytes(ObjectMapper jsonMapper) { - return CatalogDefns.toBytes(jsonMapper, this); + return CatalogSpecs.toBytes(jsonMapper, this); } public static TableSpec fromBytes(ObjectMapper jsonMapper, byte[] bytes) { - return CatalogDefns.fromBytes(jsonMapper, bytes, TableSpec.class); + return CatalogSpecs.fromBytes(jsonMapper, bytes, TableSpec.class); } @Override public String toString() { - return CatalogDefns.toString(this); + return CatalogSpecs.toString(this); } - public String resolveDbSchema() + public String defaultSchema() { - if (!Strings.isNullOrEmpty(dbSchema)) { - return dbSchema; - } else if (defn != null) { - return defn.defaultSchema(); - } else { - return null; - } + return null; } - @Override - public boolean equals(Object o) + public static class Tombstone extends TableSpec { - if (o == this) { - return true; + public Tombstone() + { + super(null); } - if (o == null || o.getClass() != getClass()) { - return false; + + @Override + public TableType type() + { + return TableType.TOMBSTONE; } - TableSpec other = (TableSpec) o; - return Objects.equals(dbSchema, other.dbSchema) - && Objects.equals(name, other.name) - && Objects.equals(owner, other.owner) - && creationTime == other.creationTime - && updateTime == other.updateTime - && state == other.state - && Objects.equals(defn, other.defn); } - @Override - public int hashCode() - { - return Objects.hash( - dbSchema, - name, - owner, - creationTime, - updateTime, - state, - defn); - } + public abstract TableType type(); } diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java 
b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java index 88efbdc97e80..3f34da6ee86f 100644 --- a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java +++ b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java @@ -19,13 +19,23 @@ package org.apache.druid.metadata.catalog; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableMetadata; +import org.apache.druid.catalog.TableSpec; import org.apache.druid.java.util.common.ISE; +import javax.annotation.Nullable; + +import java.util.List; + /** * Manages catalog data. Used in Coordinator, which will be in either * an leader or standby state. The Coordinator calls the {@link #start()} * method when it becomes the leader, and calls {@link #stop()} when * it loses leadership, or shuts down. + * + * Performs detailed CRUD operations on the catalog tables table. + * Higher-level operations appear elsewhere. */ public interface CatalogManager { @@ -92,9 +102,84 @@ public DuplicateKeyException(String msg, Exception e) } } + interface Listener + { + void added(TableMetadata table); + void updated(TableMetadata table); + void deleted(TableId id); + } + void start(); - void stop(); - TableDefnManager tables(); + void register(Listener listener); + void createTableDefnTable(); + + /** + * Create a table entry. + * + * @return the version of the newly created table. Call + * {@link TableMetadata#asUpdate(long)} if you want a new + * {@link TableMetadata} with the new version. + * @throws {@link DuplicateKeyException} if the row is a duplicate + * (schema, name) pair. This generally indicates a code error, + * or since our code is perfect, a race condition or a DB + * update outside of Druid. 
In any event, the error is not + * retryable: the user should pick another name, or update the + * existing table + */ + long create(TableMetadata table) throws DuplicateKeyException; + + /** + * Update a table definition, but only if the database entry is at + * the given {@code oldVersion}. + */ + long updateDefn(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException; + + /** + * Update a table definition, overwriting any current content. + * This is a potential race conditions if this is a partial update + * because of the possibility of another user doing an update since the + * read. Fine when the goal is to replace the entire definition. + */ + long updateDefn(TableId id, TableSpec defn) throws NotFoundException; + + /** + * Move the table to the deleting state. No version check: fine + * if the table is already in the deleting state. Does nothing if the + * table does not exist. + * + * @return new table update timestamp, or 0 if the table does not + * exist + */ + long markDeleting(TableId id); + + /** + * Read the table record for the given ID. + * + * @return the table record, or {@code null} if the entry is not + * found in the DB. + */ + @Nullable TableMetadata read(TableId id); + + /** + * Delete the table record for the given ID. Essentially does a + * "DELETE IF EXISTS". There is no version check. Delete should be + * called only when there are no segments left for the table: use + * {@link #markDeleting(TableId)} to indicates that the segments are + * being deleted. Call this method after deletion is complete. + *

+ * Does not cascade deletes yet. Eventually, should delete all entries + * for the table. + * + * @return {@code true} if the table exists and was deleted, + * {@code false} if the table did not exist. + */ + boolean delete(TableId id); + + List list(); + List list(String dbSchema); + List listDetails(String dbSchema); + + void stop(); } diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java index dd150d21e54b..0400494c25da 100644 --- a/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java +++ b/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java @@ -19,16 +19,97 @@ package org.apache.druid.metadata.catalog; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Lists; import com.google.inject.Inject; import org.apache.druid.catalog.MetastoreManager; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableMetadata; +import org.apache.druid.catalog.TableSpec; import org.apache.druid.guice.ManageLifecycle; import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.lifecycle.LifecycleStart; +import org.apache.druid.metadata.SQLMetadataConnector; +import org.skife.jdbi.v2.Handle; +import org.skife.jdbi.v2.IDBI; +import org.skife.jdbi.v2.Query; +import org.skife.jdbi.v2.ResultIterator; +import org.skife.jdbi.v2.Update; +import org.skife.jdbi.v2.exceptions.CallbackFailedException; +import org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; +import org.skife.jdbi.v2.tweak.HandleCallback; + +import java.util.Deque; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentLinkedDeque; @ManageLifecycle public class SQLCatalogManager implements CatalogManager { - private final TableDefnManager tableManager; + private static final 
String INSERT_TABLE = + "INSERT INTO %s\n" + + " (schemaName, name, owner, creationTime, updateTime, state, payload)\n" + + " VALUES(:schemaName, :name, :owner, :creationTime, :updateTime, :state, :payload)"; + + private static final String UPDATE_HEAD = + "UPDATE %s\n SET\n"; + + private static final String WHERE_TABLE_ID = + "WHERE schemaName = :schemaName\n" + + " AND name = :name\n"; + + private static final String SAFETY_CHECK = + " AND updateTime = :oldVersion"; + + private static final String UPDATE_DEFN_UNSAFE = + UPDATE_HEAD + + " payload = :payload,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String UPDATE_DEFN_SAFE = + UPDATE_DEFN_UNSAFE + + SAFETY_CHECK; + + private static final String UPDATE_STATE = + UPDATE_HEAD + + " state = :state,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String SELECT_TABLE = + "SELECT owner, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_ALL_TABLES = + "SELECT schemaName, name\n" + + "FROM %s\n" + + "ORDER BY schemaName, name"; + + private static final String SELECT_TABLES_IN_SCHEMA = + "SELECT name\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String SELECT_TABLE_DETAILS_IN_SCHEMA = + "SELECT name, owner, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String DELETE_TABLE = + "DELETE FROM %s\n" + + WHERE_TABLE_ID; + + private final SQLMetadataConnector connector; + private final ObjectMapper jsonMapper; + private final IDBI dbi; + private final String tableName; + private final Deque listeners = new ConcurrentLinkedDeque<>(); @Inject public SQLCatalogManager(MetastoreManager metastoreManager) @@ -36,14 +117,17 @@ public SQLCatalogManager(MetastoreManager metastoreManager) if (!metastoreManager.isSql()) { throw new 
ISE("SQLCatalogManager only works with SQL based metadata store at this time"); } - tableManager = new SQLTableManager(metastoreManager); + this.connector = metastoreManager.sqlConnector(); + this.dbi = connector.getDBI(); + this.jsonMapper = metastoreManager.jsonMapper(); + this.tableName = metastoreManager.tablesConfig().getTableDefnTable(); } @Override @LifecycleStart public void start() { - tableManager.createTableDefnTable(); + createTableDefnTable(); } @Override @@ -52,8 +136,326 @@ public void stop() } @Override - public TableDefnManager tables() + public void createTableDefnTable() { - return tableManager; + connector.createTableDefnTable(); + } + + @Override + public long create(TableMetadata table) throws DuplicateKeyException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws DuplicateKeyException + { + long updateTime = System.currentTimeMillis(); + Update stmt = handle.createStatement( + StringUtils.format(INSERT_TABLE, tableName) + ) + .bind("schemaName", table.resolveDbSchema()) + .bind("name", table.name()) + .bind("owner", table.owner()) + .bind("creationTime", updateTime) + .bind("updateTime", updateTime) + .bind("state", TableState.ACTIVE.code()) + .bind("payload", table.defn().toBytes(jsonMapper)); + try { + stmt.execute(); + } + catch (UnableToExecuteStatementException e) { + if (connector.isDuplicateRecordException(e)) { + throw new DuplicateKeyException( + "Tried to insert a duplicate table: " + table.sqlName(), + e); + } else { + throw e; + } + } + sendAddition(table, updateTime); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof DuplicateKeyException) { + throw (DuplicateKeyException) e.getCause(); + } + throw e; + } + } + + @Override + public TableMetadata read(TableId id) + { + return dbi.withHandle( + new HandleCallback() + { + @Override + public TableMetadata withHandle(Handle handle) + { + Query> query = 
handle.createQuery( + StringUtils.format(SELECT_TABLE, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()) + .bind("schemaName", id.schema()) + .bind("name", id.name()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableMetadata( + id.schema(), + id.name(), + r.getString(1), + r.getLong(2), + r.getLong(3), + TableState.fromCode(r.getString(4)), + TableSpec.fromBytes(jsonMapper, r.getBytes(5)) + )) + .iterator(); + if (resultIterator.hasNext()) { + return resultIterator.next(); + } + return null; + } + } + ); + } + + @Override + public long updateDefn(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws OutOfDateException + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_SAFE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("payload", defn.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .bind("oldVersion", oldVersion) + .execute(); + if (updateCount == 0) { + throw new OutOfDateException( + StringUtils.format( + "Table %s: not found or update version does not match DB version", + id.sqlName())); + } + sendUpdate(id); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof OutOfDateException) { + throw (OutOfDateException) e.getCause(); + } + throw e; + } + } + + @Override + public long updateDefn(TableId id, TableSpec defn) throws NotFoundException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws NotFoundException + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_UNSAFE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("payload", 
defn.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .execute(); + if (updateCount == 0) { + throw new NotFoundException( + StringUtils.format( + "Table %s: not found", + id.sqlName())); + } + sendUpdate(id); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof NotFoundException) { + throw (NotFoundException) e.getCause(); + } + throw e; + } + } + + @Override + public long markDeleting(TableId id) + { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_STATE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("updateTime", updateTime) + .bind("state", TableState.DELETING.code()) + .execute(); + sendDeletion(id); + return updateCount == 1 ? updateTime : 0; + } + } + ); + } + + @Override + public boolean delete(TableId id) + { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Boolean withHandle(Handle handle) + { + int updateCount = handle.createStatement( + StringUtils.format(DELETE_TABLE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .execute(); + sendDeletion(id); + return updateCount > 0; + } + } + ); + } + + @Override + public List list() + { + return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_ALL_TABLES, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableId(r.getString(1), r.getString(2))) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public List list(String dbSchema) + { + return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) + { + 
Query> query = handle.createQuery( + StringUtils.format(SELECT_TABLES_IN_SCHEMA, tableName) + ) + .bind("schemaName", dbSchema) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + r.getString(1)) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public List listDetails(String dbSchema) + { + return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_TABLE_DETAILS_IN_SCHEMA, tableName) + ) + .bind("schemaName", dbSchema) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableMetadata( + dbSchema, + r.getString(1), + r.getString(2), + r.getLong(3), + r.getLong(4), + TableState.fromCode(r.getString(5)), + TableSpec.fromBytes(jsonMapper, r.getBytes(6)))) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public synchronized void register(Listener listener) + { + listeners.add(listener); + } + + protected synchronized void sendAddition(TableMetadata table, long updateTime) + { + if (listeners.isEmpty()) { + return; + } + TableMetadata newTable = table.fromInsert(table.dbSchema(), updateTime); + for (Listener listener : listeners) { + listener.added(newTable); + } + } + + protected synchronized void sendUpdate(TableId id) + { + if (listeners.isEmpty()) { + return; + } + TableMetadata updatedTable = read(id); + for (Listener listener : listeners) { + listener.updated(updatedTable); + } + } + + protected synchronized void sendDeletion(TableId id) + { + for (Listener listener : listeners) { + listener.deleted(id); + } } } diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java deleted file mode 100644 index 
6faf88c6df7d..000000000000 --- a/server/src/main/java/org/apache/druid/metadata/catalog/SQLTableManager.java +++ /dev/null @@ -1,446 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.metadata.catalog; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.Lists; -import org.apache.druid.catalog.MetastoreManager; -import org.apache.druid.catalog.TableDefn; -import org.apache.druid.catalog.TableId; -import org.apache.druid.catalog.TableSpec; -import org.apache.druid.java.util.common.StringUtils; -import org.apache.druid.metadata.SQLMetadataConnector; -import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; -import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; -import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; -import org.apache.druid.metadata.catalog.CatalogManager.TableState; -import org.skife.jdbi.v2.Handle; -import org.skife.jdbi.v2.IDBI; -import org.skife.jdbi.v2.Query; -import org.skife.jdbi.v2.ResultIterator; -import org.skife.jdbi.v2.Update; -import org.skife.jdbi.v2.exceptions.CallbackFailedException; -import 
org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; -import org.skife.jdbi.v2.tweak.HandleCallback; - -import java.util.Deque; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentLinkedDeque; - -public class SQLTableManager implements TableDefnManager -{ - private static final String INSERT_TABLE = - "INSERT INTO %s\n" + - " (schemaName, name, owner, creationTime, updateTime, state, payload)\n" + - " VALUES(:schemaName, :name, :owner, :creationTime, :updateTime, :state, :payload)"; - - private static final String UPDATE_HEAD = - "UPDATE %s\n SET\n"; - - private static final String WHERE_TABLE_ID = - "WHERE schemaName = :schemaName\n" + - " AND name = :name\n"; - - private static final String SAFETY_CHECK = - " AND updateTime = :oldVersion"; - - private static final String UPDATE_DEFN_UNSAFE = - UPDATE_HEAD + - " payload = :payload,\n" + - " updateTime = :updateTime\n" + - WHERE_TABLE_ID; - - private static final String UPDATE_DEFN_SAFE = - UPDATE_DEFN_UNSAFE + - SAFETY_CHECK; - - private static final String UPDATE_STATE = - UPDATE_HEAD + - " state = :state,\n" + - " updateTime = :updateTime\n" + - WHERE_TABLE_ID; - - private static final String SELECT_TABLE = - "SELECT owner, creationTime, updateTime, state, payload\n" + - "FROM %s\n" + - WHERE_TABLE_ID; - - private static final String SELECT_ALL_TABLES = - "SELECT schemaName, name\n" + - "FROM %s\n" + - "ORDER BY schemaName, name"; - - private static final String SELECT_TABLES_IN_SCHEMA = - "SELECT name\n" + - "FROM %s\n" + - "WHERE schemaName = :schemaName\n" + - "ORDER BY name"; - - private static final String SELECT_TABLE_DETAILS_IN_SCHEMA = - "SELECT name, owner, creationTime, updateTime, state, payload\n" + - "FROM %s\n" + - "WHERE schemaName = :schemaName\n" + - "ORDER BY name"; - - private static final String DELETE_TABLE = - "DELETE FROM %s\n" + - WHERE_TABLE_ID; - - private final SQLMetadataConnector connector; - private final ObjectMapper jsonMapper; - 
private final IDBI dbi; - private final String tableName; - private final Deque listeners = new ConcurrentLinkedDeque<>(); - - public SQLTableManager( - MetastoreManager metastoreManager - ) - { - this.connector = metastoreManager.sqlConnector(); - this.dbi = connector.getDBI(); - this.jsonMapper = metastoreManager.jsonMapper(); - this.tableName = metastoreManager.tablesConfig().getTableDefnTable(); - } - - @Override - public void createTableDefnTable() - { - connector.createTableDefnTable(); - } - - @Override - public long create(TableSpec table) throws DuplicateKeyException - { - try { - return dbi.withHandle( - new HandleCallback() - { - @Override - public Long withHandle(Handle handle) throws DuplicateKeyException - { - long updateTime = System.currentTimeMillis(); - Update stmt = handle.createStatement( - StringUtils.format(INSERT_TABLE, tableName) - ) - .bind("schemaName", table.resolveDbSchema()) - .bind("name", table.name()) - .bind("owner", table.owner()) - .bind("creationTime", updateTime) - .bind("updateTime", updateTime) - .bind("state", TableState.ACTIVE.code()) - .bind("payload", table.defn().toBytes(jsonMapper)); - try { - stmt.execute(); - } - catch (UnableToExecuteStatementException e) { - if (connector.isDuplicateRecordException(e)) { - throw new DuplicateKeyException( - "Tried to insert a duplicate table: " + table.sqlName(), - e); - } else { - throw e; - } - } - sendAddition(table, updateTime); - return updateTime; - } - } - ); - } - catch (CallbackFailedException e) { - if (e.getCause() instanceof DuplicateKeyException) { - throw (DuplicateKeyException) e.getCause(); - } - throw e; - } - } - - @Override - public TableSpec read(TableId id) - { - return dbi.withHandle( - new HandleCallback() - { - @Override - public TableSpec withHandle(Handle handle) - { - Query> query = handle.createQuery( - StringUtils.format(SELECT_TABLE, tableName) - ) - .setFetchSize(connector.getStreamingFetchSize()) - .bind("schemaName", id.schema()) - .bind("name", 
id.name()); - final ResultIterator resultIterator = - query.map((index, r, ctx) -> - new TableSpec( - id.schema(), - id.name(), - r.getString(1), - r.getLong(2), - r.getLong(3), - TableState.fromCode(r.getString(4)), - TableDefn.fromBytes(jsonMapper, r.getBytes(5)) - )) - .iterator(); - if (resultIterator.hasNext()) { - return resultIterator.next(); - } - return null; - } - } - ); - } - - @Override - public long updateDefn(TableId id, TableDefn defn, long oldVersion) throws OutOfDateException - { - try { - return dbi.withHandle( - new HandleCallback() - { - @Override - public Long withHandle(Handle handle) throws OutOfDateException - { - long updateTime = System.currentTimeMillis(); - int updateCount = handle.createStatement( - StringUtils.format(UPDATE_DEFN_SAFE, tableName)) - .bind("schemaName", id.schema()) - .bind("name", id.name()) - .bind("payload", defn.toBytes(jsonMapper)) - .bind("updateTime", updateTime) - .bind("oldVersion", oldVersion) - .execute(); - if (updateCount == 0) { - throw new OutOfDateException( - StringUtils.format( - "Table %s: not found or update version does not match DB version", - id.sqlName())); - } - sendUpdate(id); - return updateTime; - } - } - ); - } - catch (CallbackFailedException e) { - if (e.getCause() instanceof OutOfDateException) { - throw (OutOfDateException) e.getCause(); - } - throw e; - } - } - - @Override - public long updateDefn(TableId id, TableDefn defn) throws NotFoundException - { - try { - return dbi.withHandle( - new HandleCallback() - { - @Override - public Long withHandle(Handle handle) throws NotFoundException - { - long updateTime = System.currentTimeMillis(); - int updateCount = handle.createStatement( - StringUtils.format(UPDATE_DEFN_UNSAFE, tableName)) - .bind("schemaName", id.schema()) - .bind("name", id.name()) - .bind("payload", defn.toBytes(jsonMapper)) - .bind("updateTime", updateTime) - .execute(); - if (updateCount == 0) { - throw new NotFoundException( - StringUtils.format( - "Table %s: not found", 
- id.sqlName())); - } - sendUpdate(id); - return updateTime; - } - } - ); - } - catch (CallbackFailedException e) { - if (e.getCause() instanceof NotFoundException) { - throw (NotFoundException) e.getCause(); - } - throw e; - } - } - - @Override - public long markDeleting(TableId id) - { - return dbi.withHandle( - new HandleCallback() - { - @Override - public Long withHandle(Handle handle) - { - long updateTime = System.currentTimeMillis(); - int updateCount = handle.createStatement( - StringUtils.format(UPDATE_STATE, tableName)) - .bind("schemaName", id.schema()) - .bind("name", id.name()) - .bind("updateTime", updateTime) - .bind("state", TableState.DELETING.code()) - .execute(); - sendDeletion(id); - return updateCount == 1 ? updateTime : 0; - } - } - ); - } - - @Override - public boolean delete(TableId id) - { - return dbi.withHandle( - new HandleCallback() - { - @Override - public Boolean withHandle(Handle handle) - { - int updateCount = handle.createStatement( - StringUtils.format(DELETE_TABLE, tableName)) - .bind("schemaName", id.schema()) - .bind("name", id.name()) - .execute(); - sendDeletion(id); - return updateCount > 0; - } - } - ); - } - - @Override - public List list() - { - return dbi.withHandle( - new HandleCallback>() - { - @Override - public List withHandle(Handle handle) - { - Query> query = handle.createQuery( - StringUtils.format(SELECT_ALL_TABLES, tableName) - ) - .setFetchSize(connector.getStreamingFetchSize()); - final ResultIterator resultIterator = - query.map((index, r, ctx) -> - new TableId(r.getString(1), r.getString(2))) - .iterator(); - return Lists.newArrayList(resultIterator); - } - } - ); - } - - @Override - public List list(String dbSchema) - { - return dbi.withHandle( - new HandleCallback>() - { - @Override - public List withHandle(Handle handle) - { - Query> query = handle.createQuery( - StringUtils.format(SELECT_TABLES_IN_SCHEMA, tableName) - ) - .bind("schemaName", dbSchema) - .setFetchSize(connector.getStreamingFetchSize()); 
- final ResultIterator resultIterator = - query.map((index, r, ctx) -> - r.getString(1)) - .iterator(); - return Lists.newArrayList(resultIterator); - } - } - ); - } - - @Override - public List listDetails(String dbSchema) - { - return dbi.withHandle( - new HandleCallback>() - { - @Override - public List withHandle(Handle handle) - { - Query> query = handle.createQuery( - StringUtils.format(SELECT_TABLE_DETAILS_IN_SCHEMA, tableName) - ) - .bind("schemaName", dbSchema) - .setFetchSize(connector.getStreamingFetchSize()); - final ResultIterator resultIterator = - query.map((index, r, ctx) -> - new TableSpec( - dbSchema, - r.getString(1), - r.getString(2), - r.getLong(3), - r.getLong(4), - TableState.fromCode(r.getString(5)), - TableDefn.fromBytes(jsonMapper, r.getBytes(6)))) - .iterator(); - return Lists.newArrayList(resultIterator); - } - } - ); - } - - @Override - public synchronized void register(Listener listener) - { - listeners.add(listener); - } - - protected synchronized void sendAddition(TableSpec table, long updateTime) - { - if (listeners.isEmpty()) { - return; - } - TableSpec newTable = table.fromInsert(table.dbSchema(), updateTime); - for (Listener listener : listeners) { - listener.added(newTable); - } - } - - protected synchronized void sendUpdate(TableId id) - { - if (listeners.isEmpty()) { - return; - } - TableSpec updatedTable = read(id); - for (Listener listener : listeners) { - listener.updated(updatedTable); - } - } - - protected synchronized void sendDeletion(TableId id) - { - for (Listener listener : listeners) { - listener.deleted(id); - } - } -} diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java deleted file mode 100644 index c2fcb2598e47..000000000000 --- a/server/src/main/java/org/apache/druid/metadata/catalog/TableDefnManager.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under 
one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.druid.metadata.catalog; - -import org.apache.druid.catalog.TableDefn; -import org.apache.druid.catalog.TableId; -import org.apache.druid.catalog.TableSpec; -import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; -import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; -import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; - -import javax.annotation.Nullable; - -import java.util.List; - -/** - * The Table Manager performs detailed CRUD operations on the - * catalog tables table. Higher-level operations appear - * elsewhere. - */ -public interface TableDefnManager -{ - interface Listener - { - void added(TableSpec table); - void updated(TableSpec table); - void deleted(TableId id); - } - - void register(Listener listener); - void createTableDefnTable(); - - /** - * Create a table entry. - * - * @return the version of the newly created table. Call - * {@link TableSpec#asUpdate(long)} if you want a new - * {@link TableSpec} with the new version. - * @throws {@link DuplicateKeyException} if the row is a duplicate - * (schema, name) pair. 
This generally indicates a code error, - * or since our code is perfect, a race condition or a DB - * update outside of Druid. In any event, the error is not - * retryable: the user should pick another name, or update the - * existing table - */ - long create(TableSpec table) throws DuplicateKeyException; - - /** - * Update a table definition, but only if the database entry is at - * the given {@code oldVersion}. - */ - long updateDefn(TableId id, TableDefn defn, long oldVersion) throws OutOfDateException; - - /** - * Update a table definition, overwriting any current content. - * This is a potential race conditions if this is a partial update - * because of the possibility of another user doing an update since the - * read. Fine when the goal is to replace the entire definition. - */ - long updateDefn(TableId id, TableDefn defn) throws NotFoundException; - - /** - * Move the table to the deleting state. No version check: fine - * if the table is already in the deleting state. Does nothing if the - * table does not exist. - * - * @return new table update timestamp, or 0 if the table does not - * exist - */ - long markDeleting(TableId id); - - /** - * Read the table record for the given ID. - * - * @return the table record, or {@code null} if the entry is not - * found in the DB. - */ - @Nullable TableSpec read(TableId id); - - /** - * Delete the table record for the given ID. Essentially does a - * "DELETE IF EXISTS". There is no version check. Delete should be - * called only when there are no segments left for the table: use - * {@link #markDeleting(TableId)} to indicates that the segments are - * being deleted. Call this method after deletion is complete. - *

- * Does not cascade deletes yet. Eventually, should delete all entries - * for the table. - * - * @return {@code true} if the table exists and was deleted, - * {@code false} if the table did not exist. - */ - boolean delete(TableId id); - - List list(); - List list(String dbSchema); - List listDetails(String dbSchema); -} diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java index 20542cdc123d..ccff0a87549a 100644 --- a/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java +++ b/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes; import org.apache.druid.catalog.MetadataCatalog.CatalogListener; -import org.apache.druid.catalog.TableDefn; +import org.apache.druid.catalog.TableMetadata; import org.apache.druid.catalog.TableSpec; import org.apache.druid.guice.annotations.Json; import org.apache.druid.guice.annotations.Smile; @@ -85,15 +85,15 @@ public Response syncTable( final String reqContentType = req.getContentType(); final boolean isSmile = SmileMediaTypes.APPLICATION_JACKSON_SMILE.equals(reqContentType); final ObjectMapper mapper = isSmile ? 
smileMapper : jsonMapper; - TableSpec tableSpec; + TableMetadata tableSpec; try { - tableSpec = mapper.readValue(inputStream, TableSpec.class); + tableSpec = mapper.readValue(inputStream, TableMetadata.class); } catch (IOException e) { return Response.serverError().entity(e.getMessage()).build(); } - TableDefn defn = tableSpec.defn(); - if (defn instanceof TableDefn.Tombstone) { + TableSpec defn = tableSpec.defn(); + if (defn instanceof TableSpec.Tombstone) { listener.deleted(tableSpec.id()); } else { listener.updated(tableSpec); diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java index 8f2478167032..5c07e72b87ba 100644 --- a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java +++ b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java @@ -24,16 +24,16 @@ import org.apache.druid.catalog.Actions; import org.apache.druid.catalog.CatalogStorage; import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; -import org.apache.druid.catalog.TableDefn; import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableMetadata; import org.apache.druid.catalog.TableSpec; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.metadata.catalog.CatalogManager; import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; -import org.apache.druid.metadata.catalog.TableDefnManager; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.AuthorizationUtils; import org.apache.druid.server.security.ForbiddenException; @@ -94,7 +94,7 @@ public CatalogResource(CatalogStorage catalog) @Consumes(MediaType.APPLICATION_JSON) 
@Produces(MediaType.APPLICATION_JSON) public Response createTable( - TableSpec table, + TableMetadata table, @QueryParam("ifnew") boolean ifNew, @Context final HttpServletRequest req) { @@ -116,7 +116,7 @@ public Response createTable( catch (IAE e) { return Actions.badRequest(Actions.INVALID, e.getMessage()); } - TableDefn defn = table.defn(); + TableSpec defn = table.defn(); if (!schema.accepts(defn)) { return Actions.badRequest( Actions.INVALID, @@ -174,7 +174,7 @@ public Response createTable( public Response updateTableDefn( @PathParam("dbSchema") String dbSchema, @PathParam("name") String name, - TableDefn defn, + TableSpec defn, @QueryParam("version") long version, @Context final HttpServletRequest req) { @@ -214,7 +214,7 @@ public Response updateTableDefn( return Actions.forbidden(e); } try { - TableDefnManager tableMgr = catalog.tables(); + CatalogManager tableMgr = catalog.tables(); TableId tableId = new TableId(dbSchema, name); long newVersion; if (version == 0) { @@ -279,7 +279,7 @@ public Response getTable( } try { TableId tableId = new TableId(dbSchema, name); - TableSpec table = catalog.tables().read(tableId); + TableMetadata table = catalog.tables().read(tableId); if (table == null) { return Response.status(Response.Status.NOT_FOUND).build(); } @@ -443,8 +443,8 @@ public Response syncSchema( return result.lhs; } SchemaDefn schema = result.rhs; - List tables = catalog.tables().listDetails(dbSchema); - Iterable filtered = AuthorizationUtils.filterAuthorizedResources( + List tables = catalog.tables().listDetails(dbSchema); + Iterable filtered = AuthorizationUtils.filterAuthorizedResources( req, tables, table -> diff --git a/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java b/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java index 4b02c5baa12a..b5ec77caeb8f 100644 --- a/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java +++ b/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java @@ 
-52,7 +52,7 @@ public void accept(byte[] update) public void testNotifier() { MockSender sender = new MockSender(); - CommonCacheNotifierEx notifier = new CommonCacheNotifierEx("test", sender); + CacheNotifier notifier = new CacheNotifier("test", sender); notifier.start(); for (int i = 0; i < 100; i++) { byte[] msg = new byte[] {(byte) i}; @@ -110,7 +110,7 @@ public void testStack() restSender, "/test/foo", 1000); - CommonCacheNotifierEx notifier = new CommonCacheNotifierEx("test", updateSender); + CacheNotifier notifier = new CacheNotifier("test", updateSender); notifier.start(); for (int i = 0; i < 100; i++) { byte[] msg = new byte[] {(byte) i}; diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java b/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java index 5f6b70bb6b9d..172c7aa9bf98 100644 --- a/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java +++ b/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java @@ -34,7 +34,7 @@ public class CatalogObjectTest @Test public void testMinimalTable() { - TableSpec table = new TableSpec( + TableMetadata table = new TableMetadata( TableId.DRUID_SCHEMA, "foo", "bob", @@ -52,7 +52,7 @@ public void testMinimalTable() assertNull(table.defn()); try { - table = new TableSpec( + table = new TableMetadata( null, "foo", "bob", @@ -68,7 +68,7 @@ public void testMinimalTable() } try { - table = new TableSpec( + table = new TableMetadata( TableId.DRUID_SCHEMA, null, "bob", @@ -87,10 +87,10 @@ public void testMinimalTable() @Test public void testDefn() { - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); - TableSpec table = new TableSpec( + TableMetadata table = new TableMetadata( TableId.DRUID_SCHEMA, "foo", "bob", @@ -102,7 +102,7 @@ public void testDefn() assertSame(defn, table.defn()); try { - table = new TableSpec( + table = new TableMetadata( "wrong", "foo", "bob", @@ 
-121,10 +121,10 @@ public void testDefn() @Test public void testConversions() { - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); - TableSpec table = TableSpec.newSegmentTable( + TableMetadata table = TableMetadata.newSegmentTable( "ds", defn); assertEquals(TableId.datasource("ds"), table.id()); @@ -132,17 +132,17 @@ public void testConversions() assertEquals(0, table.updateTime()); assertSame(defn, table.defn()); - TableSpec table2 = TableSpec.newSegmentTable("ds", defn); + TableMetadata table2 = TableMetadata.newSegmentTable("ds", defn); assertEquals(table, table2); - TableSpec table3 = table2.asUpdate(20); + TableMetadata table3 = table2.asUpdate(20); assertEquals(20, table3.updateTime()); } @Test public void testEquals() { - EqualsVerifier.forClass(TableSpec.class) + EqualsVerifier.forClass(TableMetadata.class) .usingGetClass() .verify(); } diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java b/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java index 7ecb3ab0ab14..4f0cd1129648 100644 --- a/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java +++ b/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java @@ -74,13 +74,13 @@ private static long getVersion(Response resp) @Test public void testCreate() { - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); // Missing schema name: infer the schema. String tableName = "create"; - TableSpec table = TableSpec.newTable( + TableMetadata table = TableMetadata.newTable( null, "create1", defn); @@ -88,7 +88,7 @@ public void testCreate() assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); // Blank schema name: infer the schema. 
- table = TableSpec.newTable( + table = TableMetadata.newTable( "", "create2", defn); @@ -96,27 +96,27 @@ public void testCreate() assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); // Missing table name - table = TableSpec.newTable(TableId.DRUID_SCHEMA, null, defn); + table = TableMetadata.newTable(TableId.DRUID_SCHEMA, null, defn); resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); // Unknown schema - table = TableSpec.newTable("bogus", tableName, defn); + table = TableMetadata.newTable("bogus", tableName, defn); resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); // Immutable schema - table = TableSpec.newTable(TableId.CATALOG_SCHEMA, tableName, defn); + table = TableMetadata.newTable(TableId.CATALOG_SCHEMA, tableName, defn); resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); // Wrong definition type. 
- table = TableSpec.newTable(TableId.INPUT_SCHEMA, tableName, defn); + table = TableMetadata.newTable(TableId.INPUT_SCHEMA, tableName, defn); resp = resource.createTable(table, false, postBy(DummyRequest.DENY_USER)); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); // No permissions - table = TableSpec.newTable(TableId.DRUID_SCHEMA, tableName, defn); + table = TableMetadata.newTable(TableId.DRUID_SCHEMA, tableName, defn); resp = resource.createTable(table, false, postBy(DummyRequest.DENY_USER)); assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); @@ -141,13 +141,13 @@ public void testCreate() // Input source InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); InputFormat inputFormat = CatalogTests.csvFormat(); - InputSourceDefn inputDefn = InputSourceDefn + InputTableSpec inputDefn = InputTableSpec .builder() .source(inputSource) .format(inputFormat) .column("a", "varchar") .build(); - table = TableSpec.newTable(TableId.INPUT_SCHEMA, "input", inputDefn); + table = TableMetadata.newTable(TableId.INPUT_SCHEMA, "input", inputDefn); resp = resource.createTable(table, true, postBy(DummyRequest.WRITER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); } @@ -155,7 +155,7 @@ public void testCreate() @Test public void testUpdate() { - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); @@ -183,7 +183,7 @@ public void testUpdate() assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); // Create the table - TableSpec table = TableSpec.newTable( + TableMetadata table = TableMetadata.newTable( TableId.DRUID_SCHEMA, "update", defn); @@ -214,7 +214,7 @@ public void testUpdate() @Test public void testRead() { - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); @@ -236,7 +236,7 @@ public void testRead() 
assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); // Create the table - TableSpec table = TableSpec.newTable( + TableMetadata table = TableMetadata.newTable( TableId.DRUID_SCHEMA, tableName, defn); @@ -251,13 +251,13 @@ public void testRead() // Valid resp = resource.getTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.READER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); - TableSpec read = (TableSpec) resp.getEntity(); + TableMetadata read = (TableMetadata) resp.getEntity(); assertEquals(table, read); // Internal sync API resp = resource.syncTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.SUPER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); - read = (TableSpec) resp.getEntity(); + read = (TableMetadata) resp.getEntity(); assertEquals(table, read); } @@ -274,9 +274,9 @@ private List getTableList(Response resp) } @SuppressWarnings("unchecked") - private List getDetailsList(Response resp) + private List getDetailsList(Response resp) { - return (List) resp.getEntity(); + return (List) resp.getEntity(); } @Test @@ -302,10 +302,10 @@ public void testList() assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); // Create a table - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); - TableSpec table = TableSpec.newTable(TableId.DRUID_SCHEMA, "list", defn); + TableMetadata table = TableMetadata.newTable(TableId.DRUID_SCHEMA, "list", defn); resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); @@ -343,7 +343,7 @@ public void testList() resp = resource.syncSchema(TableId.DRUID_SCHEMA, getBy(DummyRequest.SUPER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); - List details = getDetailsList(resp); + List details = getDetailsList(resp); assertEquals(1, 
details.size()); } @@ -375,10 +375,10 @@ public void testDelete() assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); // Create the table - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); - TableSpec table = TableSpec.newTable( + TableMetadata table = TableMetadata.newTable( TableId.DRUID_SCHEMA, tableName, defn); @@ -405,10 +405,10 @@ public void testLifecycle() { // Operations for one table - create String table1Name = "lifecycle1"; - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); - TableSpec table = TableSpec.newTable(TableId.DRUID_SCHEMA, table1Name, defn); + TableMetadata table = TableMetadata.newTable(TableId.DRUID_SCHEMA, table1Name, defn); Response resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); long version = getVersion(resp); @@ -417,7 +417,7 @@ public void testLifecycle() // read resp = resource.getTable(TableId.DRUID_SCHEMA, table1Name, postBy(DummyRequest.READER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); - TableSpec read = (TableSpec) resp.getEntity(); + TableMetadata read = (TableMetadata) resp.getEntity(); assertEquals(table, read); // list @@ -434,7 +434,7 @@ public void testLifecycle() assertEquals(table.name(), tables.get(0)); // update - DatasourceDefn defn2 = DatasourceDefn.builder() + DatasourceSpec defn2 = DatasourceSpec.builder() .segmentGranularity("PT1H") .build(); resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, table1Name, defn2, version, postBy(DummyRequest.WRITER_USER)); @@ -445,14 +445,14 @@ public void testLifecycle() // verify update resp = resource.getTable(TableId.DRUID_SCHEMA, table1Name, getBy(DummyRequest.READER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); - read = (TableSpec) 
resp.getEntity(); + read = (TableMetadata) resp.getEntity(); assertEquals(table.creationTime(), read.creationTime()); assertEquals(version, read.updateTime()); assertEquals(defn2, read.defn()); // add second table String table2Name = "lifecycle2"; - TableSpec table2 = TableSpec.newTable(TableId.DRUID_SCHEMA, table2Name, defn); + TableMetadata table2 = TableMetadata.newTable(TableId.DRUID_SCHEMA, table2Name, defn); resp = resource.createTable(table2, false, postBy(DummyRequest.WRITER_USER)); assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); diff --git a/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java b/server/src/test/java/org/apache/druid/catalog/DatasourceSpecTest.java similarity index 72% rename from server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java rename to server/src/test/java/org/apache/druid/catalog/DatasourceSpecTest.java index fc5ff65bf919..a8e2fcdfb24a 100644 --- a/server/src/test/java/org/apache/druid/catalog/DatasourceDefnTest.java +++ b/server/src/test/java/org/apache/druid/catalog/DatasourceSpecTest.java @@ -22,6 +22,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableMap; import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.catalog.DatasourceColumnSpec.MeasureSpec; import org.apache.druid.java.util.common.IAE; import org.junit.Test; @@ -37,13 +38,13 @@ /** * Test of validation and serialization of the catalog table definitions. 
*/ -public class DatasourceDefnTest +public class DatasourceSpecTest { @Test public void testMinimalBuilder() { // Minimum possible definition - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); @@ -52,14 +53,14 @@ public void testMinimalBuilder() assertNull(defn.rollupGranularity()); assertEquals(0, defn.targetSegmentRows()); - DatasourceDefn copy = defn.toBuilder().build(); + DatasourceSpec copy = defn.toBuilder().build(); assertEquals(defn, copy); } @Test public void testFullBuilder() { - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1H") .rollupGranularity("PT1M") .targetSegmentRows(1_000_000) @@ -70,7 +71,7 @@ public void testFullBuilder() assertEquals("PT1M", defn.rollupGranularity()); assertEquals(1_000_000, defn.targetSegmentRows()); - DatasourceDefn copy = defn.toBuilder().build(); + DatasourceSpec copy = defn.toBuilder().build(); assertEquals(defn, copy); } @@ -79,48 +80,48 @@ public void testProperties() { Map props = ImmutableMap.of( "foo", 10, "bar", "mumble"); - DatasourceDefn defn = DatasourceDefn.builder() - .segmentGranularity("PT1D") + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1M") .properties(props) .build(); defn.validate(); assertEquals(props, defn.properties()); - DatasourceDefn copy = defn.toBuilder().build(); + DatasourceSpec copy = defn.toBuilder().build(); assertEquals(defn, copy); } @Test public void testColumns() { - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .rollupGranularity("PT1M") - .column(DatasourceColumnDefn.builder("a").build()) - .column(DatasourceColumnDefn.builder("b").sqlType("VARCHAR").build()) - .column(DatasourceColumnDefn.builder("c").sqlType("BIGINT").measure("SUM").build()) + .column("a", null) + .column("b", "VARCHAR") + .measure("c", "BIGINT", "SUM") 
.build(); defn.validate(); - List columns = defn.columns(); + List columns = defn.columns(); assertEquals(3, columns.size()); - assertTrue(columns.get(0) instanceof DatasourceColumnDefn); + assertTrue(columns.get(0) instanceof DatasourceColumnSpec); assertEquals("a", columns.get(0).name()); assertNull(columns.get(0).sqlType()); - assertTrue(columns.get(1) instanceof DatasourceColumnDefn); + assertTrue(columns.get(1) instanceof DatasourceColumnSpec); assertEquals("b", columns.get(1).name()); assertEquals("VARCHAR", columns.get(1).sqlType()); - assertTrue(columns.get(2) instanceof MeasureColumnDefn); + assertTrue(columns.get(2) instanceof MeasureSpec); assertEquals("c", columns.get(2).name()); assertEquals("BIGINT", columns.get(2).sqlType()); - assertEquals("SUM", ((MeasureColumnDefn) columns.get(2)).aggregateFn()); + assertEquals("SUM", ((MeasureSpec) columns.get(2)).aggregateFn()); - DatasourceDefn copy = defn.toBuilder().build(); + DatasourceSpec copy = defn.toBuilder().build(); assertEquals(defn, copy); try { - defn = DatasourceDefn.builder() + defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .column("c", "FOO") .build(); @@ -132,9 +133,9 @@ public void testColumns() } try { - defn = DatasourceDefn.builder() + defn = DatasourceSpec.builder() .segmentGranularity("PT1D") - .column(DatasourceColumnDefn.builder("c").sqlType("BIGINT").measure("SUM").build()) + .measure("c", "BIGINT", "SUM") .build(); defn.validate(); fail(); @@ -144,10 +145,10 @@ public void testColumns() } try { - defn = DatasourceDefn.builder() + defn = DatasourceSpec.builder() .segmentGranularity("PT1D") - .column(DatasourceColumnDefn.builder("a").build()) - .column(DatasourceColumnDefn.builder("a").build()) + .column("a", null) + .column("a", null) .build(); defn.validate(); fail(); @@ -161,7 +162,7 @@ public void testColumns() public void testValidation() { // Ignore rollup grain for detail table - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = 
DatasourceSpec.builder() .segmentGranularity("PT1H") .build(); @@ -169,7 +170,7 @@ public void testValidation() assertEquals("PT1H", defn.segmentGranularity()); // Negative segment size mapped to 0 - defn = DatasourceDefn.builder() + defn = DatasourceSpec.builder() .segmentGranularity("PT1H") .targetSegmentRows(-1) .build(); @@ -180,14 +181,14 @@ public void testValidation() public void testSerialization() { ObjectMapper mapper = new ObjectMapper(); - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1H") .rollupGranularity("PT1M") .targetSegmentRows(1_000_000) .build(); // Round-trip - TableDefn defn2 = TableDefn.fromBytes(mapper, defn.toBytes(mapper)); + TableSpec defn2 = TableSpec.fromBytes(mapper, defn.toBytes(mapper)); assertEquals(defn, defn2); // Sanity check of toString, which uses JSON @@ -197,7 +198,7 @@ public void testSerialization() @Test public void testEquals() { - EqualsVerifier.forClass(DatasourceDefn.class) + EqualsVerifier.forClass(DatasourceSpec.class) .usingGetClass() .verify(); } diff --git a/server/src/test/java/org/apache/druid/catalog/InputSourceDefnTest.java b/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java similarity index 87% rename from server/src/test/java/org/apache/druid/catalog/InputSourceDefnTest.java rename to server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java index 2880e0112337..fedaed9a33db 100644 --- a/server/src/test/java/org/apache/druid/catalog/InputSourceDefnTest.java +++ b/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java @@ -34,7 +34,7 @@ import static org.junit.Assert.assertSame; import static org.junit.Assert.fail; -public class InputSourceDefnTest +public class InputTableSpecTest { @Test public void testMinimalBuilder() @@ -42,7 +42,7 @@ public void testMinimalBuilder() // Minimum possible definition InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); InputFormat inputFormat = 
CatalogTests.csvFormat(); - InputSourceDefn defn = InputSourceDefn + InputTableSpec defn = InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -52,19 +52,19 @@ public void testMinimalBuilder() defn.validate(); assertSame(inputSource, defn.inputSource()); assertSame(inputFormat, defn.format()); - List columns = defn.columns(); + List columns = defn.columns(); assertEquals(1, columns.size()); assertEquals("a", columns.get(0).name()); assertEquals("varchar", columns.get(0).sqlType()); - InputSourceDefn copy = defn.toBuilder().build(); + InputTableSpec copy = defn.toBuilder().build(); assertEquals(defn, copy); } @Test public void testValidation() { - InputSourceDefn defn = InputSourceDefn.builder().build(); + InputTableSpec defn = InputTableSpec.builder().build(); try { defn.validate(); fail(); @@ -74,7 +74,7 @@ public void testValidation() } InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); - defn = InputSourceDefn + defn = InputTableSpec .builder() .source(inputSource) .build(); @@ -87,7 +87,7 @@ public void testValidation() } InputFormat inputFormat = CatalogTests.csvFormat(); - defn = InputSourceDefn + defn = InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -101,7 +101,7 @@ public void testValidation() } try { - defn = InputSourceDefn + defn = InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -114,7 +114,7 @@ public void testValidation() // Expected } - defn = InputSourceDefn + defn = InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -128,7 +128,7 @@ public void testValidation() // Expected } - defn = InputSourceDefn + defn = InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -150,7 +150,7 @@ public void testSerialization() ObjectMapper mapper = new ObjectMapper(); InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); InputFormat inputFormat = CatalogTests.csvFormat(); - InputSourceDefn defn = InputSourceDefn + InputTableSpec defn = 
InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -158,7 +158,7 @@ public void testSerialization() .build(); // Round-trip - TableDefn defn2 = TableDefn.fromBytes(mapper, defn.toBytes(mapper)); + TableSpec defn2 = TableSpec.fromBytes(mapper, defn.toBytes(mapper)); assertEquals(defn, defn2); // Sanity check of toString, which uses JSON @@ -168,7 +168,7 @@ public void testSerialization() @Test public void testEquals() { - EqualsVerifier.forClass(InputSourceDefn.class) + EqualsVerifier.forClass(InputTableSpec.class) .usingGetClass() .verify(); } diff --git a/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java b/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java index 4bb1a02cee31..fc78345c39cd 100644 --- a/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java +++ b/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java @@ -21,21 +21,15 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.smile.SmileFactory; -import org.apache.druid.catalog.AbstractColumnMetadata.InputColumn; -import org.apache.druid.catalog.AbstractColumnMetadata.MeasureColumn; -import org.apache.druid.catalog.AbstractTableMetadata.DatasourceTable; -import org.apache.druid.catalog.AbstractTableMetadata.InputSourceTable; -import org.apache.druid.catalog.MetadataCatalog.ColumnKind; -import org.apache.druid.catalog.MetadataCatalog.ColumnMetadata; -import org.apache.druid.catalog.MetadataCatalog.TableMetadata; -import org.apache.druid.catalog.MetadataCatalog.TableType; +import org.apache.druid.catalog.ColumnSpec.ColumnKind; +import org.apache.druid.catalog.DatasourceColumnSpec.MeasureSpec; +import org.apache.druid.catalog.TableMetadata.TableType; import org.apache.druid.data.input.InputFormat; import org.apache.druid.data.input.InputSource; import org.apache.druid.data.input.impl.InlineInputSource; import org.apache.druid.metadata.TestDerbyConnector; import 
org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; -import org.apache.druid.segment.column.ColumnType; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -47,7 +41,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; public class MetadataCatalogTest @@ -145,25 +138,25 @@ private void doTestRemote(boolean useSmile) throws DuplicateKeyException, OutOfD */ private void populateCatalog() throws DuplicateKeyException { - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .timeColumn() .column("a", "VARCHAR") .build(); - TableSpec table = TableSpec.newTable( + TableMetadata table = TableMetadata.newTable( TableId.DRUID_SCHEMA, "table1", defn); storage.tables().create(table); - defn = DatasourceDefn.builder() + defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .rollupGranularity("PT1H") .timeColumn() .column("dim", "VARCHAR") .measure("measure", "BIGINT", "SUM") .build(); - table = TableSpec.newTable( + table = TableMetadata.newTable( TableId.DRUID_SCHEMA, "table2", defn); @@ -171,13 +164,13 @@ private void populateCatalog() throws DuplicateKeyException InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); InputFormat inputFormat = CatalogTests.csvFormat(); - InputSourceDefn inputDefn = InputSourceDefn + InputTableSpec inputDefn = InputTableSpec .builder() .source(inputSource) .format(inputFormat) .column("a", "varchar") .build(); - table = TableSpec.newTable( + table = TableMetadata.newTable( TableId.INPUT_SCHEMA, "input", inputDefn); @@ -193,23 +186,20 @@ private void verifyInitial(MetadataCatalog catalog) assertTrue(table.updateTime() > 0); assertEquals(TableType.DATASOURCE, 
table.type()); - List cols = table.columns(); + DatasourceSpec dsDefn = (DatasourceSpec) table.defn(); + List cols = dsDefn.columns(); assertEquals(2, cols.size()); assertEquals("__time", cols.get(0).name()); assertEquals("TIMESTAMP", cols.get(0).sqlType()); - assertEquals(ColumnKind.SIMPLE, cols.get(0).kind()); + assertEquals(ColumnKind.DETAIL, cols.get(0).kind()); assertEquals("a", cols.get(1).name()); assertEquals("VARCHAR", cols.get(1).sqlType()); - assertEquals(ColumnKind.SIMPLE, cols.get(0).kind()); - assertSame(cols.get(0), table.column("__time")); - assertSame(cols.get(1), table.column("a")); - assertNull(table.column("b")); - - DatasourceTable dsTable = (DatasourceTable) table; - assertEquals("PT1D", dsTable.segmentGranularity()); - assertTrue(dsTable.isDetail()); - assertFalse(dsTable.isRollup()); - assertNull(dsTable.rollupGranularity()); + assertEquals(ColumnKind.DETAIL, cols.get(0).kind()); + + assertEquals("PT1D", dsDefn.segmentGranularity()); + assertTrue(dsDefn.isDetail()); + assertFalse(dsDefn.isRollup()); + assertNull(dsDefn.rollupGranularity()); } { TableId id = TableId.datasource("table2"); @@ -218,7 +208,8 @@ private void verifyInitial(MetadataCatalog catalog) assertTrue(table.updateTime() > 0); assertEquals(TableType.DATASOURCE, table.type()); - List cols = table.columns(); + DatasourceSpec dsDefn = (DatasourceSpec) table.defn(); + List cols = dsDefn.columns(); assertEquals(3, cols.size()); assertEquals("__time", cols.get(0).name()); assertEquals("TIMESTAMP", cols.get(0).sqlType()); @@ -229,16 +220,12 @@ private void verifyInitial(MetadataCatalog catalog) assertEquals("measure", cols.get(2).name()); assertEquals("BIGINT", cols.get(2).sqlType()); assertEquals(ColumnKind.MEASURE, cols.get(2).kind()); - assertEquals("SUM", ((MeasureColumn) cols.get(2)).aggFn()); - assertSame(cols.get(0), table.column("__time")); - assertSame(cols.get(1), table.column("dim")); - assertSame(cols.get(2), table.column("measure")); - - DatasourceTable dsTable = 
(DatasourceTable) table; - assertEquals("PT1D", dsTable.segmentGranularity()); - assertFalse(dsTable.isDetail()); - assertTrue(dsTable.isRollup()); - assertEquals("PT1H", dsTable.rollupGranularity()); + assertEquals("SUM", ((MeasureSpec) cols.get(2)).aggregateFn()); + + assertEquals("PT1D", dsDefn.segmentGranularity()); + assertFalse(dsDefn.isDetail()); + assertTrue(dsDefn.isRollup()); + assertEquals("PT1H", dsDefn.rollupGranularity()); } assertNull(catalog.resolveTable(TableId.datasource("table3"))); { @@ -248,16 +235,15 @@ private void verifyInitial(MetadataCatalog catalog) assertTrue(table.updateTime() > 0); assertEquals(TableType.INPUT, table.type()); - List cols = table.columns(); + InputTableSpec inputDefn = (InputTableSpec) table.defn(); + List cols = inputDefn.columns(); assertEquals(1, cols.size()); assertEquals("a", cols.get(0).name()); assertEquals("varchar", cols.get(0).sqlType()); assertEquals(ColumnKind.INPUT, cols.get(0).kind()); - assertEquals(ColumnType.STRING, ((InputColumn) cols.get(0)).druidType()); - InputSourceTable inputTable = (InputSourceTable) table; - assertNotNull(inputTable.inputSource()); - assertNotNull(inputTable.format()); + assertNotNull(inputDefn.inputSource()); + assertNotNull(inputDefn.format()); } List tables = catalog.tables(TableId.DRUID_SCHEMA); @@ -274,22 +260,22 @@ private void alterCatalog() throws DuplicateKeyException, OutOfDateException { // Add a column to table 1 TableId id1 = TableId.datasource("table1"); - TableSpec table1 = storage.tables().read(id1); + TableMetadata table1 = storage.tables().read(id1); assertNotNull(table1); - DatasourceDefn defn = (DatasourceDefn) table1.defn(); + DatasourceSpec defn = (DatasourceSpec) table1.defn(); defn = defn.toBuilder() .column("b", "DOUBLE") .build(); storage.tables().updateDefn(id1, defn, table1.updateTime()); // Create a table 3 - defn = DatasourceDefn.builder() + defn = DatasourceSpec.builder() .segmentGranularity("PT1D") .timeColumn() .column("x", "FLOAT") .build(); - 
TableSpec table = TableSpec.newTable( + TableMetadata table = TableMetadata.newTable( TableId.DRUID_SCHEMA, "table3", defn); @@ -302,20 +288,21 @@ private void verifyAltered(MetadataCatalog catalog) TableId id = TableId.datasource("table1"); TableMetadata table = catalog.resolveTable(id); - List cols = table.columns(); + DatasourceSpec dsDefn = (DatasourceSpec) table.defn(); + List cols = dsDefn.columns(); assertEquals(3, cols.size()); assertEquals("__time", cols.get(0).name()); assertEquals("a", cols.get(1).name()); assertEquals("b", cols.get(2).name()); assertEquals("DOUBLE", cols.get(2).sqlType()); - assertEquals(ColumnKind.SIMPLE, cols.get(2).kind()); - assertSame(cols.get(2), table.column("b")); + assertEquals(ColumnKind.DETAIL, cols.get(2).kind()); } { TableId id = TableId.datasource("table3"); TableMetadata table = catalog.resolveTable(id); - List cols = table.columns(); + DatasourceSpec dsDefn = (DatasourceSpec) table.defn(); + List cols = dsDefn.columns(); assertEquals(2, cols.size()); assertEquals("__time", cols.get(0).name()); assertEquals("x", cols.get(1).name()); diff --git a/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java b/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java index e61dd6dbb95d..45f46ca90959 100644 --- a/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java +++ b/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java @@ -57,12 +57,12 @@ public MockCatalogSync( } @Override - public void updated(TableSpec update) + public void updated(TableMetadata update) { doSync(update); } - private void doSync(TableSpec update) + private void doSync(TableMetadata update) { byte[] encoded = update.toBytes(useSmile ? 
smileMapper : jsonMapper); listenerResource.syncTable( @@ -76,10 +76,10 @@ private void doSync(TableSpec update) @Override public void deleted(TableId tableId) { - TableSpec spec = TableSpec.newTable( + TableMetadata spec = TableMetadata.newTable( tableId.schema(), tableId.name(), - new TableDefn.Tombstone()); + new TableSpec.Tombstone()); doSync(spec); } diff --git a/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java index 70667ec63414..0c355695fd34 100644 --- a/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java +++ b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java @@ -20,11 +20,11 @@ package org.apache.druid.metadata.catalog; import com.fasterxml.jackson.databind.ObjectMapper; -import org.apache.druid.catalog.DatasourceDefn; +import org.apache.druid.catalog.DatasourceSpec; import org.apache.druid.catalog.MetastoreManager; import org.apache.druid.catalog.MetastoreManagerImpl; import org.apache.druid.catalog.TableId; -import org.apache.druid.catalog.TableSpec; +import org.apache.druid.catalog.TableMetadata; import org.apache.druid.jackson.DefaultObjectMapper; import org.apache.druid.metadata.TestDerbyConnector; import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; @@ -79,29 +79,27 @@ public void tearDown() @Test public void testCreate() throws DuplicateKeyException { - TableDefnManager tableMgr = manager.tables(); - - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1H") .rollupGranularity("PT1M") .targetSegmentRows(1_000_000) .build(); - TableSpec table = TableSpec.newSegmentTable("table1", defn); + TableMetadata table = TableMetadata.newSegmentTable("table1", defn); // Table does not exist, read returns nothing. 
- assertNull(tableMgr.read(table.id())); + assertNull(manager.read(table.id())); // Create the table - long version = tableMgr.create(table); - TableSpec created = table.fromInsert(table.dbSchema(), version); + long version = manager.create(table); + TableMetadata created = table.fromInsert(table.dbSchema(), version); // Read the record - TableSpec read = tableMgr.read(table.id()); + TableMetadata read = manager.read(table.id()); assertEquals(created, read); // Try to create a second time try { - tableMgr.create(table); + manager.create(table); fail(); } catch (DuplicateKeyException e) { @@ -112,90 +110,84 @@ public void testCreate() throws DuplicateKeyException @Test public void testUpdate() throws DuplicateKeyException, OutOfDateException, NotFoundException { - TableDefnManager tableMgr = manager.tables(); - - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1H") .rollupGranularity("PT1M") .targetSegmentRows(1_000_000) .build(); - TableSpec table = TableSpec.newSegmentTable("table1", defn); - long version = tableMgr.create(table); + TableMetadata table = TableMetadata.newSegmentTable("table1", defn); + long version = manager.create(table); // Change the definition - DatasourceDefn defn2 = DatasourceDefn.builder() + DatasourceSpec defn2 = DatasourceSpec.builder() .segmentGranularity("PT1D") .rollupGranularity("PT1H") .targetSegmentRows(2_000_000) .build(); try { - tableMgr.updateDefn(table.id(), defn2, 3); + manager.updateDefn(table.id(), defn2, 3); fail(); } catch (OutOfDateException e) { // expected } - assertEquals(version, tableMgr.read(table.id()).updateTime()); - long newVersion = tableMgr.updateDefn(table.id(), defn2, version); - TableSpec table3 = tableMgr.read(table.id()); + assertEquals(version, manager.read(table.id()).updateTime()); + long newVersion = manager.updateDefn(table.id(), defn2, version); + TableMetadata table3 = manager.read(table.id()); assertEquals(defn2, 
table3.defn()); assertEquals(newVersion, table3.updateTime()); // Changing the state requires no version check assertEquals(TableState.ACTIVE, table3.state()); - newVersion = tableMgr.markDeleting(table.id()); - TableSpec table4 = tableMgr.read(table.id()); + newVersion = manager.markDeleting(table.id()); + TableMetadata table4 = manager.read(table.id()); assertEquals(TableState.DELETING, table4.state()); assertEquals(newVersion, table4.updateTime()); // Update: no version check) - long newerVersion = tableMgr.updateDefn(table.id(), defn2); + long newerVersion = manager.updateDefn(table.id(), defn2); assertTrue(newerVersion > newVersion); } @Test public void testDelete() throws DuplicateKeyException { - TableDefnManager tableMgr = manager.tables(); - - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1H") .rollupGranularity("PT1M") .targetSegmentRows(1_000_000) .build(); - TableSpec table = TableSpec.newSegmentTable("table1", defn); + TableMetadata table = TableMetadata.newSegmentTable("table1", defn); - assertFalse(tableMgr.delete(table.id())); - tableMgr.create(table); - assertTrue(tableMgr.delete(table.id())); - assertFalse(tableMgr.delete(table.id())); + assertFalse(manager.delete(table.id())); + manager.create(table); + assertTrue(manager.delete(table.id())); + assertFalse(manager.delete(table.id())); } @Test public void testList() throws DuplicateKeyException { - TableDefnManager tableMgr = manager.tables(); - - List list = tableMgr.list(); + List list = manager.list(); assertTrue(list.isEmpty()); - DatasourceDefn defn = DatasourceDefn.builder() + DatasourceSpec defn = DatasourceSpec.builder() .segmentGranularity("PT1H") .rollupGranularity("PT1M") .targetSegmentRows(1_000_000) .build(); // Create tables in inverse order - TableSpec table2 = TableSpec.newSegmentTable("table2", defn); - long version = tableMgr.create(table2); + TableMetadata table2 = TableMetadata.newSegmentTable("table2", 
defn); + long version = manager.create(table2); table2 = table2.fromInsert(TableId.DRUID_SCHEMA, version); - TableSpec table1 = TableSpec.newSegmentTable("table1", defn); - version = tableMgr.create(table1); + TableMetadata table1 = TableMetadata.newSegmentTable("table1", defn); + version = manager.create(table1); table1 = table1.fromInsert(TableId.DRUID_SCHEMA, version); - list = tableMgr.list(); + list = manager.list(); assertEquals(2, list.size()); TableId id = list.get(0); assertEquals(TableId.DRUID_SCHEMA, id.schema()); @@ -204,13 +196,13 @@ public void testList() throws DuplicateKeyException assertEquals(TableId.DRUID_SCHEMA, id.schema()); assertEquals("table2", id.name()); - List names = tableMgr.list(TableId.DRUID_SCHEMA); + List names = manager.list(TableId.DRUID_SCHEMA); assertEquals(2, names.size()); - names = tableMgr.list(TableId.SYSTEM_SCHEMA); + names = manager.list(TableId.SYSTEM_SCHEMA); assertEquals(0, names.size()); - List details = tableMgr.listDetails(TableId.DRUID_SCHEMA); + List details = manager.listDetails(TableId.DRUID_SCHEMA); assertEquals(Arrays.asList(table1, table2), details); } } From d18a5129612c40257ec15f0c282ad3feb4437761 Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Thu, 16 Jun 2022 13:57:57 -0700 Subject: [PATCH 5/8] More renaming & build fixes --- .../apache/druid/data/input/InputSource.java | 2 + .../data/input/impl/InlineInputSource.java | 2 + .../druid/catalog/CachedMetadataCatalog.java | 10 +-- .../druid/catalog/CatalogAuthorizer.java | 6 +- .../apache/druid/catalog/CatalogStorage.java | 4 +- .../apache/druid/catalog/SchemaRegistry.java | 6 +- .../druid/catalog/SchemaRegistryImpl.java | 14 ++--- .../apache/druid/catalog/TableMetadata.java | 2 +- .../metadata/catalog/CatalogManager.java | 2 +- .../metadata/catalog/SQLCatalogManager.java | 4 +- .../server/http/CatalogListenerResource.java | 2 +- .../druid/server/http/CatalogResource.java | 52 ++++++++-------- .../druid/catalog/CatalogObjectTest.java | 23 ++++--- 
.../druid/catalog/CatalogResourceTest.java | 2 +- .../druid/catalog/InputTableSpecTest.java | 46 +++++++------- .../druid/catalog/MetadataCatalogTest.java | 62 +++++++++---------- .../metadata/catalog/TableManagerTest.java | 6 +- 17 files changed, 126 insertions(+), 119 deletions(-) diff --git a/core/src/main/java/org/apache/druid/data/input/InputSource.java b/core/src/main/java/org/apache/druid/data/input/InputSource.java index 0a3cda250f43..fd872827e928 100644 --- a/core/src/main/java/org/apache/druid/data/input/InputSource.java +++ b/core/src/main/java/org/apache/druid/data/input/InputSource.java @@ -19,6 +19,7 @@ package org.apache.druid.data.input; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonSubTypes.Type; import com.fasterxml.jackson.annotation.JsonTypeInfo; @@ -60,6 +61,7 @@ public interface InputSource * Returns true if this inputSource can be processed in parallel using ParallelIndexSupervisorTask. It must be * castable to SplittableInputSource and the various SplittableInputSource methods must work as documented. 
*/ + @JsonIgnore boolean isSplittable(); /** diff --git a/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java b/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java index 4cc6d0f9cf4c..81dbe31568a5 100644 --- a/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java +++ b/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java @@ -20,6 +20,7 @@ package org.apache.druid.data.input.impl; import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import org.apache.druid.data.input.AbstractInputSource; @@ -51,6 +52,7 @@ public String getData() } @Override + @JsonIgnore public boolean isSplittable() { return false; diff --git a/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java index 4fe6f703bcb4..e331ad657198 100644 --- a/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java +++ b/server/src/main/java/org/apache/druid/catalog/CachedMetadataCatalog.java @@ -20,7 +20,7 @@ package org.apache.druid.catalog; import org.apache.druid.catalog.MetadataCatalog.CatalogListener; -import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; import javax.inject.Inject; @@ -60,7 +60,7 @@ private static class TableEntry { private final TableMetadata table; - protected TableEntry(SchemaDefn schema, TableMetadata table) + protected TableEntry(SchemaSpec schema, TableMetadata table) { this.table = table; } @@ -73,11 +73,11 @@ protected long version() private class SchemaEntry { - private final SchemaDefn schema; + private final SchemaSpec schema; private long version = NOT_FETCHED; private final ConcurrentHashMap cache = new ConcurrentHashMap<>(); - protected SchemaEntry(SchemaDefn schema) + 
protected SchemaEntry(SchemaSpec schema) { this.schema = schema; } @@ -205,7 +205,7 @@ private SchemaEntry entryFor(String schemaName) return schemaCache.computeIfAbsent( schemaName, k -> { - SchemaDefn schema = schemaRegistry.schema(k); + SchemaSpec schema = schemaRegistry.schema(k); return schema == null ? null : new SchemaEntry(schema); }); } diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogAuthorizer.java b/server/src/main/java/org/apache/druid/catalog/CatalogAuthorizer.java index 64d41bdb43c2..ef5df29c0e39 100644 --- a/server/src/main/java/org/apache/druid/catalog/CatalogAuthorizer.java +++ b/server/src/main/java/org/apache/druid/catalog/CatalogAuthorizer.java @@ -19,7 +19,7 @@ package org.apache.druid.catalog; -import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; import org.apache.druid.server.security.Access; import org.apache.druid.server.security.Action; import org.apache.druid.server.security.AuthorizationUtils; @@ -50,7 +50,7 @@ public AuthorizerMapper mapper() return authorizerMapper; } - public void authorizeTable(SchemaDefn schema, String name, Action action, HttpServletRequest request) + public void authorizeTable(SchemaSpec schema, String name, Action action, HttpServletRequest request) { if (action == Action.WRITE && !schema.writable()) { throw new ForbiddenException( @@ -82,7 +82,7 @@ public Access authorizeAccess(String resource, String key, Action action, HttpSe ); } - public ResourceAction resourceAction(SchemaDefn schema, String name, Action action) + public ResourceAction resourceAction(SchemaSpec schema, String name, Action action) { return new ResourceAction(new Resource(name, schema.securityResource()), action); } diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java index 25e50813a447..3f363357d755 100644 --- 
a/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java +++ b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java @@ -22,7 +22,7 @@ import org.apache.druid.catalog.MetadataCatalog.CatalogListener; import org.apache.druid.catalog.MetadataCatalog.CatalogSource; import org.apache.druid.catalog.MetadataCatalog.CatalogUpdateProvider; -import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; import org.apache.druid.metadata.catalog.CatalogManager; import org.apache.druid.server.security.AuthorizerMapper; @@ -94,7 +94,7 @@ public SchemaRegistry schemaRegistry() return schemaRegistry; } - public SchemaDefn resolveSchema(String dbSchema) + public SchemaSpec resolveSchema(String dbSchema) { return schemaRegistry.schema(dbSchema); } diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java index 0d41c003ff34..80358caa6e64 100644 --- a/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java @@ -31,15 +31,15 @@ */ public interface SchemaRegistry { - interface SchemaDefn + interface SchemaSpec { String name(); String securityResource(); boolean writable(); - boolean accepts(TableSpec defn); + boolean accepts(TableSpec spec); TableType tableType(); } - SchemaDefn schema(String name); + SchemaSpec schema(String name); Set names(); } diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java index 5f18ec7900e0..6908031af511 100644 --- a/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java @@ -38,7 +38,7 @@ public class SchemaRegistryImpl implements SchemaRegistry // TODO: Change this when ExternalOperatorConvertion changes private String 
EXTERNAL_RESOURCE = "EXTERNAL"; - public static class SchemaDefnImpl implements SchemaDefn + public static class SchemaDefnImpl implements SchemaSpec { private final String name; private final String resource; @@ -76,15 +76,15 @@ public boolean writable() } @Override - public boolean accepts(TableSpec defn) + public boolean accepts(TableSpec spec) { if (acceptedClass == null) { return false; } - if (defn == null) { + if (spec == null) { return false; } - return acceptedClass.isAssignableFrom(defn.getClass()); + return acceptedClass.isAssignableFrom(spec.getClass()); } @Override @@ -94,7 +94,7 @@ public TableType tableType() } } - private final Map builtIns; + private final Map builtIns; public SchemaRegistryImpl() { @@ -131,13 +131,13 @@ public SchemaRegistryImpl() null)); // TODO } - private void register(SchemaDefn schemaDefn) + private void register(SchemaSpec schemaDefn) { builtIns.put(schemaDefn.name(), schemaDefn); } @Override - public SchemaDefn schema(String name) + public SchemaSpec schema(String name) { return builtIns.get(name); } diff --git a/server/src/main/java/org/apache/druid/catalog/TableMetadata.java b/server/src/main/java/org/apache/druid/catalog/TableMetadata.java index 03857474847a..b449364fd225 100644 --- a/server/src/main/java/org/apache/druid/catalog/TableMetadata.java +++ b/server/src/main/java/org/apache/druid/catalog/TableMetadata.java @@ -193,7 +193,7 @@ public long updateTime() } @JsonProperty("defn") - public TableSpec defn() + public TableSpec spec() { return defn; } diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java index 3f34da6ee86f..939535192493 100644 --- a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java +++ b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java @@ -134,7 +134,7 @@ interface Listener * Update a table definition, but only if the database entry is at * 
the given {@code oldVersion}. */ - long updateDefn(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException; + long updateSpec(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException; /** * Update a table definition, overwriting any current content. diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java index 0400494c25da..45b9f394931f 100644 --- a/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java +++ b/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java @@ -161,7 +161,7 @@ public Long withHandle(Handle handle) throws DuplicateKeyException .bind("creationTime", updateTime) .bind("updateTime", updateTime) .bind("state", TableState.ACTIVE.code()) - .bind("payload", table.defn().toBytes(jsonMapper)); + .bind("payload", table.spec().toBytes(jsonMapper)); try { stmt.execute(); } @@ -225,7 +225,7 @@ public TableMetadata withHandle(Handle handle) } @Override - public long updateDefn(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException + public long updateSpec(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException { try { return dbi.withHandle( diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java index ccff0a87549a..2827e4ccba7d 100644 --- a/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java +++ b/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java @@ -92,7 +92,7 @@ public Response syncTable( catch (IOException e) { return Response.serverError().entity(e.getMessage()).build(); } - TableSpec defn = tableSpec.defn(); + TableSpec defn = tableSpec.spec(); if (defn instanceof TableSpec.Tombstone) { listener.deleted(tableSpec.id()); } else { diff --git 
a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java index 5c07e72b87ba..ae6eddfeb78a 100644 --- a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java +++ b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java @@ -23,7 +23,7 @@ import org.apache.curator.shaded.com.google.common.collect.Lists; import org.apache.druid.catalog.Actions; import org.apache.druid.catalog.CatalogStorage; -import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; import org.apache.druid.catalog.TableId; import org.apache.druid.catalog.TableMetadata; import org.apache.druid.catalog.TableSpec; @@ -99,11 +99,11 @@ public Response createTable( @Context final HttpServletRequest req) { String dbSchema = table.resolveDbSchema(); - Pair result = validateSchema(dbSchema); + Pair result = validateSchema(dbSchema); if (result.lhs != null) { return result.lhs; } - SchemaDefn schema = result.rhs; + SchemaSpec schema = result.rhs; if (!schema.writable()) { return Actions.badRequest( Actions.INVALID, @@ -116,13 +116,13 @@ public Response createTable( catch (IAE e) { return Actions.badRequest(Actions.INVALID, e.getMessage()); } - TableSpec defn = table.defn(); - if (!schema.accepts(defn)) { + TableSpec spec = table.spec(); + if (!schema.accepts(spec)) { return Actions.badRequest( Actions.INVALID, StringUtils.format( "Cannot create tables of type %s in schema %s", - defn == null ? "null" : defn.getClass().getSimpleName(), + spec == null ? "null" : spec.getClass().getSimpleName(), dbSchema)); } try { @@ -159,7 +159,7 @@ public Response createTable( * and the user must have at least read access. * @param name The name of the table definition to modify. The user must * have write access to the table. - * @param defn The new table definition. + * @param spec The new table definition. 
* @param version An optional table version. If provided, the metadata DB * entry for the table must be at this exact version or the update * will fail. (Provides "optimistic locking.") If omitted (that is, @@ -174,37 +174,37 @@ public Response createTable( public Response updateTableDefn( @PathParam("dbSchema") String dbSchema, @PathParam("name") String name, - TableSpec defn, + TableSpec spec, @QueryParam("version") long version, @Context final HttpServletRequest req) { try { - if (defn != null) { - defn.validate(); + if (spec != null) { + spec.validate(); } } catch (IAE e) { return Actions.badRequest(Actions.INVALID, e.getMessage()); } - Pair result = validateSchema(dbSchema); + Pair result = validateSchema(dbSchema); if (result.lhs != null) { return result.lhs; } if (Strings.isNullOrEmpty(name)) { return Actions.badRequest(Actions.INVALID, "Table name is required"); } - SchemaDefn schema = result.rhs; + SchemaSpec schema = result.rhs; if (!schema.writable()) { return Actions.badRequest( Actions.INVALID, StringUtils.format("Cannot update tables in schema %s", dbSchema)); } - if (!schema.accepts(defn)) { + if (!schema.accepts(spec)) { return Actions.badRequest( Actions.INVALID, StringUtils.format( "Cannot update tables to type %s in schema %s", - defn == null ? "null" : defn.getClass().getSimpleName(), + spec == null ? 
"null" : spec.getClass().getSimpleName(), dbSchema)); } try { @@ -218,9 +218,9 @@ public Response updateTableDefn( TableId tableId = new TableId(dbSchema, name); long newVersion; if (version == 0) { - newVersion = tableMgr.updateDefn(tableId, defn); + newVersion = tableMgr.updateDefn(tableId, spec); } else { - newVersion = tableMgr.updateDefn(tableId, defn, version); + newVersion = tableMgr.updateSpec(tableId, spec, version); } return Actions.okWithVersion(newVersion); } @@ -264,7 +264,7 @@ public Response getTable( @PathParam("name") String name, @Context final HttpServletRequest req) { - Pair result = validateSchema(dbSchema); + Pair result = validateSchema(dbSchema); if (result.lhs != null) { return result.lhs; } @@ -320,7 +320,7 @@ public Response listTables( req, tables, tableId -> { - SchemaDefn schema = catalog.resolveSchema(tableId.schema()); + SchemaSpec schema = catalog.resolveSchema(tableId.schema()); if (schema == null) { // Should never occur. return null; @@ -351,11 +351,11 @@ public Response listTables( @PathParam("dbSchema") String dbSchema, @Context final HttpServletRequest req) { - Pair result = validateSchema(dbSchema); + Pair result = validateSchema(dbSchema); if (result.lhs != null) { return result.lhs; } - SchemaDefn schema = result.rhs; + SchemaSpec schema = result.rhs; List tables = catalog.tables().list(dbSchema); Iterable filtered = AuthorizationUtils.filterAuthorizedResources( req, @@ -389,11 +389,11 @@ public Response deleteTable( @Context final HttpServletRequest req) { TableId tableId = new TableId(dbSchema, name); - Pair result = validateSchema(tableId.schema()); + Pair result = validateSchema(tableId.schema()); if (result.lhs != null) { return result.lhs; } - SchemaDefn schema = result.rhs; + SchemaSpec schema = result.rhs; if (!schema.writable()) { return Actions.badRequest( Actions.INVALID, @@ -438,11 +438,11 @@ public Response syncSchema( @Context final HttpServletRequest req ) { - Pair result = validateSchema(dbSchema); + Pair 
result = validateSchema(dbSchema); if (result.lhs != null) { return result.lhs; } - SchemaDefn schema = result.rhs; + SchemaSpec schema = result.rhs; List tables = catalog.tables().listDetails(dbSchema); Iterable filtered = AuthorizationUtils.filterAuthorizedResources( req, @@ -472,12 +472,12 @@ public Response syncTable( return getTable(dbSchema, name, req); } - private Pair validateSchema(String dbSchema) + private Pair validateSchema(String dbSchema) { if (Strings.isNullOrEmpty(dbSchema)) { return Pair.of(Actions.badRequest(Actions.INVALID, "Schema name is required"), null); } - SchemaDefn schema = catalog.resolveSchema(dbSchema); + SchemaSpec schema = catalog.resolveSchema(dbSchema); if (schema == null) { return Pair.of(Actions.notFound( StringUtils.format("Unknown schema %s", dbSchema)), diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java b/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java index 172c7aa9bf98..69fba1b29487 100644 --- a/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java +++ b/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java @@ -49,7 +49,7 @@ public void testMinimalTable() assertEquals(10, table.creationTime()); assertEquals(20, table.updateTime()); assertEquals(TableState.ACTIVE, table.state()); - assertNull(table.defn()); + assertNull(table.spec()); try { table = new TableMetadata( @@ -85,9 +85,9 @@ public void testMinimalTable() } @Test - public void testDefn() + public void testSpec() { - DatasourceSpec defn = DatasourceSpec.builder() + DatasourceSpec spec = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); TableMetadata table = new TableMetadata( @@ -97,11 +97,14 @@ public void testDefn() 10, 20, TableState.ACTIVE, - defn); + spec); table.validate(); - assertSame(defn, table.defn()); + assertSame(spec, table.spec()); + // Segment grain is required. 
try { + spec = DatasourceSpec.builder() + .build(); table = new TableMetadata( "wrong", "foo", @@ -109,7 +112,7 @@ public void testDefn() 10, 20, TableState.ACTIVE, - defn); + spec); table.validate(); fail(); } @@ -121,18 +124,18 @@ public void testDefn() @Test public void testConversions() { - DatasourceSpec defn = DatasourceSpec.builder() + DatasourceSpec spec = DatasourceSpec.builder() .segmentGranularity("PT1D") .build(); TableMetadata table = TableMetadata.newSegmentTable( "ds", - defn); + spec); assertEquals(TableId.datasource("ds"), table.id()); assertEquals(TableState.ACTIVE, table.state()); assertEquals(0, table.updateTime()); - assertSame(defn, table.defn()); + assertSame(spec, table.spec()); - TableMetadata table2 = TableMetadata.newSegmentTable("ds", defn); + TableMetadata table2 = TableMetadata.newSegmentTable("ds", spec); assertEquals(table, table2); TableMetadata table3 = table2.asUpdate(20); diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java b/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java index 4f0cd1129648..50dc812068ed 100644 --- a/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java +++ b/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java @@ -448,7 +448,7 @@ public void testLifecycle() read = (TableMetadata) resp.getEntity(); assertEquals(table.creationTime(), read.creationTime()); assertEquals(version, read.updateTime()); - assertEquals(defn2, read.defn()); + assertEquals(defn2, read.spec()); // add second table String table2Name = "lifecycle2"; diff --git a/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java b/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java index fedaed9a33db..773f4027ee05 100644 --- a/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java +++ b/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java @@ -42,31 +42,31 @@ public void testMinimalBuilder() // Minimum 
possible definition InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); InputFormat inputFormat = CatalogTests.csvFormat(); - InputTableSpec defn = InputTableSpec + InputTableSpec spec = InputTableSpec .builder() .source(inputSource) .format(inputFormat) .column("a", "varchar") .build(); - defn.validate(); - assertSame(inputSource, defn.inputSource()); - assertSame(inputFormat, defn.format()); - List columns = defn.columns(); + spec.validate(); + assertSame(inputSource, spec.inputSource()); + assertSame(inputFormat, spec.format()); + List columns = spec.columns(); assertEquals(1, columns.size()); assertEquals("a", columns.get(0).name()); assertEquals("varchar", columns.get(0).sqlType()); - InputTableSpec copy = defn.toBuilder().build(); - assertEquals(defn, copy); + InputTableSpec copy = spec.toBuilder().build(); + assertEquals(spec, copy); } @Test public void testValidation() { - InputTableSpec defn = InputTableSpec.builder().build(); + InputTableSpec spec = InputTableSpec.builder().build(); try { - defn.validate(); + spec.validate(); fail(); } catch (IAE e) { @@ -74,12 +74,12 @@ public void testValidation() } InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); - defn = InputTableSpec + spec = InputTableSpec .builder() .source(inputSource) .build(); try { - defn.validate(); + spec.validate(); fail(); } catch (IAE e) { @@ -87,13 +87,13 @@ public void testValidation() } InputFormat inputFormat = CatalogTests.csvFormat(); - defn = InputTableSpec + spec = InputTableSpec .builder() .source(inputSource) .format(inputFormat) .build(); try { - defn.validate(); + spec.validate(); fail(); } catch (IAE e) { @@ -101,34 +101,34 @@ public void testValidation() } try { - defn = InputTableSpec + spec = InputTableSpec .builder() .source(inputSource) .format(inputFormat) .column(null, "VARCHAR") .build(); - defn.validate(); + spec.validate(); fail(); } catch (IAE e) { // Expected } - defn = InputTableSpec + spec = InputTableSpec .builder() 
.source(inputSource) .format(inputFormat) .column("a", null) .build(); try { - defn.validate(); + spec.validate(); fail(); } catch (IAE e) { // Expected } - defn = InputTableSpec + spec = InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -136,7 +136,7 @@ public void testValidation() .column("a", "varchar") .build(); try { - defn.validate(); + spec.validate(); fail(); } catch (IAE e) { @@ -150,7 +150,7 @@ public void testSerialization() ObjectMapper mapper = new ObjectMapper(); InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); InputFormat inputFormat = CatalogTests.csvFormat(); - InputTableSpec defn = InputTableSpec + InputTableSpec spec1 = InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -158,11 +158,11 @@ public void testSerialization() .build(); // Round-trip - TableSpec defn2 = TableSpec.fromBytes(mapper, defn.toBytes(mapper)); - assertEquals(defn, defn2); + TableSpec spec2 = TableSpec.fromBytes(mapper, spec1.toBytes(mapper)); + assertEquals(spec1, spec2); // Sanity check of toString, which uses JSON - assertNotNull(defn.toString()); + assertNotNull(spec1.toString()); } @Test diff --git a/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java b/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java index fc78345c39cd..f275eaf288eb 100644 --- a/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java +++ b/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java @@ -138,7 +138,7 @@ private void doTestRemote(boolean useSmile) throws DuplicateKeyException, OutOfD */ private void populateCatalog() throws DuplicateKeyException { - DatasourceSpec defn = DatasourceSpec.builder() + DatasourceSpec spec = DatasourceSpec.builder() .segmentGranularity("PT1D") .timeColumn() .column("a", "VARCHAR") @@ -146,10 +146,10 @@ private void populateCatalog() throws DuplicateKeyException TableMetadata table = TableMetadata.newTable( TableId.DRUID_SCHEMA, "table1", - 
defn); + spec); storage.tables().create(table); - defn = DatasourceSpec.builder() + spec = DatasourceSpec.builder() .segmentGranularity("PT1D") .rollupGranularity("PT1H") .timeColumn() @@ -159,12 +159,12 @@ private void populateCatalog() throws DuplicateKeyException table = TableMetadata.newTable( TableId.DRUID_SCHEMA, "table2", - defn); + spec); storage.tables().create(table); InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); InputFormat inputFormat = CatalogTests.csvFormat(); - InputTableSpec inputDefn = InputTableSpec + InputTableSpec inputSpec = InputTableSpec .builder() .source(inputSource) .format(inputFormat) @@ -173,7 +173,7 @@ private void populateCatalog() throws DuplicateKeyException table = TableMetadata.newTable( TableId.INPUT_SCHEMA, "input", - inputDefn); + inputSpec); storage.tables().create(table); } @@ -186,8 +186,8 @@ private void verifyInitial(MetadataCatalog catalog) assertTrue(table.updateTime() > 0); assertEquals(TableType.DATASOURCE, table.type()); - DatasourceSpec dsDefn = (DatasourceSpec) table.defn(); - List cols = dsDefn.columns(); + DatasourceSpec dsSpec = (DatasourceSpec) table.spec(); + List cols = dsSpec.columns(); assertEquals(2, cols.size()); assertEquals("__time", cols.get(0).name()); assertEquals("TIMESTAMP", cols.get(0).sqlType()); @@ -196,10 +196,10 @@ private void verifyInitial(MetadataCatalog catalog) assertEquals("VARCHAR", cols.get(1).sqlType()); assertEquals(ColumnKind.DETAIL, cols.get(0).kind()); - assertEquals("PT1D", dsDefn.segmentGranularity()); - assertTrue(dsDefn.isDetail()); - assertFalse(dsDefn.isRollup()); - assertNull(dsDefn.rollupGranularity()); + assertEquals("PT1D", dsSpec.segmentGranularity()); + assertTrue(dsSpec.isDetail()); + assertFalse(dsSpec.isRollup()); + assertNull(dsSpec.rollupGranularity()); } { TableId id = TableId.datasource("table2"); @@ -208,8 +208,8 @@ private void verifyInitial(MetadataCatalog catalog) assertTrue(table.updateTime() > 0); assertEquals(TableType.DATASOURCE, 
table.type()); - DatasourceSpec dsDefn = (DatasourceSpec) table.defn(); - List cols = dsDefn.columns(); + DatasourceSpec dsSpec = (DatasourceSpec) table.spec(); + List cols = dsSpec.columns(); assertEquals(3, cols.size()); assertEquals("__time", cols.get(0).name()); assertEquals("TIMESTAMP", cols.get(0).sqlType()); @@ -222,10 +222,10 @@ private void verifyInitial(MetadataCatalog catalog) assertEquals(ColumnKind.MEASURE, cols.get(2).kind()); assertEquals("SUM", ((MeasureSpec) cols.get(2)).aggregateFn()); - assertEquals("PT1D", dsDefn.segmentGranularity()); - assertFalse(dsDefn.isDetail()); - assertTrue(dsDefn.isRollup()); - assertEquals("PT1H", dsDefn.rollupGranularity()); + assertEquals("PT1D", dsSpec.segmentGranularity()); + assertFalse(dsSpec.isDetail()); + assertTrue(dsSpec.isRollup()); + assertEquals("PT1H", dsSpec.rollupGranularity()); } assertNull(catalog.resolveTable(TableId.datasource("table3"))); { @@ -235,15 +235,15 @@ private void verifyInitial(MetadataCatalog catalog) assertTrue(table.updateTime() > 0); assertEquals(TableType.INPUT, table.type()); - InputTableSpec inputDefn = (InputTableSpec) table.defn(); - List cols = inputDefn.columns(); + InputTableSpec inputSpec = (InputTableSpec) table.spec(); + List cols = inputSpec.columns(); assertEquals(1, cols.size()); assertEquals("a", cols.get(0).name()); assertEquals("varchar", cols.get(0).sqlType()); assertEquals(ColumnKind.INPUT, cols.get(0).kind()); - assertNotNull(inputDefn.inputSource()); - assertNotNull(inputDefn.format()); + assertNotNull(inputSpec.inputSource()); + assertNotNull(inputSpec.format()); } List tables = catalog.tables(TableId.DRUID_SCHEMA); @@ -263,14 +263,14 @@ private void alterCatalog() throws DuplicateKeyException, OutOfDateException TableMetadata table1 = storage.tables().read(id1); assertNotNull(table1); - DatasourceSpec defn = (DatasourceSpec) table1.defn(); - defn = defn.toBuilder() + DatasourceSpec spec = (DatasourceSpec) table1.spec(); + spec = spec.toBuilder() .column("b", 
"DOUBLE") .build(); - storage.tables().updateDefn(id1, defn, table1.updateTime()); + storage.tables().updateSpec(id1, spec, table1.updateTime()); // Create a table 3 - defn = DatasourceSpec.builder() + spec = DatasourceSpec.builder() .segmentGranularity("PT1D") .timeColumn() .column("x", "FLOAT") @@ -278,7 +278,7 @@ private void alterCatalog() throws DuplicateKeyException, OutOfDateException TableMetadata table = TableMetadata.newTable( TableId.DRUID_SCHEMA, "table3", - defn); + spec); storage.tables().create(table); } @@ -288,8 +288,8 @@ private void verifyAltered(MetadataCatalog catalog) TableId id = TableId.datasource("table1"); TableMetadata table = catalog.resolveTable(id); - DatasourceSpec dsDefn = (DatasourceSpec) table.defn(); - List cols = dsDefn.columns(); + DatasourceSpec dsSpec = (DatasourceSpec) table.spec(); + List cols = dsSpec.columns(); assertEquals(3, cols.size()); assertEquals("__time", cols.get(0).name()); assertEquals("a", cols.get(1).name()); @@ -301,8 +301,8 @@ private void verifyAltered(MetadataCatalog catalog) TableId id = TableId.datasource("table3"); TableMetadata table = catalog.resolveTable(id); - DatasourceSpec dsDefn = (DatasourceSpec) table.defn(); - List cols = dsDefn.columns(); + DatasourceSpec dsSpec = (DatasourceSpec) table.spec(); + List cols = dsSpec.columns(); assertEquals(2, cols.size()); assertEquals("__time", cols.get(0).name()); assertEquals("x", cols.get(1).name()); diff --git a/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java index 0c355695fd34..e1bd243dd9b5 100644 --- a/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java +++ b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java @@ -126,7 +126,7 @@ public void testUpdate() throws DuplicateKeyException, OutOfDateException, NotFo .build(); try { - manager.updateDefn(table.id(), defn2, 3); + 
manager.updateSpec(table.id(), defn2, 3); fail(); } catch (OutOfDateException e) { @@ -134,9 +134,9 @@ public void testUpdate() throws DuplicateKeyException, OutOfDateException, NotFo } assertEquals(version, manager.read(table.id()).updateTime()); - long newVersion = manager.updateDefn(table.id(), defn2, version); + long newVersion = manager.updateSpec(table.id(), defn2, version); TableMetadata table3 = manager.read(table.id()); - assertEquals(defn2, table3.defn()); + assertEquals(defn2, table3.spec()); assertEquals(newVersion, table3.updateTime()); // Changing the state requires no version check From 27614b93bc3c5a9403da508def5c6c023b7a339c Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Thu, 16 Jun 2022 16:24:13 -0700 Subject: [PATCH 6/8] Fix warning --- .../main/java/org/apache/druid/server/http/CatalogResource.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java index ae6eddfeb78a..a6c131661ab2 100644 --- a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java +++ b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java @@ -80,7 +80,7 @@ public CatalogResource(CatalogStorage catalog) /** * Create a new table within the indicated schema. * - * @param table The table definition to create. + * @param table The table specification to create. * @param ifNew Whether to skip the action if the table already exists. * This is the same as the SQL IF NOT EXISTS clause. If {@code false}, * then an error is raised if the table exists. 
If {@code true}, then From 79bba4640ad539047bb4d75cdf1c8dbda795ad5d Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Fri, 17 Jun 2022 18:05:14 -0700 Subject: [PATCH 7/8] Build fix --- .../java/org/apache/druid/catalog/LocalMetadataCatalog.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java index e2a9f23fa61e..bcba0aac59d9 100644 --- a/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java +++ b/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java @@ -19,7 +19,7 @@ package org.apache.druid.catalog; -import org.apache.druid.catalog.SchemaRegistry.SchemaDefn; +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; import javax.inject.Inject; @@ -57,7 +57,7 @@ public TableMetadata resolveTable(TableId tableId) @Override public List tables(String schemaName) { - SchemaDefn schema = schemaRegistry.schema(schemaName); + SchemaSpec schema = schemaRegistry.schema(schemaName); if (schema == null || !schema.writable()) { return Collections.emptyList(); } @@ -67,7 +67,7 @@ public List tables(String schemaName) @Override public Set tableNames(String schemaName) { - SchemaDefn schema = schemaRegistry.schema(schemaName); + SchemaSpec schema = schemaRegistry.schema(schemaName); if (schema == null || !schema.writable()) { return Collections.emptySet(); } From 551b9016628d1695169c04c4a184312c0e4297e5 Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Sat, 18 Jun 2022 11:53:09 -0700 Subject: [PATCH 8/8] IT fix --- .../src/main/java/org/apache/druid/server/security/Access.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/apache/druid/server/security/Access.java b/server/src/main/java/org/apache/druid/server/security/Access.java index 8686e853238d..d30914fdfc8e 100644 --- 
a/server/src/main/java/org/apache/druid/server/security/Access.java +++ b/server/src/main/java/org/apache/druid/server/security/Access.java @@ -53,6 +53,6 @@ public String getMessage() @Override public String toString() { - return StringUtils.format("Access{Allowed: %s, Message: %s}", allowed, message); + return StringUtils.format("Allowed:%s, Message:%s", allowed, message); } }