diff --git a/core/src/main/java/org/apache/druid/data/input/InputSource.java b/core/src/main/java/org/apache/druid/data/input/InputSource.java index 0a3cda250f43..fd872827e928 100644 --- a/core/src/main/java/org/apache/druid/data/input/InputSource.java +++ b/core/src/main/java/org/apache/druid/data/input/InputSource.java @@ -19,6 +19,7 @@ package org.apache.druid.data.input; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonSubTypes.Type; import com.fasterxml.jackson.annotation.JsonTypeInfo; @@ -60,6 +61,7 @@ public interface InputSource * Returns true if this inputSource can be processed in parallel using ParallelIndexSupervisorTask. It must be * castable to SplittableInputSource and the various SplittableInputSource methods must work as documented. */ + @JsonIgnore boolean isSplittable(); /** diff --git a/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java b/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java index 4cc6d0f9cf4c..81dbe31568a5 100644 --- a/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java +++ b/core/src/main/java/org/apache/druid/data/input/impl/InlineInputSource.java @@ -20,6 +20,7 @@ package org.apache.druid.data.input.impl; import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import org.apache.druid.data.input.AbstractInputSource; @@ -51,6 +52,7 @@ public String getData() } @Override + @JsonIgnore public boolean isSplittable() { return false; diff --git a/core/src/main/java/org/apache/druid/metadata/MetadataStorageConnector.java b/core/src/main/java/org/apache/druid/metadata/MetadataStorageConnector.java index 45fb6639082c..0e0e52abf190 100644 --- a/core/src/main/java/org/apache/druid/metadata/MetadataStorageConnector.java +++ 
b/core/src/main/java/org/apache/druid/metadata/MetadataStorageConnector.java @@ -87,5 +87,7 @@ default void exportTable( void createSupervisorsTable(); + void createTableDefnTable(); + void deleteAllRecords(String tableName); } diff --git a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java index 766efabb5261..7f4e4d7c037c 100644 --- a/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java +++ b/core/src/main/java/org/apache/druid/metadata/MetadataStorageTablesConfig.java @@ -30,14 +30,16 @@ */ public class MetadataStorageTablesConfig { + public static final String CONFIG_BASE = "druid.metadata.storage.tables"; + public static MetadataStorageTablesConfig fromBase(String base) { - return new MetadataStorageTablesConfig(base, null, null, null, null, null, null, null, null, null, null); + return new MetadataStorageTablesConfig(base, null, null, null, null, null, null, null, null, null, null, null); } public static final String TASK_ENTRY_TYPE = "task"; - private static final String DEFAULT_BASE = "druid"; + public static final String DEFAULT_BASE = "druid"; private final Map entryTables = new HashMap<>(); private final Map logTables = new HashMap<>(); @@ -76,6 +78,9 @@ public static MetadataStorageTablesConfig fromBase(String base) @JsonProperty("supervisors") private final String supervisorTable; + @JsonProperty("tableDefn") + private final String tableDefnTable; + @JsonCreator public MetadataStorageTablesConfig( @JsonProperty("base") String base, @@ -88,7 +93,8 @@ public MetadataStorageTablesConfig( @JsonProperty("taskLog") String taskLogTable, @JsonProperty("taskLock") String taskLockTable, @JsonProperty("audit") String auditTable, - @JsonProperty("supervisors") String supervisorTable + @JsonProperty("supervisors") String supervisorTable, + @JsonProperty("tableDefn") String tablesTable ) { this.base = (base == null) ? 
DEFAULT_BASE : base; @@ -106,6 +112,39 @@ public MetadataStorageTablesConfig( lockTables.put(TASK_ENTRY_TYPE, this.taskLockTable); this.auditTable = makeTableName(auditTable, "audit"); this.supervisorTable = makeTableName(supervisorTable, "supervisors"); + this.tableDefnTable = makeTableName(tablesTable, "tableDefn"); + } + + /** + * Shim constructor for backwards compatibility with code that + * cannot be changed due to missing unit tests. + */ + public MetadataStorageTablesConfig( + String base, + String dataSourceTable, + String pendingSegmentsTable, + String segmentsTable, + String rulesTable, + String configTable, + String tasksTable, + String taskLogTable, + String taskLockTable, + String auditTable, + String supervisorTable + ) + { + this( + base, + dataSourceTable, + pendingSegmentsTable, + segmentsTable, rulesTable, + configTable, + tasksTable, + taskLogTable, + taskLockTable, + auditTable, + supervisorTable, + null); } private String makeTableName(String explicitTableName, String defaultSuffix) @@ -194,4 +233,9 @@ public String getTaskLockTable() { return taskLockTable; } + + public String getTableDefnTable() + { + return tableDefnTable; + } } diff --git a/core/src/test/java/org/apache/druid/metadata/MetadataStorageTablesConfigTest.java b/core/src/test/java/org/apache/druid/metadata/MetadataStorageTablesConfigTest.java new file mode 100644 index 000000000000..39a9c16268bc --- /dev/null +++ b/core/src/test/java/org/apache/druid/metadata/MetadataStorageTablesConfigTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class MetadataStorageTablesConfigTest +{ + /** + * Pretty lame test: mostly to get the static checks to not complain. + */ + @Test + public void testDefaults() + { + MetadataStorageTablesConfig config = MetadataStorageTablesConfig.fromBase(null); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE, config.getBase()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_dataSource", config.getDataSourceTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_pendingSegments", config.getPendingSegmentsTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_segments", config.getSegmentsTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_rules", config.getRulesTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_config", config.getConfigTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_tasks", config.getTasksTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_tasklogs", config.getTaskLogTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_tasklocks", config.getTaskLockTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_audit", config.getAuditTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_supervisors", config.getSupervisorTable()); + assertEquals(MetadataStorageTablesConfig.DEFAULT_BASE + "_tableDefn", config.getTableDefnTable()); + } +} diff --git 
a/extensions-contrib/sqlserver-metadata-storage/src/test/java/org/apache/druid/metadata/storage/sqlserver/SQLServerConnectorTest.java b/extensions-contrib/sqlserver-metadata-storage/src/test/java/org/apache/druid/metadata/storage/sqlserver/SQLServerConnectorTest.java index a31159e88134..9fa13c2c70c7 100644 --- a/extensions-contrib/sqlserver-metadata-storage/src/test/java/org/apache/druid/metadata/storage/sqlserver/SQLServerConnectorTest.java +++ b/extensions-contrib/sqlserver-metadata-storage/src/test/java/org/apache/druid/metadata/storage/sqlserver/SQLServerConnectorTest.java @@ -30,26 +30,13 @@ @SuppressWarnings("nls") public class SQLServerConnectorTest { - @Test public void testIsTransientException() { SQLServerConnector connector = new SQLServerConnector( Suppliers.ofInstance(new MetadataStorageConnectorConfig()), Suppliers.ofInstance( - new MetadataStorageTablesConfig( - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) + MetadataStorageTablesConfig.fromBase(null) ) ); @@ -69,8 +56,7 @@ public void testLimitClause() { SQLServerConnector connector = new SQLServerConnector( Suppliers.ofInstance(new MetadataStorageConnectorConfig()), - Suppliers.ofInstance( - new MetadataStorageTablesConfig(null, null, null, null, null, null, null, null, null, null, null) + Suppliers.ofInstance(MetadataStorageTablesConfig.fromBase(null) ) ); Assert.assertEquals("FETCH NEXT 100 ROWS ONLY", connector.limitClause(100)); diff --git a/extensions-core/mysql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/mysql/MySQLConnectorTest.java b/extensions-core/mysql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/mysql/MySQLConnectorTest.java index 23ce46282232..2498e8a65c17 100644 --- a/extensions-core/mysql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/mysql/MySQLConnectorTest.java +++ 
b/extensions-core/mysql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/mysql/MySQLConnectorTest.java @@ -44,7 +44,7 @@ public String getDriverClassName() private static final Supplier CONNECTOR_CONFIG_SUPPLIER = MetadataStorageConnectorConfig::new; private static final Supplier TABLES_CONFIG_SUPPLIER = - () -> new MetadataStorageTablesConfig(null, null, null, null, null, null, null, null, null, null, null); + () -> MetadataStorageTablesConfig.fromBase(null); @Test diff --git a/extensions-core/postgresql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/postgresql/PostgreSQLConnectorTest.java b/extensions-core/postgresql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/postgresql/PostgreSQLConnectorTest.java index 08f3c333a1fb..4e5e4d85fc06 100644 --- a/extensions-core/postgresql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/postgresql/PostgreSQLConnectorTest.java +++ b/extensions-core/postgresql-metadata-storage/src/test/java/org/apache/druid/metadata/storage/postgresql/PostgreSQLConnectorTest.java @@ -36,19 +36,7 @@ public void testIsTransientException() PostgreSQLConnector connector = new PostgreSQLConnector( Suppliers.ofInstance(new MetadataStorageConnectorConfig()), Suppliers.ofInstance( - new MetadataStorageTablesConfig( - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) + MetadataStorageTablesConfig.fromBase(null) ), new PostgreSQLConnectorConfig(), new PostgreSQLTablesConfig() diff --git a/server/src/main/java/org/apache/druid/catalog/Actions.java b/server/src/main/java/org/apache/druid/catalog/Actions.java new file mode 100644 index 000000000000..9ba1ae688a63 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/Actions.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.catalog;

import com.google.common.collect.ImmutableMap;
import org.apache.druid.server.security.ForbiddenException;

import javax.ws.rs.core.Response;

import java.util.Map;

/**
 * Helper functions for the catalog REST API actions. Centralizes the
 * error-code constants and the standard JSON error payload so every
 * endpoint reports failures in the same shape.
 */
public class Actions
{
  public static final String DUPLICATE_ERROR = "Already exists";
  public static final String FAILED_ERROR = "Failed";
  public static final String INVALID = "Invalid";
  public static final String FORBIDDEN = "Forbidden";
  public static final String NOT_FOUND = "Not found";

  public static final String ERROR_KEY = "error";
  public static final String ERR_MSG_KEY = "errorMessage";

  /**
   * Builds the standard two-field error payload: an error code plus a
   * human-readable message.
   */
  public static Map<String, Object> error(String code, String msg)
  {
    return ImmutableMap.of(ERROR_KEY, code, ERR_MSG_KEY, msg);
  }

  /** 500 response carrying the exception message in the standard payload. */
  public static Response exception(Exception e)
  {
    return Response.serverError()
                   .entity(error(FAILED_ERROR, e.getMessage()))
                   .build();
  }

  /** 400 response with a caller-chosen error code and message. */
  public static Response badRequest(String code, String msg)
  {
    return Response.status(Response.Status.BAD_REQUEST)
                   .entity(error(code, msg))
                   .build();
  }

  /** 404 response using the standard {@link #NOT_FOUND} code. */
  public static Response notFound(String msg)
  {
    return Response.status(Response.Status.NOT_FOUND)
                   .entity(error(NOT_FOUND, msg))
                   .build();
  }

  /** Plain 200 response with no body. */
  public static Response ok()
  {
    return Response.ok().build();
  }

  /** 403 response with a generic message. */
  public static Response forbidden()
  {
    return forbidden("Unauthorized");
  }

  /** 403 response carrying the message from a {@link ForbiddenException}. */
  public static Response forbidden(ForbiddenException e)
  {
    return forbidden(e.getMessage());
  }

  /**
   * 403 response in the standard error format. Like ForbiddenExceptionMapper,
   * but used instead of throwing ForbiddenException.
   */
  public static Response forbidden(String msg)
  {
    return Response.status(Response.Status.FORBIDDEN)
                   .entity(error(FORBIDDEN, msg))
                   .build();
  }

  /** 200 response whose body reports the new catalog object version. */
  public static Response okWithVersion(long version)
  {
    return Response.ok()
                   .entity(ImmutableMap.of("version", version))
                   .build();
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.catalog;

import com.google.common.annotations.VisibleForTesting;
import org.apache.druid.concurrent.Threads;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.emitter.EmittingLogger;

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

/**
 * Push-style notifications that allow propagation of data from whatever server is
 * running this notifier to whoever might be listening. Notifications arrive
 * via a queue, then are dispatched via a configured sender. Details of the
 * source of the updates, and how updates are sent, are handled externally
 * to this class.
 *
 * The algorithm is simple: each update is processed entirely before the
 * next one is processed. As a result, this class is suitable for
 * low-frequency updates: where the worst-case send times are less than
 * the worst-case update frequency. If updates are faster, they will back
 * up, and the class should be redesigned to allow healthy receivers to
 * continue to get updates while laggards block only themselves.
 *
 * Events can be queued before startup. They will be sent once the notifier
 * is started. Events left in the queue at shutdown will be lost.
 *
 * Defined by composition so it can be tested and reused in other
 * contexts.
 */
public class CacheNotifier
{
  private static final EmittingLogger LOG = new EmittingLogger(CacheNotifier.class);

  // Single-threaded so updates are dispatched strictly one at a time, in order.
  private final ExecutorService exec;
  private final String callerName;
  private final BlockingQueue<byte[]> updates = new LinkedBlockingQueue<>();
  private final Consumer<byte[]> sender;

  /**
   * @param callerName used to label the dispatch thread and log messages
   * @param sender     invoked once per queued update, on the dispatch thread
   */
  public CacheNotifier(
      final String callerName,
      final Consumer<byte[]> sender
  )
  {
    this.callerName = callerName;
    this.sender = sender;

    this.exec = Execs.singleThreaded(
        StringUtils.format(
            "%s-notifierThread-",
            StringUtils.encodeForFormat(callerName)) + "%d"
    );
  }

  /**
   * Starts the dispatch loop. Each iteration blocks on the queue, then hands
   * the update to the sender. Any sender failure is logged and the loop
   * continues; interruption ends the loop.
   */
  public void start()
  {
    exec.submit(() -> {
      while (!Thread.interrupted()) {
        try {
          sender.accept(updates.take());
        }
        catch (InterruptedException e) {
          // Shutdown requested (see stop()): exit the dispatch loop.
          return;
        }
        catch (Throwable t) {
          // Fixed typo: "occured" -> "occurred".
          LOG.makeAlert(t, callerName + ": Error occurred while handling updates.").emit();
        }
      }
    });
  }

  /**
   * Queues an update for delivery. Safe to call before {@link #start()};
   * such updates are delivered once the notifier starts.
   */
  public void send(byte[] update)
  {
    updates.add(update);
  }

  /**
   * Test-only shutdown: polls until the queue drains, then stops.
   * Note: only waits for updates to be taken off the queue, not for the
   * in-flight send to complete.
   */
  @VisibleForTesting
  public void stopGracefully()
  {
    try {
      while (!updates.isEmpty()) {
        Threads.sleepFor(100, TimeUnit.MILLISECONDS);
      }
    }
    catch (InterruptedException e) {
      // Restore the interrupt status so callers can observe the interruption.
      Thread.currentThread().interrupt();
    }
    stop();
  }

  /** Stops the dispatch thread immediately; queued updates are discarded. */
  public void stop()
  {
    exec.shutdownNow();
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.catalog;

import org.apache.druid.catalog.MetadataCatalog.CatalogListener;
import org.apache.druid.catalog.SchemaRegistry.SchemaSpec;

import javax.inject.Inject;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Caching version of the metadata catalog. Draws information
 * from a base catalog. Fetches from the base if:
 * <ul>
 * <li>A table is requested that has not yet been requested.
 * Once requested, the entry is cached, even if the table does
 * not exist in the base catalog.</li>
 * <li>The contents of a schema are requested, and have not yet
 * been fetched.</li>
 * </ul>
 *
 * Both tables and schemas are cached. In particular, if a table or
 * schema is requested, and does not exist in the base catalog, then
 * that schema is marked as not existing and won't be fetched again.
 *
 * The cache is updated via an update facility which either flushes
 * the cache (crude) or listens to the base catalog for updates and
 * populates the cache with updates. For a local cache, the DB layer
 * provides the updates. For a remote cache, the DB host pushes updates.
 */
public class CachedMetadataCatalog implements MetadataCatalog, CatalogListener
{
  // Sentinel versions for a schema entry: not yet fetched from the base
  // catalog, vs. fetched and known to be empty/undefined.
  public static final int NOT_FETCHED = -1;
  public static final int UNDEFINED = 0;

  /**
   * Cache entry for a single table. A null table records a "miss":
   * the table was looked up in the base catalog and did not exist,
   * so we do not ask again.
   */
  private static class TableEntry
  {
    private final TableMetadata table;

    // NOTE(review): the schema parameter is currently unused; kept for
    // signature stability with callers in this class.
    protected TableEntry(SchemaSpec schema, TableMetadata table)
    {
      this.table = table;
    }

    /** Version of this entry: the table's update time, or UNDEFINED for a miss. */
    protected long version()
    {
      return table == null ? UNDEFINED : table.updateTime();
    }
  }

  /**
   * Cache entry for one schema: holds the per-table cache and the highest
   * table version seen, used to decide whether the schema's table list
   * has been fetched from the base catalog.
   */
  private class SchemaEntry
  {
    private final SchemaSpec schema;
    private long version = NOT_FETCHED;
    private final ConcurrentHashMap<String, TableEntry> cache = new ConcurrentHashMap<>();

    protected SchemaEntry(SchemaSpec schema)
    {
      this.schema = schema;
    }

    /**
     * Returns the cached table, fetching it from the base catalog on first
     * request. Misses are cached as entries with a null table.
     */
    protected TableMetadata resolveTable(TableId tableId)
    {
      TableEntry entry = cache.computeIfAbsent(
          tableId.name(),
          key -> new TableEntry(schema, base.table(tableId))
      );
      return entry.table;
    }

    /**
     * Returns all known tables in this schema, sorted by name, fetching
     * the full list from the base catalog on first request.
     */
    public synchronized List<TableMetadata> tables()
    {
      if (version == UNDEFINED) {
        return Collections.emptyList();
      }
      if (version == NOT_FETCHED) {
        List<TableMetadata> catalogTables = base.tablesForSchema(schema.name());
        for (TableMetadata table : catalogTables) {
          update(table);
        }
      }
      List<TableMetadata> orderedTables = new ArrayList<>();

      // Get the list of actual tables; excluding any cached "misses".
      cache.forEach((k, v) -> {
        if (v.table != null) {
          orderedTables.add(v.table);
        }
      });
      orderedTables.sort((e1, e2) -> e1.id().name().compareTo(e2.id().name()));
      return orderedTables;
    }

    /**
     * Applies an update, keeping the existing entry when it is at least as
     * new (guards against out-of-order notifications). Advances the schema
     * version to the newest table update time seen.
     */
    public synchronized void update(TableMetadata table)
    {
      cache.compute(
          table.name(),
          (k, v) -> v == null || v.version() < table.updateTime()
              ? new TableEntry(schema, table)
              : v
      );
      version = Math.max(version, table.updateTime());
    }

    /** Drops the entry for a deleted table (also clears a cached miss). */
    public void remove(String name)
    {
      cache.remove(name);
    }

    /** Names of tables known to exist; cached misses are excluded. */
    public Set<String> tableNames()
    {
      Set<String> tables = new HashSet<>();
      cache.forEach((k, v) -> {
        if (v.table != null) {
          tables.add(k);
        }
      });
      return tables;
    }
  }

  private final ConcurrentHashMap<String, SchemaEntry> schemaCache = new ConcurrentHashMap<>();
  private final CatalogSource base;
  private final SchemaRegistry schemaRegistry;

  @Inject
  public CachedMetadataCatalog(
      CatalogSource catalog,
      SchemaRegistry schemaRegistry
  )
  {
    this.base = catalog;
    this.schemaRegistry = schemaRegistry;
  }

  @Override
  public TableMetadata resolveTable(TableId tableId)
  {
    SchemaEntry schemaEntry = entryFor(tableId.schema());
    return schemaEntry == null ? null : schemaEntry.resolveTable(tableId);
  }

  @Override
  public List<TableMetadata> tables(String schemaName)
  {
    SchemaEntry schemaEntry = entryFor(schemaName);
    return schemaEntry == null ? null : schemaEntry.tables();
  }

  /** Listener callback: a table was created or updated in the base catalog. */
  @Override
  public void updated(TableMetadata table)
  {
    SchemaEntry schemaEntry = entryFor(table.dbSchema());
    if (schemaEntry != null) {
      schemaEntry.update(table);
    }
  }

  /** Listener callback: a table was deleted from the base catalog. */
  @Override
  public void deleted(TableId tableId)
  {
    SchemaEntry schemaEntry = entryFor(tableId.schema());
    if (schemaEntry != null) {
      schemaEntry.remove(tableId.name());
    }
  }

  @Override
  public Set<String> tableNames(String schemaName)
  {
    SchemaEntry schemaEntry = entryFor(schemaName);
    return schemaEntry == null ? Collections.emptySet() : schemaEntry.tableNames();
  }

  /** Crude cache invalidation: drop everything and refetch on demand. */
  public void flush()
  {
    schemaCache.clear();
  }

  /**
   * Returns the cache entry for a schema, creating it on first use.
   * Returns null for schemas unknown to the registry; a null mapping
   * result is not stored by computeIfAbsent, so unknown schemas are
   * re-checked on each call.
   */
  private SchemaEntry entryFor(String schemaName)
  {
    return schemaCache.computeIfAbsent(
        schemaName,
        k -> {
          SchemaSpec schema = schemaRegistry.schema(k);
          return schema == null ? null : new SchemaEntry(schema);
        });
  }
}
+ */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; +import org.apache.druid.server.security.Access; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; + +/** + * Encapsulates the details of catalog authorization. + */ +public class CatalogAuthorizer +{ + private final AuthorizerMapper authorizerMapper; + + @Inject + public CatalogAuthorizer( + AuthorizerMapper authorizerMapper) + { + this.authorizerMapper = authorizerMapper; + } + + public AuthorizerMapper mapper() + { + return authorizerMapper; + } + + public void authorizeTable(SchemaSpec schema, String name, Action action, HttpServletRequest request) + { + if (action == Action.WRITE && !schema.writable()) { + throw new ForbiddenException( + "Cannot create table definitions in schema: " + schema.name()); + } + authorize(schema.securityResource(), name, action, request); + } + + public void authorize(String resource, String key, Action action, HttpServletRequest request) + { + final Access authResult = authorizeAccess(resource, key, action, request); + if (!authResult.isAllowed()) { + throw new ForbiddenException(authResult.toString()); + } + } + + public boolean isAuthorized(String resource, String key, Action action, HttpServletRequest request) + { + final Access authResult = authorizeAccess(resource, key, action, request); + return authResult.isAllowed(); + } + + public Access authorizeAccess(String resource, String key, Action action, HttpServletRequest request) + { + return AuthorizationUtils.authorizeResourceAction( + request, + new ResourceAction(new Resource(key, resource), action), + 
authorizerMapper + ); + } + + public ResourceAction resourceAction(SchemaSpec schema, String name, Action action) + { + return new ResourceAction(new Resource(name, schema.securityResource()), action); + } + + public Action inferAction(HttpServletRequest request) + { + switch (request.getMethod()) { + case "GET": + case "HEAD": + return Action.READ; + default: + return Action.WRITE; + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogClient.java b/server/src/main/java/org/apache/druid/catalog/CatalogClient.java new file mode 100644 index 000000000000..f2fc929273c4 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CatalogClient.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.catalog;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.catalog.MetadataCatalog.CatalogSource;
import org.apache.druid.client.coordinator.Coordinator;
import org.apache.druid.discovery.DruidLeaderClient;
import org.apache.druid.guice.annotations.Smile;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.http.client.Request;
import org.apache.druid.java.util.http.client.response.StringFullResponseHolder;
import org.apache.druid.server.http.CatalogResource;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;

import javax.inject.Inject;
import javax.ws.rs.core.MediaType;

import java.io.IOException;
import java.util.Collections;
import java.util.List;

/**
 * Guice-injected client for the catalog update sync process. Requests
 * tables and schemas from the catalog component on the Coordinator.
 *
 * This class handles any recoverable error case. If this class throws
 * an exception, then something went very wrong and there is little the
 * caller can do to make things better. All the caller can do is try
 * again later and hope things improve.
 */
public class CatalogClient implements CatalogSource
{
  public static final String SCHEMA_SYNC_PATH = CatalogResource.ROOT_PATH + CatalogResource.SCHEMA_SYNC;
  public static final String TABLE_SYNC_PATH = CatalogResource.ROOT_PATH + CatalogResource.TABLE_SYNC;
  private static final TypeReference<List<TableMetadata>> LIST_OF_TABLE_SPECS_TYPE = new TypeReference<List<TableMetadata>>()
  {
  };
  // Not strictly needed as a TypeReference, but doing so makes the code simpler.
  private static final TypeReference<TableMetadata> TABLE_SPEC_TYPE = new TypeReference<TableMetadata>()
  {
  };

  private final DruidLeaderClient coordClient;
  private final ObjectMapper smileMapper;

  @Inject
  public CatalogClient(
      @Coordinator DruidLeaderClient coordClient,
      @Smile ObjectMapper smileMapper
  )
  {
    this.coordClient = coordClient;
    this.smileMapper = smileMapper;
  }

  /**
   * Fetches all table specs for a schema from the Coordinator.
   *
   * @return the tables, or an empty list if the schema is not found
   */
  @Override
  public List<TableMetadata> tablesForSchema(String dbSchema)
  {
    String url = StringUtils.replace(SCHEMA_SYNC_PATH, "{dbSchema}", dbSchema);
    List<TableMetadata> results = send(url, LIST_OF_TABLE_SPECS_TYPE);

    // Not found for a list is an empty list.
    return results == null ? Collections.emptyList() : results;
  }

  /**
   * Fetches a single table spec from the Coordinator.
   *
   * @return the table, or null if not found
   */
  @Override
  public TableMetadata table(TableId id)
  {
    // Bug fix: the table-sync endpoint is TABLE_SYNC_PATH (which carries the
    // "{table}" placeholder), not SCHEMA_SYNC_PATH.
    String url = StringUtils.replace(TABLE_SYNC_PATH, "{dbSchema}", id.schema());
    url = StringUtils.replace(url, "{table}", id.name());
    return send(url, TABLE_SPEC_TYPE);
  }

  /**
   * Send the update. Exceptions are "unexpected": they should never occur in a
   * working system. If they occur, something is broken.
   *
   * @return the requested update, or null if the item was not found in the
   * catalog.
   */
  private <T> T send(String url, TypeReference<T> typeRef)
  {
    final Request request;
    try {
      request = coordClient.makeRequest(HttpMethod.GET, url)
          .addHeader(HttpHeaders.Names.ACCEPT, MediaType.APPLICATION_JSON);
    }
    catch (IOException e) {
      // Preserve the cause so the failure is diagnosable.
      throw new ISE(e, "Cannot create catalog sync request");
    }
    final StringFullResponseHolder responseHolder;
    try {
      responseHolder = coordClient.go(request);
    }
    catch (IOException e) {
      throw new ISE(e, "Failed to send catalog sync");
    }
    catch (InterruptedException e1) {
      // Treat as a not-found: the only way this exception should occur
      // is during shutdown.
      return null;
    }
    if (responseHolder.getStatus().getCode() == HttpResponseStatus.NOT_FOUND.getCode()) {
      // Not found means the item disappeared. Returning null means "not found".
      return null;
    }
    if (responseHolder.getStatus().getCode() != HttpResponseStatus.OK.getCode()) {
      throw new ISE("Unexpected status from catalog sync: " + responseHolder.getStatus());
    }
    try {
      // NOTE(review): the request advertises Accept: application/json but the
      // body is decoded with the Smile mapper — confirm the intended wire format.
      return smileMapper.readValue(responseHolder.getContent(), typeRef);
    }
    catch (IOException e) {
      throw new ISE(e, "Could not decode the JSON response from catalog sync.");
    }
  }
}
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.java.util.common.ISE; + +import java.io.IOException; + +public class CatalogSpecs +{ + public static byte[] toBytes(ObjectMapper jsonMapper, Object obj) + { + try { + return jsonMapper.writeValueAsBytes(obj); + } + catch (JsonProcessingException e) { + throw new ISE("Failed to serialize " + obj.getClass().getSimpleName()); + } + } + + public static T fromBytes(ObjectMapper jsonMapper, byte[] bytes, Class clazz) + { + try { + return jsonMapper.readValue(bytes, clazz); + } + catch (IOException e) { + throw new ISE(e, "Failed to deserialize a " + clazz.getSimpleName()); + } + } + + public static String toString(Object obj) + { + ObjectMapper jsonMapper = new ObjectMapper(); + try { + return jsonMapper.writeValueAsString(obj); + } + catch (JsonProcessingException e) { + throw new ISE("Failed to serialize TableDefn"); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java new file mode 100644 index 000000000000..3f363357d755 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CatalogStorage.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.MetadataCatalog.CatalogSource; +import org.apache.druid.catalog.MetadataCatalog.CatalogUpdateProvider; +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; +import org.apache.druid.metadata.catalog.CatalogManager; +import org.apache.druid.server.security.AuthorizerMapper; + +import javax.inject.Inject; + +import java.util.List; + +/** + * Facade over the three internal components used to manage the metadata + * catalog from the REST API. 
+ */ +public class CatalogStorage implements CatalogUpdateProvider, CatalogSource +{ + public static class ListenerAdapter implements CatalogManager.Listener + { + private final CatalogListener dest; + + public ListenerAdapter(CatalogListener dest) + { + this.dest = dest; + } + + @Override + public void added(TableMetadata table) + { + dest.updated(table); + } + + @Override + public void updated(TableMetadata table) + { + dest.updated(table); + } + + @Override + public void deleted(TableId id) + { + dest.deleted(id); + } + } + + protected final SchemaRegistry schemaRegistry; + protected final CatalogManager catalogMgr; + protected final CatalogAuthorizer authorizer; + + @Inject + public CatalogStorage( + CatalogManager catalogMgr, + AuthorizerMapper authorizerMapper + ) + { + this.schemaRegistry = new SchemaRegistryImpl(); + this.catalogMgr = catalogMgr; + this.authorizer = new CatalogAuthorizer(authorizerMapper); + } + + public CatalogAuthorizer authorizer() + { + return authorizer; + } + + public CatalogManager tables() + { + return catalogMgr; + } + + public SchemaRegistry schemaRegistry() + { + return schemaRegistry; + } + + public SchemaSpec resolveSchema(String dbSchema) + { + return schemaRegistry.schema(dbSchema); + } + + @Override + public void register(CatalogListener listener) + { + tables().register(new ListenerAdapter(listener)); + } + + @Override + public List tablesForSchema(String dbSchema) + { + return tables().listDetails(dbSchema); + } + + @Override + public TableMetadata table(TableId id) + { + return tables().read(id); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java b/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java new file mode 100644 index 000000000000..3e095987fef7 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/CatalogUpdateNotifier.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.RestUpdateSender.RestSender; +import org.apache.druid.discovery.DruidNodeDiscoveryProvider; +import org.apache.druid.discovery.NodeRole; +import org.apache.druid.guice.ManageLifecycle; +import org.apache.druid.guice.annotations.EscalatedClient; +import org.apache.druid.guice.annotations.Smile; +import org.apache.druid.java.util.common.lifecycle.LifecycleStart; +import org.apache.druid.java.util.common.lifecycle.LifecycleStop; +import org.apache.druid.java.util.http.client.HttpClient; +import org.apache.druid.server.DruidNode; +import org.apache.druid.server.http.CatalogListenerResource; +import org.joda.time.Duration; + +import javax.inject.Inject; + +import java.util.Collections; +import java.util.function.Supplier; + +/** + * Global update notifier for the catalog. Registers itself as a catalog + * listener, then uses the common cache notifier to send Smile-encoded JSON + * updates to broker nodes discovered from node discovery (typically ZooKeeper.) + *

+ * Deletes are encoded as a table update with a table definition of a special + * tombstone type. This saves having the need for two endpoints, or having + * a wrapper class to handle deletes. + */ +@ManageLifecycle +public class CatalogUpdateNotifier implements CatalogListener +{ + private final String CALLER_NAME = "Catalog Sync"; + private final long TIMEOUT_MS = 5000; + private final TableSpec TABLE_TOMBSTONE = new TableSpec.Tombstone(); + + private final CacheNotifier notifier; + private final ObjectMapper smileMapper; + + @Inject + public CatalogUpdateNotifier( + CatalogStorage catalog, + DruidNodeDiscoveryProvider discoveryProvider, + @EscalatedClient HttpClient httpClient, + @Smile ObjectMapper smileMapper + ) + { + long timeoutMs = TIMEOUT_MS; + this.smileMapper = smileMapper; + Supplier> nodeSupplier = new ListeningNodeSupplier( + Collections.singletonList(NodeRole.BROKER), + discoveryProvider); + RestSender restSender = RestUpdateSender.httpClientSender(httpClient, Duration.millis(timeoutMs)); + RestUpdateSender sender = new RestUpdateSender( + CALLER_NAME, + nodeSupplier, + restSender, + CatalogListenerResource.BASE_URL + CatalogListenerResource.SYNC_URL, + timeoutMs); + this.notifier = new CacheNotifier( + CALLER_NAME, + sender); + catalog.register(this); + } + + @LifecycleStart + public void start() + { + notifier.start(); + } + + @LifecycleStop + public void stop() + { + notifier.stop(); + } + + @Override + public void updated(TableMetadata update) + { + notifier.send(update.toBytes(smileMapper)); + } + + @Override + public void deleted(TableId tableId) + { + TableMetadata spec = TableMetadata.newTable(tableId, TABLE_TOMBSTONE); + notifier.send(spec.toBytes(smileMapper)); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/ColumnSpec.java b/server/src/main/java/org/apache/druid/catalog/ColumnSpec.java new file mode 100644 index 000000000000..5f26275fa267 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/ColumnSpec.java 
@@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableMap; +import org.apache.druid.guice.annotations.PublicApi; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.segment.column.ColumnType; + +import java.util.Map; + +/** + * Base class for table columns. Columns have multiple types + * represented as subclasses. 
+ */ +@PublicApi +public abstract class ColumnSpec +{ + enum ColumnKind + { + DETAIL, + DIMENSION, + MEASURE, + INPUT + } + + public static final Map VALID_SQL_TYPES = + new ImmutableMap.Builder() + .put("BIGINT", ColumnType.LONG) + .put("FLOAT", ColumnType.FLOAT) + .put("DOUBLE", ColumnType.DOUBLE) + .put("VARCHAR", ColumnType.STRING) + .build(); + + protected final String name; + protected final String sqlType; + + public ColumnSpec( + String name, + String sqlType + ) + { + this.name = name; + this.sqlType = sqlType; + } + + protected abstract ColumnKind kind(); + + @JsonProperty("name") + public String name() + { + return name; + } + + @JsonProperty("sqlType") + public String sqlType() + { + return sqlType; + } + + public void validate() + { + if (Strings.isNullOrEmpty(name)) { + throw new IAE("Column name is required"); + } + } + + public byte[] toBytes(ObjectMapper jsonMapper) + { + return CatalogSpecs.toBytes(jsonMapper, this); + } + + public static ColumnSpec fromBytes(ObjectMapper jsonMapper, byte[] bytes) + { + return CatalogSpecs.fromBytes(jsonMapper, bytes, ColumnSpec.class); + } + + @Override + public String toString() + { + return CatalogSpecs.toString(this); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/DatasourceColumnSpec.java b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnSpec.java new file mode 100644 index 000000000000..7aa875980f6a --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/DatasourceColumnSpec.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonSubTypes.Type; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; + +/** + * Description of a detail datasource column and a rollup + * dimension column. + */ +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") +@JsonSubTypes(value = { + @Type(name = "detail", value = DatasourceColumnSpec.DetailColumnSpec.class), + @Type(name = "dimension", value = DatasourceColumnSpec.DimensionSpec.class), + @Type(name = "measure", value = DatasourceColumnSpec.MeasureSpec.class), +}) +public abstract class DatasourceColumnSpec extends ColumnSpec +{ + private static final String TIME_COLUMN = "__time"; + + @JsonCreator + public DatasourceColumnSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType + ) + { + super(name, sqlType); + } + + @Override + public void validate() + { + super.validate(); + if (sqlType == null) { + return; + } + if (TIME_COLUMN.equals(name)) { + if (!"TIMESTAMP".equalsIgnoreCase(sqlType)) { + throw new IAE("__time column must have type TIMESTAMP"); + } + } else if (!VALID_SQL_TYPES.containsKey(StringUtils.toUpperCase(sqlType))) { + throw new IAE("Not a supported SQL type: " + sqlType); + } + } + + public static class DetailColumnSpec extends 
DatasourceColumnSpec + { + @JsonCreator + public DetailColumnSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType + ) + { + super(name, sqlType); + } + + @Override + protected ColumnKind kind() + { + return ColumnKind.DETAIL; + } + } + + public static class DimensionSpec extends DatasourceColumnSpec + { + @JsonCreator + public DimensionSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType + ) + { + super(name, sqlType); + } + + @Override + protected ColumnKind kind() + { + return ColumnKind.DIMENSION; + } + } + + /** + * Catalog definition of a measure (metric) column. + */ + public static class MeasureSpec extends DatasourceColumnSpec + { + private final String aggregateFn; + + @JsonCreator + public MeasureSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType, + @JsonProperty("aggregateFn") String aggregateFn + ) + { + super(name, sqlType); + this.aggregateFn = aggregateFn; + } + + @Override + protected ColumnKind kind() + { + return ColumnKind.MEASURE; + } + + @JsonProperty("aggregateFn") + public String aggregateFn() + { + return aggregateFn; + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/DatasourceSpec.java b/server/src/main/java/org/apache/druid/catalog/DatasourceSpec.java new file mode 100644 index 000000000000..2249a0ffab6e --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/DatasourceSpec.java @@ -0,0 +1,376 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Strings; +import org.apache.druid.catalog.DatasourceColumnSpec.DetailColumnSpec; +import org.apache.druid.catalog.DatasourceColumnSpec.DimensionSpec; +import org.apache.druid.catalog.DatasourceColumnSpec.MeasureSpec; +import org.apache.druid.catalog.TableMetadata.TableType; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Datasource metadata exchanged via the REST API and stored + * in the catalog. + */ +public class DatasourceSpec extends TableSpec +{ + /** + * Segment grain at ingestion and initial compaction. Aging rules + * may override the value as segments age. If not provided here, + * then it must be provided at ingestion time. + */ + private final String segmentGranularity; + + /** + * Ingestion and auto-compaction rollup granularity. If null, then no + * rollup is enabled. Same as {@code queryGranularity} in and ingest spec, + * but renamed since this granularity affects rollup, not queries. Can be + * overridden at ingestion time. 
The grain may change as segments evolve: + * this is the grain only for ingest. + */ + private final String rollupGranularity; + + /** + * The target segment size at ingestion and initial compaction. + * If 0, then the system setting is used. + */ + private final int targetSegmentRows; + + /** + * Whether to enable auto-compaction. Only relevant if no auto-compaction + * spec is defined, since the existence of a spec overrides this setting. + */ + private final boolean enableAutoCompaction; + + /** + * The offset of segments to be auto-compacted relative to the current + * time. If not present, the auto-compaction default is used if + * auto-compaction is enabled. + */ + private final String autoCompactionDelay; + + private final List columns; + + public DatasourceSpec( + @JsonProperty("segmentGranularity") String segmentGranularity, + @JsonProperty("rollupGranularity") String rollupGranularity, + @JsonProperty("targetSegmentRows") int targetSegmentRows, + @JsonProperty("enableAutoCompaction") boolean enableAutoCompaction, + @JsonProperty("autoCompactionDelay") String autoCompactionDelay, + @JsonProperty("properties") Map properties, + @JsonProperty("columns") List columns + ) + { + super(properties); + + // Note: no validation here. If a bad definition got into the + // DB, don't prevent deserialization. + + this.segmentGranularity = segmentGranularity; + this.rollupGranularity = rollupGranularity; + this.targetSegmentRows = targetSegmentRows; + this.enableAutoCompaction = enableAutoCompaction; + this.autoCompactionDelay = autoCompactionDelay; + this.columns = columns == null ? 
Collections.emptyList() : columns; + } + + @Override + public TableType type() + { + return TableType.DATASOURCE; + } + + @JsonProperty("rollupGranularity") + @JsonInclude(Include.NON_NULL) + public String rollupGranularity() + { + return rollupGranularity; + } + + @JsonProperty("segmentGranularity") + @JsonInclude(Include.NON_NULL) + public String segmentGranularity() + { + return segmentGranularity; + } + + @JsonProperty("targetSegmentRows") + @JsonInclude(Include.NON_DEFAULT) + public int targetSegmentRows() + { + return targetSegmentRows; + } + + @JsonProperty("enableAutoCompaction") + @JsonInclude(Include.NON_DEFAULT) + public boolean enableAutoCompaction() + { + return enableAutoCompaction; + } + + @JsonProperty("autoCompactionDelay") + @JsonInclude(Include.NON_NULL) + public String autoCompactionDelay() + { + return autoCompactionDelay; + } + + @JsonProperty("columns") + @JsonInclude(Include.NON_EMPTY) + public List columns() + { + return columns; + } + + public static Builder builder() + { + return new Builder(); + } + + public Builder toBuilder() + { + return new Builder(this); + } + + @JsonIgnore + public boolean isDetail() + { + return Strings.isNullOrEmpty(rollupGranularity); + } + + @JsonIgnore + public boolean isRollup() + { + return !isDetail(); + } + + @Override + public void validate() + { + super.validate(); + if (Strings.isNullOrEmpty(segmentGranularity)) { + throw new IAE("Segment granularity is required."); + } + boolean isDetail = isDetail(); + Set names = new HashSet<>(); + for (ColumnSpec col : columns) { + if (isDetail && col instanceof MeasureSpec) { + throw new IAE(StringUtils.format( + "Measure column %s not allowed for a detail table", + col.name())); + } + if (isDetail && col instanceof DimensionSpec) { + throw new IAE(StringUtils.format( + "Dimension column %s not allowed for a detail table", + col.name())); + } + if (!isDetail && col instanceof DetailColumnSpec) { + throw new IAE(StringUtils.format( + "Detail column %s not allowed 
for a rollup table", + col.name())); + } + col.validate(); + if (!names.add(col.name())) { + throw new IAE("Duplicate column name: " + col.name()); + } + } + } + + @Override + public String defaultSchema() + { + return TableId.DRUID_SCHEMA; + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + DatasourceSpec other = (DatasourceSpec) o; + return Objects.equals(this.segmentGranularity, other.segmentGranularity) + && Objects.equals(this.rollupGranularity, other.rollupGranularity) + && this.targetSegmentRows == other.targetSegmentRows + && this.enableAutoCompaction == other.enableAutoCompaction + && Objects.equals(this.autoCompactionDelay, other.autoCompactionDelay) + && Objects.equals(this.columns, other.columns) + && Objects.equals(this.properties(), other.properties()); + } + + @Override + public int hashCode() + { + return Objects.hash( + segmentGranularity, + rollupGranularity, + targetSegmentRows, + enableAutoCompaction, + autoCompactionDelay, + columns, + properties()); + } + + public static class Builder + { + private String segmentGranularity; + private String rollupGranularity; + private int targetSegmentRows; + private boolean enableAutoCompaction; + private String autoCompactionDelay; + private List columns; + private Map properties; + + public Builder() + { + this.columns = new ArrayList<>(); + this.properties = new HashMap<>(); + } + + public Builder(DatasourceSpec defn) + { + this.segmentGranularity = defn.segmentGranularity; + this.rollupGranularity = defn.rollupGranularity; + this.targetSegmentRows = defn.targetSegmentRows; + this.enableAutoCompaction = defn.enableAutoCompaction; + this.autoCompactionDelay = defn.autoCompactionDelay; + this.properties = new HashMap<>(defn.properties()); + this.columns = new ArrayList<>(defn.columns); + } + + public Builder rollupGranularity(String rollupGranularty) + { + this.rollupGranularity = 
rollupGranularty; + return this; + } + + public Builder segmentGranularity(String segmentGranularity) + { + this.segmentGranularity = segmentGranularity; + return this; + } + + public Builder targetSegmentRows(int targetSegmentRows) + { + this.targetSegmentRows = targetSegmentRows; + return this; + } + + public Builder enableAutoCompaction(boolean enableAutoCompaction) + { + this.enableAutoCompaction = enableAutoCompaction; + return this; + } + + public Builder autoCompactionDelay(String autoCompactionDelay) + { + this.autoCompactionDelay = autoCompactionDelay; + return this; + } + + public List columns() + { + return columns; + } + + public Builder column(DatasourceColumnSpec column) + { + if (Strings.isNullOrEmpty(column.name())) { + throw new IAE("Column name is required"); + } + columns.add(column); + return this; + } + + public Builder timeColumn() + { + return column("__time", "TIMESTAMP"); + } + + public Builder column(String name, String sqlType) + { + if (rollupGranularity == null) { + column(new DetailColumnSpec(name, sqlType)); + } else { + column(new DimensionSpec(name, sqlType)); + } + return this; + } + + public Builder measure(String name, String sqlType, String aggFn) + { + return column(new MeasureSpec(name, sqlType, aggFn)); + } + + public Builder properties(Map properties) + { + this.properties = properties; + return this; + } + + public Builder property(String key, Object value) + { + if (properties == null) { + properties = new HashMap<>(); + } + properties.put(key, value); + return this; + } + + public Map properties() + { + return properties; + } + + public DatasourceSpec build() + { + if (targetSegmentRows < 0) { + targetSegmentRows = 0; + } + // TODO(paul): validate upper bound + return new DatasourceSpec( + segmentGranularity, + rollupGranularity, + targetSegmentRows, + enableAutoCompaction, + autoCompactionDelay, + properties, + columns); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/InputColumnSpec.java 
b/server/src/main/java/org/apache/druid/catalog/InputColumnSpec.java new file mode 100644 index 000000000000..4b7f439cefeb --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/InputColumnSpec.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Strings; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; + +import java.util.Objects; + +/** + * Definition of a column within an input source. Columns here describe + * the "as created" form of the columns: what is actually in the input. + * Column definitions are descriptive (of the data we already have), not + * proscriptive (of the columns we'd like to have, since Druid does not + * create input columns.) 
+ */ +public class InputColumnSpec extends ColumnSpec +{ + @JsonCreator + public InputColumnSpec( + @JsonProperty("name") String name, + @JsonProperty("sqlType") String sqlType) + { + super(name, sqlType); + } + + @Override + protected ColumnKind kind() + { + return ColumnKind.INPUT; + } + + @Override + public void validate() + { + super.validate(); + if (Strings.isNullOrEmpty(name)) { + throw new IAE("Columns names cannot be empty"); + } + if (Strings.isNullOrEmpty(sqlType)) { + throw new IAE("Columns type is required: " + name); + } + if (!VALID_SQL_TYPES.containsKey(StringUtils.toUpperCase(sqlType))) { + throw new IAE("Not a supported SQL type: " + sqlType); + } + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InputColumnSpec other = (InputColumnSpec) o; + return Objects.equals(this.name, other.name) + && Objects.equals(this.sqlType, other.sqlType); + } + + @Override + public int hashCode() + { + return Objects.hash(name, sqlType); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/InputTableSpec.java b/server/src/main/java/org/apache/druid/catalog/InputTableSpec.java new file mode 100644 index 000000000000..9cbc7b2b2684 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/InputTableSpec.java @@ -0,0 +1,232 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Strings; +import org.apache.druid.catalog.TableMetadata.TableType; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.java.util.common.IAE; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Definition of an external input source, primarily for ingestion. + * The components are derived from those for Druid ingestion: an + * input source, a format and a set of columns. Also provides + * properties, as do all table definitions. 
+ */ +public class InputTableSpec extends TableSpec +{ + private final InputSource inputSource; + private final InputFormat format; + private final List columns; + + public InputTableSpec( + @JsonProperty("inputSource") InputSource inputSource, + @JsonProperty("format") InputFormat format, + @JsonProperty("columns") List columns, + @JsonProperty("properties") Map properties + ) + { + super(properties); + this.inputSource = inputSource; + this.format = format; + this.columns = columns; + } + + @Override + public TableType type() + { + return TableType.INPUT; + } + + @JsonProperty("inputSource") + public InputSource inputSource() + { + return inputSource; + } + + @JsonProperty("format") + public InputFormat format() + { + return format; + } + + @JsonProperty("columns") + public List columns() + { + return columns; + } + + @Override + public void validate() + { + super.validate(); + if (inputSource == null) { + throw new IAE("The input source is required"); + } + if (format == null) { + throw new IAE("The format is required"); + } + if (columns == null || columns.isEmpty()) { + throw new IAE("An input source must specify one or more columns"); + } + Set names = new HashSet<>(); + for (ColumnSpec col : columns) { + if (!names.add(col.name())) { + throw new IAE("Duplicate column name: " + col.name()); + } + col.validate(); + } + } + + @Override + public String defaultSchema() + { + return TableId.INPUT_SCHEMA; + } + + public static Builder builder() + { + return new Builder(); + } + + public Builder toBuilder() + { + return new Builder(this); + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + InputTableSpec other = (InputTableSpec) o; + return Objects.equals(this.inputSource, other.inputSource) + && Objects.equals(this.format, other.format) + && Objects.equals(this.columns, other.columns) + && Objects.equals(this.properties(), other.properties()); + } + + 
@Override + public int hashCode() + { + return Objects.hash( + inputSource, + format, + columns, + properties()); + } + + public static class Builder + { + private InputSource inputSource; + private InputFormat format; + private List columns; + private Map properties; + + public Builder() + { + this.columns = new ArrayList<>(); + this.properties = new HashMap<>(); + } + + public Builder(InputTableSpec defn) + { + this.inputSource = defn.inputSource; + this.format = defn.format; + this.columns = new ArrayList<>(defn.columns); + this.properties = new HashMap<>(defn.properties()); + } + + public Builder source(InputSource inputSource) + { + this.inputSource = inputSource; + return this; + } + + public Builder format(InputFormat format) + { + this.format = format; + return this; + } + + public List columns() + { + return columns; + } + + public Builder column(InputColumnSpec column) + { + if (Strings.isNullOrEmpty(column.name())) { + throw new IAE("Column name is required"); + } + columns.add(column); + return this; + } + + public Builder column(String name, String sqlType) + { + return column(new InputColumnSpec(name, sqlType)); + } + + public Builder properties(Map properties) + { + this.properties = properties; + return this; + } + + public Builder property(String key, Object value) + { + if (properties == null) { + properties = new HashMap<>(); + } + properties.put(key, value); + return this; + } + + public Map properties() + { + return properties; + } + + public InputTableSpec build() + { + return new InputTableSpec( + inputSource, + format, + columns, + properties + ); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/ListeningNodeSupplier.java b/server/src/main/java/org/apache/druid/catalog/ListeningNodeSupplier.java new file mode 100644 index 000000000000..3672d4052844 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/ListeningNodeSupplier.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.discovery.DiscoveryDruidNode; +import org.apache.druid.discovery.DruidNodeDiscovery; +import org.apache.druid.discovery.DruidNodeDiscoveryProvider; +import org.apache.druid.discovery.NodeRole; +import org.apache.druid.server.DruidNode; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +/** + * Provides an up-to-date list of Druid nodes of the given types each + * time the list is requested. + * + * The algorithm could be improved to cache the list and update it only + * when the set of nodes changes. For the catalog, the rate of change is + * likely to be low, so creating the list each time is fine. If this code + * is used for high-speed updates, then caching would be desirable. 
+ */ +public class ListeningNodeSupplier implements Supplier> +{ + private final List nodeTypes; + private final DruidNodeDiscoveryProvider discoveryProvider; + + public ListeningNodeSupplier( + List nodeTypes, + DruidNodeDiscoveryProvider discoveryProvider + ) + { + this.nodeTypes = nodeTypes; + this.discoveryProvider = discoveryProvider; + } + + @Override + public Iterable get() + { + List druidNodes = new ArrayList<>(); + for (NodeRole nodeRole : nodeTypes) { + DruidNodeDiscovery nodeDiscovery = discoveryProvider.getForNodeRole(nodeRole); + Collection nodes = nodeDiscovery.getAllNodes(); + nodes.forEach(node -> druidNodes.add(node.getDruidNode())); + } + return druidNodes; + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java new file mode 100644 index 000000000000..bcba0aac59d9 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/LocalMetadataCatalog.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; + +import javax.inject.Inject; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Metadata catalog which reads from the catalog storage. No caching. + * For testing, and as the Coordinator-side implementation of the remote + * synchronization protocol. + */ +public class LocalMetadataCatalog implements MetadataCatalog +{ + private final CatalogSource catalog; + private final SchemaRegistry schemaRegistry; + + @Inject + public LocalMetadataCatalog( + CatalogSource catalog, + SchemaRegistry schemaRegistry + ) + { + this.catalog = catalog; + this.schemaRegistry = schemaRegistry; + } + + @Override + public TableMetadata resolveTable(TableId tableId) + { + return catalog.table(tableId); + } + + @Override + public List tables(String schemaName) + { + SchemaSpec schema = schemaRegistry.schema(schemaName); + if (schema == null || !schema.writable()) { + return Collections.emptyList(); + } + return catalog.tablesForSchema(schemaName); + } + + @Override + public Set tableNames(String schemaName) + { + SchemaSpec schema = schemaRegistry.schema(schemaName); + if (schema == null || !schema.writable()) { + return Collections.emptySet(); + } + List catalogTables = catalog.tablesForSchema(schemaName); + Set tables = new HashSet<>(); + for (TableMetadata table : catalogTables) { + tables.add(table.name()); + } + return tables; + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java b/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java new file mode 100644 index 000000000000..f6f942bad9b3 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/MetadataCatalog.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import java.util.List; +import java.util.Set; + +/** + * Client view of the metadata catalog. Implementations can be local + * (with the DB on the same node), or remote (if the DB is on another + * node.) Any caching that is desired is done behind this interface. + *

+ * This interface does not interpolate physical data from + * segments. That work is done by a layer on top of this one: a + * layer which also has visibility to the segment caching logic. + */ +public interface MetadataCatalog +{ + interface CatalogSource + { + List tablesForSchema(String dbSchema); + TableMetadata table(TableId id); + } + + interface CatalogListener + { + void updated(TableMetadata update); + void deleted(TableId tableId); + } + + interface CatalogUpdateProvider + { + void register(CatalogListener listener); + } + + /** + * Resolves a table given a {@link TableId} with the schema and + * table name. Does not do security checks: the caller is responsible. + * + * @return the table metadata, if any exists, else {@code null} if + * no metadata is available. Note that a datasource can exist without + * metadata. Views and input sources exist only if their + * metadata exists. System tables never have metadata. + */ + TableMetadata resolveTable(TableId tableId); + + /** + * List of tables defined within the given schema. Does not filter the + * tables by permissions: the caller is responsible for that. + * + * @param schemaName + * @return + */ + List tables(String schemaName); + + Set tableNames(String schemaName); +} diff --git a/server/src/main/java/org/apache/druid/catalog/MetastoreManager.java b/server/src/main/java/org/apache/druid/catalog/MetastoreManager.java new file mode 100644 index 000000000000..1016a25b40ac --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/MetastoreManager.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.metadata.MetadataStorageConnector; +import org.apache.druid.metadata.MetadataStorageConnectorConfig; +import org.apache.druid.metadata.MetadataStorageTablesConfig; +import org.apache.druid.metadata.SQLMetadataConnector; + +/** + * Represents the metastore manager database and its implementation. + * Abstracts away the various kick-knacks used to define the metastore. + * The metastore operations are defined via table-specific classes. + */ +public interface MetastoreManager +{ + MetadataStorageConnector connector(); + MetadataStorageConnectorConfig config(); + MetadataStorageTablesConfig tablesConfig(); + + /** + * Whether to create tables if they do not exist. + */ + boolean createTables(); + + /** + * Object mapper to use for serializing and deserializing + * JSON objects stored in the metastore DB. + */ + ObjectMapper jsonMapper(); + + /** + * Is the implementation SQL-based? + */ + boolean isSql(); + + /** + * If SQL based, return the SQL version of the metastore + * connector. Throws an exception if not SQL-based. 
+ */ + SQLMetadataConnector sqlConnector(); +} diff --git a/server/src/main/java/org/apache/druid/catalog/MetastoreManagerImpl.java b/server/src/main/java/org/apache/druid/catalog/MetastoreManagerImpl.java new file mode 100644 index 000000000000..45bd67a71e86 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/MetastoreManagerImpl.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Supplier; +import org.apache.druid.guice.annotations.Json; +import org.apache.druid.metadata.MetadataStorageConnector; +import org.apache.druid.metadata.MetadataStorageConnectorConfig; +import org.apache.druid.metadata.MetadataStorageTablesConfig; +import org.apache.druid.metadata.SQLMetadataConnector; + +import javax.inject.Inject; + +public class MetastoreManagerImpl implements MetastoreManager +{ + private final ObjectMapper jsonMapper; + private final MetadataStorageConnector connector; + private final MetadataStorageConnectorConfig config; + private final MetadataStorageTablesConfig tablesConfig; + + @Inject + public MetastoreManagerImpl( + @Json ObjectMapper jsonMapper, + MetadataStorageConnector connector, + Supplier configSupplier, + Supplier tablesConfigSupplier + ) + { + this.jsonMapper = jsonMapper; + this.connector = connector; + this.config = configSupplier.get(); + this.tablesConfig = tablesConfigSupplier.get(); + } + + @Override + public MetadataStorageConnector connector() + { + return connector; + } + + @Override + public MetadataStorageConnectorConfig config() + { + return config; + } + + @Override + public MetadataStorageTablesConfig tablesConfig() + { + return tablesConfig; + } + + @Override + public boolean createTables() + { + return config.isCreateTables(); + } + + @Override + public ObjectMapper jsonMapper() + { + return jsonMapper; + } + + @Override + public boolean isSql() + { + return connector instanceof SQLMetadataConnector; + } + + @Override + public SQLMetadataConnector sqlConnector() + { + return (SQLMetadataConnector) connector; + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/RestUpdateSender.java b/server/src/main/java/org/apache/druid/catalog/RestUpdateSender.java new file mode 100644 index 000000000000..0f48fbc85943 --- /dev/null +++ 
b/server/src/main/java/org/apache/druid/catalog/RestUpdateSender.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.java.util.common.RE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.emitter.EmittingLogger; +import org.apache.druid.java.util.http.client.HttpClient; +import org.apache.druid.java.util.http.client.Request; +import org.apache.druid.java.util.http.client.response.StatusResponseHandler; +import org.apache.druid.java.util.http.client.response.StatusResponseHolder; +import org.apache.druid.server.DruidNode; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.joda.time.Duration; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import 
java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Consumer; +import java.util.function.Supplier; + +/** + * Sends updates to a set of Druid nodes provided by the given supplier. + * The update is provided by the caller in serialized form. The class sends + * updates concurrently, and returns futures for all the requests. + * + * Updates are processed one by one, but each updates is sent concurrently. + * All responses from all receivers must arrive (or a timeout must occur) + * before the next updates can be sent. As a result, this class is suitable for + * low-frequency updates: where the worst-case send times are less than + * the worst-case update frequency. If updates are faster, they will back + * up, and the class should be redesigned to allow healthy receivers to + * continue to get updates while laggards block only themselves. + * + * Defined by composition so it can be tested and reused in other + * contexts. + */ +public class RestUpdateSender implements Consumer +{ + private static final EmittingLogger LOG = new EmittingLogger(RestUpdateSender.class); + + public interface RestSender + { + ListenableFuture send(URL listenerURL, byte[] serializedEntity); + } + + private static class HttpClientSender implements RestSender + { + private final HttpClient httpClient; + private final Duration cacheNotificationsTimeout; + + private HttpClientSender( + HttpClient httpClient, + Duration cacheNotificationsTimeout) + { + this.httpClient = httpClient; + this.cacheNotificationsTimeout = cacheNotificationsTimeout; + } + + @Override + public ListenableFuture send(URL listenerURL, byte[] serializedEntity) + { + // Best effort, if this fails, remote node will poll + // and pick up the update eventually. 
+ return httpClient.go( + new Request(HttpMethod.POST, listenerURL) + .setContent(SmileMediaTypes.APPLICATION_JACKSON_SMILE, serializedEntity), + StatusResponseHandler.getInstance(), + cacheNotificationsTimeout + ); + } + } + + private final String callerName; + private final Supplier> destinationSupplier; + private final String baseUrl; + private final RestSender sender; + private final long cacheNotificationsTimeoutMs; + + public RestUpdateSender( + final String callerName, + final Supplier> destinationSupplier, + final RestSender sender, + final String baseUrl, + final long cacheNotificationsTimeoutMs + ) + { + this.callerName = callerName; + this.destinationSupplier = destinationSupplier; + this.sender = sender; + this.baseUrl = baseUrl; + this.cacheNotificationsTimeoutMs = cacheNotificationsTimeoutMs; + } + + public static RestSender httpClientSender(HttpClient httpClient, Duration cacheNotificationsTimeou) + { + return new HttpClientSender(httpClient, cacheNotificationsTimeou); + } + + @Override + public void accept(byte[] serializedEntity) + { + LOG.debug(callerName + ": Sending update notifications"); + + // Best effort, if a notification fails, the remote node will eventually poll to update its state + // We wait for responses however, to avoid flooding remote nodes with notifications. 
+ List> futures = new ArrayList<>(); + for (DruidNode node : destinationSupplier.get()) { + futures.add( + sender.send( + getListenerURL(node, baseUrl), + serializedEntity)); + } + + try { + List responses = getResponsesFromFutures(futures); + + for (StatusResponseHolder response : responses) { + if (response == null) { + LOG.error("Got null future response from update request."); + continue; + } + HttpResponseStatus status = response.getStatus(); + if (HttpResponseStatus.OK.equals(status) || + HttpResponseStatus.ACCEPTED.equals(status)) { + LOG.debug("Got status [%s]", status); + } else { + LOG.error("Got error status [%s], content [%s]", status, response.getContent()); + } + } + } + catch (Exception e) { + LOG.makeAlert(e, callerName + ": Failed to get response for cache notification.").emit(); + } + + LOG.debug(callerName + ": Received responses for cache update notifications."); + } + + @VisibleForTesting + List getResponsesFromFutures( + List> futures + ) throws InterruptedException, ExecutionException, TimeoutException + { + return Futures.successfulAsList(futures) + .get( + cacheNotificationsTimeoutMs, + TimeUnit.MILLISECONDS + ); + } + + private URL getListenerURL(DruidNode druidNode, String baseUrl) + { + try { + return new URL( + druidNode.getServiceScheme(), + druidNode.getHost(), + druidNode.getPortToUse(), + baseUrl + ); + } + catch (MalformedURLException mue) { + String msg = StringUtils.format(callerName + ": Malformed url for DruidNode [%s] and baseUrl [%s]", druidNode, baseUrl); + LOG.error(msg); + throw new RE(mue, msg); + } + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java new file mode 100644 index 000000000000..80358caa6e64 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistry.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.TableMetadata.TableType; + +import java.util.Set; + +/** + * Defines the set of schemas available in Druid and their properties. + * Since Druid has a fixed set of schemas, this registry is currently + * hard-coded. That will change if/when Druid allows user-defined + * schemas. + */ +public interface SchemaRegistry +{ + interface SchemaSpec + { + String name(); + String securityResource(); + boolean writable(); + boolean accepts(TableSpec spec); + TableType tableType(); + } + + SchemaSpec schema(String name); + Set names(); +} diff --git a/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java b/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java new file mode 100644 index 000000000000..6908031af511 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/SchemaRegistryImpl.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.catalog.TableMetadata.TableType; +import org.apache.druid.server.security.ResourceType; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +/** + * Hard-coded schema registry that knows about the well-known, and + * a few obscure, Druid schemas. Does not allow for user-defined + * schemas, which the rest of Druid would not be able to support. 
+ */ +public class SchemaRegistryImpl implements SchemaRegistry +{ + // Mimics the definition in ExternalOperatorConvertion + // TODO: Change this when ExternalOperatorConvertion changes + private String EXTERNAL_RESOURCE = "EXTERNAL"; + + public static class SchemaDefnImpl implements SchemaSpec + { + private final String name; + private final String resource; + private final TableType tableType; + private Class acceptedClass; + + public SchemaDefnImpl( + String name, + String resource, + TableType tableType, + Class acceptedClass) + { + this.name = name; + this.resource = resource; + this.tableType = tableType; + this.acceptedClass = acceptedClass; + } + + @Override + public String name() + { + return name; + } + + @Override + public String securityResource() + { + return resource; + } + + @Override + public boolean writable() + { + return acceptedClass != null; + } + + @Override + public boolean accepts(TableSpec spec) + { + if (acceptedClass == null) { + return false; + } + if (spec == null) { + return false; + } + return acceptedClass.isAssignableFrom(spec.getClass()); + } + + @Override + public TableType tableType() + { + return tableType; + } + } + + private final Map builtIns; + + public SchemaRegistryImpl() + { + builtIns = new HashMap<>(); + register(new SchemaDefnImpl( + TableId.DRUID_SCHEMA, + ResourceType.DATASOURCE, + TableType.DATASOURCE, + DatasourceSpec.class)); + register(new SchemaDefnImpl( + TableId.LOOKUP_SCHEMA, + ResourceType.CONFIG, + null, // TODO + null)); // TODO + register(new SchemaDefnImpl( + TableId.CATALOG_SCHEMA, + ResourceType.SYSTEM_TABLE, + null, + null)); + register(new SchemaDefnImpl( + TableId.SYSTEM_SCHEMA, + ResourceType.SYSTEM_TABLE, + null, + null)); + register(new SchemaDefnImpl( + TableId.INPUT_SCHEMA, + EXTERNAL_RESOURCE, + TableType.INPUT, + InputTableSpec.class)); + register(new SchemaDefnImpl( + TableId.VIEW_SCHEMA, + ResourceType.VIEW, + null, // TODO + null)); // TODO + } + + private void register(SchemaSpec 
schemaDefn) + { + builtIns.put(schemaDefn.name(), schemaDefn); + } + + @Override + public SchemaSpec schema(String name) + { + return builtIns.get(name); + } + + @Override + public Set names() + { + return new TreeSet(builtIns.keySet()); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/TableId.java b/server/src/main/java/org/apache/druid/catalog/TableId.java new file mode 100644 index 000000000000..33103835410a --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/TableId.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Objects; +import org.apache.druid.java.util.common.StringUtils; + +/** + * SQL-like compound table ID with schema and table name. 
+ */ +public class TableId +{ + // Well-known Druid schemas + public static final String DRUID_SCHEMA = "druid"; + public static final String LOOKUP_SCHEMA = "lookups"; + public static final String SYSTEM_SCHEMA = "sys"; + public static final String CATALOG_SCHEMA = "INFORMATION_SCHEMA"; + + // Extra for MSQE + public static final String INPUT_SCHEMA = "input"; + + // Extra for views + public static final String VIEW_SCHEMA = "view"; + + private final String schema; + private final String name; + + @JsonCreator + public TableId( + @JsonProperty("schema") String schema, + @JsonProperty("schema") String name) + { + this.schema = schema; + this.name = name; + } + + public static TableId datasource(String name) + { + return new TableId(DRUID_SCHEMA, name); + } + + public static TableId inputSource(String name) + { + return new TableId(INPUT_SCHEMA, name); + } + + public static TableId of(String schema, String table) + { + return new TableId(schema, table); + } + + @JsonProperty("schema") + public String schema() + { + return schema; + } + + @JsonProperty("name") + public String name() + { + return name; + } + + public String sqlName() + { + return StringUtils.format("\"%s\".\"%s\"", schema, name); + } + + @Override + public String toString() + { + return sqlName(); + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + TableId other = (TableId) o; + return Objects.equal(this.schema, other.schema) + && Objects.equal(this.name, other.name); + } + + @Override + public int hashCode() + { + return Objects.hashCode(schema, name); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/TableMetadata.java b/server/src/main/java/org/apache/druid/catalog/TableMetadata.java new file mode 100644 index 000000000000..b449364fd225 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/TableMetadata.java @@ -0,0 +1,281 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import org.apache.druid.guice.annotations.PublicApi; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.metadata.catalog.CatalogManager.TableState; + +import java.util.Objects; + +/** + * REST API level description of a table. Tables have multiple types + * as described by subclasses. 
+ */ +@PublicApi +public class TableMetadata +{ + public enum TableType + { + DATASOURCE, + INPUT, + VIEW, + TOMBSTONE + } + + private final String dbSchema; + private final String name; + private final String owner; + private final long creationTime; + private final long updateTime; + private final TableState state; + private final TableSpec defn; + + public TableMetadata( + @JsonProperty("dbSchema") String dbSchema, + @JsonProperty("name") String name, + @JsonProperty("owner") String owner, + @JsonProperty("creationTime") long creationTime, + @JsonProperty("updateTime") long updateTime, + @JsonProperty("state") TableState state, + @JsonProperty("defn") TableSpec defn) + { + this.dbSchema = dbSchema; + this.name = name; + this.owner = owner; + this.creationTime = creationTime; + this.updateTime = updateTime; + this.state = state; + this.defn = defn; + } + + public static TableMetadata newTable( + TableId id, + TableSpec defn + ) + { + return newTable(id.schema(), id.name(), defn); + } + + public static TableMetadata newTable( + String dbSchema, + String name, + TableSpec defn + ) + { + return new TableMetadata( + dbSchema, + name, + null, + 0, + 0, + TableState.ACTIVE, + defn); + } + + public static TableMetadata newSegmentTable( + String name, + TableSpec defn + ) + { + return newTable( + TableId.DRUID_SCHEMA, + name, + defn); + } + + public TableMetadata fromInsert(String dbSchema, long updateTime) + { + return new TableMetadata( + dbSchema, + name, + owner, + updateTime, + updateTime, + state, + defn); + } + + public TableMetadata asUpdate(long updateTime) + { + return new TableMetadata( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } + + public TableMetadata withSchema(String dbSchema) + { + if (dbSchema.equals(this.dbSchema)) { + return this; + } + return new TableMetadata( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } + + public TableId id() + { + return new TableId(resolveDbSchema(), name); 
+ } + + @JsonProperty("dbSchema") + public String dbSchema() + { + return dbSchema; + } + + @JsonProperty("name") + public String name() + { + return name; + } + + public String sqlName() + { + return StringUtils.format("\"%s\".\"%s\"", dbSchema, name); + } + + @JsonProperty("owner") + @JsonInclude(JsonInclude.Include.NON_NULL) + public String owner() + { + return owner; + } + + @JsonProperty("state") + public TableState state() + { + return state; + } + + @JsonProperty("creationTime") + public long creationTime() + { + return creationTime; + } + + @JsonProperty("updateTime") + public long updateTime() + { + return updateTime; + } + + @JsonProperty("defn") + public TableSpec spec() + { + return defn; + } + + /** + * Syntactic validation of a table object. Validates only that which + * can be checked from this table object. + */ + public void validate() + { + if (Strings.isNullOrEmpty(dbSchema)) { + throw new IAE("Database schema is required"); + } + if (Strings.isNullOrEmpty(name)) { + throw new IAE("Table name is required"); + } + if (defn != null) { + defn.validate(); + } + } + + public byte[] toBytes(ObjectMapper jsonMapper) + { + return CatalogSpecs.toBytes(jsonMapper, this); + } + + public static TableMetadata fromBytes(ObjectMapper jsonMapper, byte[] bytes) + { + return CatalogSpecs.fromBytes(jsonMapper, bytes, TableMetadata.class); + } + + @Override + public String toString() + { + return CatalogSpecs.toString(this); + } + + public String resolveDbSchema() + { + if (!Strings.isNullOrEmpty(dbSchema)) { + return dbSchema; + } else if (defn != null) { + return defn.defaultSchema(); + } else { + return null; + } + } + + @Override + public boolean equals(Object o) + { + if (o == this) { + return true; + } + if (o == null || o.getClass() != getClass()) { + return false; + } + TableMetadata other = (TableMetadata) o; + return Objects.equals(dbSchema, other.dbSchema) + && Objects.equals(name, other.name) + && Objects.equals(owner, other.owner) + && creationTime == 
other.creationTime + && updateTime == other.updateTime + && state == other.state + && Objects.equals(defn, other.defn); + } + + @Override + public int hashCode() + { + return Objects.hash( + dbSchema, + name, + owner, + creationTime, + updateTime, + state, + defn); + } + + public TableType type() + { + return defn == null ? null : defn.type(); + } +} diff --git a/server/src/main/java/org/apache/druid/catalog/TableSpec.java b/server/src/main/java/org/apache/druid/catalog/TableSpec.java new file mode 100644 index 000000000000..c3a82bd76154 --- /dev/null +++ b/server/src/main/java/org/apache/druid/catalog/TableSpec.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonSubTypes.Type; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import org.apache.druid.catalog.TableMetadata.TableType; + +import java.util.Map; + +/** + * Definition of a table "hint" in the metastore, between client and + * Druid, and between Druid nodes. + */ +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") +@JsonSubTypes(value = { + @Type(name = "datasource", value = DatasourceSpec.class), + @Type(name = "input", value = InputTableSpec.class), + @Type(name = "tombstone", value = TableSpec.Tombstone.class), +}) +public abstract class TableSpec +{ + private final Map properties; + + public TableSpec(Map properties) + { + this.properties = properties == null ? 
ImmutableMap.of() : properties; + } + + @JsonProperty("properties") + @JsonInclude(Include.NON_NULL) + public Map properties() + { + return properties; + } + + public void validate() + { + } + + public byte[] toBytes(ObjectMapper jsonMapper) + { + return CatalogSpecs.toBytes(jsonMapper, this); + } + + public static TableSpec fromBytes(ObjectMapper jsonMapper, byte[] bytes) + { + return CatalogSpecs.fromBytes(jsonMapper, bytes, TableSpec.class); + } + + @Override + public String toString() + { + return CatalogSpecs.toString(this); + } + + public String defaultSchema() + { + return null; + } + + public static class Tombstone extends TableSpec + { + public Tombstone() + { + super(null); + } + + @Override + public TableType type() + { + return TableType.TOMBSTONE; + } + } + + public abstract TableType type(); +} diff --git a/server/src/main/java/org/apache/druid/discovery/DruidLeaderClient.java b/server/src/main/java/org/apache/druid/discovery/DruidLeaderClient.java index 0679ac39e4ac..063957547c8c 100644 --- a/server/src/main/java/org/apache/druid/discovery/DruidLeaderClient.java +++ b/server/src/main/java/org/apache/druid/discovery/DruidLeaderClient.java @@ -146,12 +146,12 @@ public > H go(Request request, HttpResponseHa // Unwrap IOExceptions and ChannelExceptions, re-throw others Throwables.propagateIfInstanceOf(e.getCause(), IOException.class); Throwables.propagateIfInstanceOf(e.getCause(), ChannelException.class); - throw new RE(e, "HTTP request to[%s] failed", request.getUrl()); + throw new RE(e, "HTTP request to [%s] failed", request.getUrl()); } } catch (IOException | ChannelException ex) { // can happen if the node is stopped. - log.warn(ex, "Request[%s] failed.", request.getUrl()); + log.warn(ex, "Request [%s] failed.", request.getUrl()); try { if (request.getUrl().getQuery() == null) { @@ -176,7 +176,7 @@ public > H go(Request request, HttpResponseHa // Not an IOException; this is our own fault. 
throw new ISE( e, - "failed to build url with path[%] and query string [%s].", + "failed to build url with path [%] and query string [%s].", request.getUrl().getPath(), request.getUrl().getQuery() ); @@ -186,10 +186,10 @@ public > H go(Request request, HttpResponseHa if (HttpResponseStatus.TEMPORARY_REDIRECT.equals(fullResponseHolder.getResponse().getStatus())) { String redirectUrlStr = fullResponseHolder.getResponse().headers().get("Location"); if (redirectUrlStr == null) { - throw new IOE("No redirect location is found in response from url[%s].", request.getUrl()); + throw new IOE("No redirect location is found in response from url [%s].", request.getUrl()); } - log.info("Request[%s] received redirect response to location [%s].", request.getUrl(), redirectUrlStr); + log.info("Request [%s] received redirect response to location [%s].", request.getUrl(), redirectUrlStr); final URL redirectUrl; try { @@ -245,7 +245,7 @@ public String findCurrentLeader() return validatedUrl.toString(); } catch (MalformedURLException ex) { - log.error(ex, "Received malformed leader url[%s].", leaderUrl); + log.error(ex, "Received malformed leader url [%s].", leaderUrl); } } diff --git a/server/src/main/java/org/apache/druid/guice/CatalogClientModule.java b/server/src/main/java/org/apache/druid/guice/CatalogClientModule.java new file mode 100644 index 000000000000..2305feeaa37c --- /dev/null +++ b/server/src/main/java/org/apache/druid/guice/CatalogClientModule.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.guice; + +import com.google.inject.Binder; +import com.google.inject.Module; +import org.apache.druid.catalog.CachedMetadataCatalog; +import org.apache.druid.catalog.CatalogClient; +import org.apache.druid.catalog.MetadataCatalog; +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.MetadataCatalog.CatalogSource; +import org.apache.druid.catalog.SchemaRegistry; +import org.apache.druid.catalog.SchemaRegistryImpl; +import org.apache.druid.server.http.CatalogListenerResource; + +/** + * Configures the metadata catalog on the Broker to use a cache + * and network communications for pull and push updates. + */ +public class CatalogClientModule implements Module +{ + @Override + public void configure(Binder binder) + { + // The Broker (client) uses a cached metadata catalog. + binder + .bind(CachedMetadataCatalog.class) + .in(LazySingleton.class); + + // Broker code accesses he catalog through the + // MetadataCatalog interface. + binder + .bind(MetadataCatalog.class) + .to(CachedMetadataCatalog.class) + .in(LazySingleton.class); + + // The cached metadata catalog needs a "pull" source, + // which is the network client. + binder + .bind(CatalogSource.class) + .to(CatalogClient.class) + .in(LazySingleton.class); + + // The cached metadata catalog is the listener for + // "push" events. + binder + .bind(CatalogListener.class) + .to(CachedMetadataCatalog.class) + .in(LazySingleton.class); + + // At present, the set of schemas is fixed. 
+ binder + .bind(SchemaRegistry.class) + .to(SchemaRegistryImpl.class) + .in(LazySingleton.class); + + // The listener resource sends to the catalog + // listener (the cached catalog.) + Jerseys.addResource(binder, CatalogListenerResource.class); + } +} diff --git a/server/src/main/java/org/apache/druid/guice/CatalogModule.java b/server/src/main/java/org/apache/druid/guice/CatalogModule.java new file mode 100644 index 000000000000..b0a77adbca27 --- /dev/null +++ b/server/src/main/java/org/apache/druid/guice/CatalogModule.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.guice; + +import com.google.inject.Binder; +import com.google.inject.Module; +import org.apache.druid.catalog.CatalogStorage; +import org.apache.druid.catalog.CatalogUpdateNotifier; +import org.apache.druid.catalog.MetastoreManager; +import org.apache.druid.catalog.MetastoreManagerImpl; +import org.apache.druid.catalog.SchemaRegistry; +import org.apache.druid.catalog.SchemaRegistryImpl; +import org.apache.druid.metadata.catalog.CatalogManager; +import org.apache.druid.metadata.catalog.SQLCatalogManager; +import org.apache.druid.server.http.CatalogResource; + +/** + * Configures the catalog database on the Coordinator, along + * with its REST resource for CRUD updates and the notifier + * for push updates. + */ +public class CatalogModule implements Module +{ + @Override + public void configure(Binder binder) + { + // Database layer: only the SQL version is supported at present. + binder + .bind(CatalogManager.class) + .to(SQLCatalogManager.class) + .in(LazySingleton.class); + + // Storage abstraction used by the REST API, sits on top of the + // database layer. + binder + .bind(CatalogStorage.class) + .in(LazySingleton.class); + binder + .bind(MetastoreManager.class) + .to(MetastoreManagerImpl.class) + .in(LazySingleton.class); + + // At present, the set of schemas is fixed. + binder + .bind(SchemaRegistry.class) + .to(SchemaRegistryImpl.class) + .in(LazySingleton.class); + + // Push update notifier, which is lifecycle managed. No references, + // so force Guice to create the instance. (Lifecycle will also, if + // Guice hasn't done so.) + binder + .bind(CatalogUpdateNotifier.class) + .in(ManageLifecycle.class); + LifecycleModule.register(binder, CatalogUpdateNotifier.class); + + // Public REST API and private cache sync API. 
+    Jerseys.addResource(binder, CatalogResource.class);
+  }
+}
diff --git a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java
index 931ae81774cc..5575108bc091 100644
--- a/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java
+++ b/server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java
@@ -1227,7 +1227,7 @@ protected DataStoreMetadataUpdateResult updateDataSourceMetadataWithHandle(
   }
 
   /**
-   * Mark segments as unsed in a transaction. This method is idempotent in that if
+   * Mark segments as unused in a transaction. This method is idempotent in that if
    * the segments was already marked unused, it will return true.
    *
    * @param handle database handle
diff --git a/server/src/main/java/org/apache/druid/metadata/MetadataRuleManager.java b/server/src/main/java/org/apache/druid/metadata/MetadataRuleManager.java
index ea2b6e7461f8..eb2bec6b136f 100644
--- a/server/src/main/java/org/apache/druid/metadata/MetadataRuleManager.java
+++ b/server/src/main/java/org/apache/druid/metadata/MetadataRuleManager.java
@@ -44,7 +44,8 @@ public interface MetadataRuleManager
   boolean overrideRule(String dataSource, List<Rule> rulesConfig, AuditInfo auditInfo);
 
   /**
-   * Remove rules for non-existence datasource (datasource with no segment) created older than the given timestamp.
+   * Remove rules for a non-existent datasource (datasource with no segments)
+   * created before the given timestamp.
* * @param timestamp timestamp in milliseconds * @return number of rules removed diff --git a/server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java b/server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java index 781a4b99c8f9..f7cf24ae7707 100644 --- a/server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java +++ b/server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java @@ -193,14 +193,14 @@ public void createTable(final String tableName, final Iterable sql) public Void withHandle(Handle handle) { if (!tableExists(handle, tableName)) { - log.info("Creating table[%s]", tableName); + log.info("Creating table [%s]", tableName); final Batch batch = handle.createBatch(); for (String s : sql) { batch.add(s); } batch.execute(); } else { - log.info("Table[%s] already exists", tableName); + log.info("Table [%s] already exists", tableName); } return null; } @@ -794,6 +794,36 @@ public void createAuditTable() } } + private void createTableDefnTable(final String tableName) + { + createTable( + tableName, + ImmutableList.of( + StringUtils.format( + "CREATE TABLE %s (\n" + + " schemaName VARCHAR(255) NOT NULL,\n" + + " name VARCHAR(255) NOT NULL,\n" + + " owner VARCHAR(255),\n" + + " creationTime BIGINT NOT NULL,\n" + + " updateTime BIGINT NOT NULL,\n" + + " state CHAR(1) NOT NULL,\n" + + " payload %s,\n" + + " PRIMARY KEY(schemaName, name)\n" + + ")", + tableName, getPayloadType() + ) + ) + ); + } + + @Override + public void createTableDefnTable() + { + if (config.get().isCreateTables()) { + createTableDefnTable(tablesConfigSupplier.get().getTableDefnTable()); + } + } + @Override public void deleteAllRecords(final String tableName) { @@ -821,4 +851,11 @@ public Void withHandle(Handle handle) log.warn(e, "Exception while deleting records from table"); } } + + public boolean isDuplicateRecordException(UnableToExecuteStatementException e) + { + // TODO(paul): Track down how to figure this out for each supported + 
// DB. + return false; + } } diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java new file mode 100644 index 000000000000..939535192493 --- /dev/null +++ b/server/src/main/java/org/apache/druid/metadata/catalog/CatalogManager.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata.catalog; + +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableMetadata; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.java.util.common.ISE; + +import javax.annotation.Nullable; + +import java.util.List; + +/** + * Manages catalog data. Used in Coordinator, which will be in either + * an leader or standby state. The Coordinator calls the {@link #start()} + * method when it becomes the leader, and calls {@link #stop()} when + * it loses leadership, or shuts down. + * + * Performs detailed CRUD operations on the catalog tables table. + * Higher-level operations appear elsewhere. 
+ */
+public interface CatalogManager
+{
+  enum TableState
+  {
+    ACTIVE("A"),
+    DELETING("D");
+
+    private final String code;
+
+    TableState(String code)
+    {
+      this.code = code;
+    }
+
+    public String code()
+    {
+      return code;
+    }
+
+    public static TableState fromCode(String code)
+    {
+      for (TableState state : values()) {
+        if (state.code.equals(code)) {
+          return state;
+        }
+      }
+      throw new ISE("Unknown TableState code: " + code);
+    }
+  }
+
+  /**
+   * Thrown when an "optimistic lock" fails: the version of a
+   * catalog object being updated is not the same as that of
+   * the expected version.
+   */
+  class OutOfDateException extends Exception
+  {
+    public OutOfDateException(String msg)
+    {
+      super(msg);
+    }
+  }
+
+  class NotFoundException extends Exception
+  {
+    public NotFoundException(String msg)
+    {
+      super(msg);
+    }
+  }
+
+  /**
+   * Indicates an attempt to insert a duplicate key into a table.
+   * This could indicate a logic error, or a race condition. It is
+   * generally not retryable: it is unrealistic to expect the other
+   * thread to helpfully delete the record it just added.
+   */
+  class DuplicateKeyException extends Exception
+  {
+    public DuplicateKeyException(String msg, Exception e)
+    {
+      super(msg, e);
+    }
+  }
+
+  interface Listener
+  {
+    void added(TableMetadata table);
+    void updated(TableMetadata table);
+    void deleted(TableId id);
+  }
+
+  void start();
+
+
+  void register(Listener listener);
+  void createTableDefnTable();
+
+  /**
+   * Create a table entry.
+   *
+   * @return the version of the newly created table. Call
+   * {@link TableMetadata#asUpdate(long)} if you want a new
+   * {@link TableMetadata} with the new version.
+   * @throws DuplicateKeyException if the row is a duplicate
+   * (schema, name) pair. This generally indicates a code error,
+   * a race condition, or a DB update made outside of Druid.
+   * In any event, the error is not
+   * retryable: the user should pick another name, or update the
+   * existing table.
+   */
+  long create(TableMetadata table) throws DuplicateKeyException;
+
+  /**
+   * Update a table definition, but only if the database entry is at
+   * the given {@code oldVersion}.
+   */
+  long updateSpec(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException;
+
+  /**
+   * Update a table definition, overwriting any current content.
+   * This is a potential race condition if this is a partial update
+   * because of the possibility of another user doing an update since the
+   * read. Fine when the goal is to replace the entire definition.
+   */
+  long updateDefn(TableId id, TableSpec defn) throws NotFoundException;
+
+  /**
+   * Move the table to the deleting state. No version check: fine
+   * if the table is already in the deleting state. Does nothing if the
+   * table does not exist.
+   *
+   * @return new table update timestamp, or 0 if the table does not
+   * exist
+   */
+  long markDeleting(TableId id);
+
+  /**
+   * Read the table record for the given ID.
+   *
+   * @return the table record, or {@code null} if the entry is not
+   * found in the DB.
+   */
+  @Nullable TableMetadata read(TableId id);
+
+  /**
+   * Delete the table record for the given ID. Essentially does a
+   * "DELETE IF EXISTS". There is no version check. Delete should be
+   * called only when there are no segments left for the table: use
+   * {@link #markDeleting(TableId)} to indicate that the segments are
+   * being deleted. Call this method after deletion is complete.
+   *

+ * Does not cascade deletes yet. Eventually, should delete all entries + * for the table. + * + * @return {@code true} if the table exists and was deleted, + * {@code false} if the table did not exist. + */ + boolean delete(TableId id); + + List list(); + List list(String dbSchema); + List listDetails(String dbSchema); + + void stop(); +} diff --git a/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java b/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java new file mode 100644 index 000000000000..45b9f394931f --- /dev/null +++ b/server/src/main/java/org/apache/druid/metadata/catalog/SQLCatalogManager.java @@ -0,0 +1,461 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.metadata.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Lists; +import com.google.inject.Inject; +import org.apache.druid.catalog.MetastoreManager; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableMetadata; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.guice.ManageLifecycle; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.lifecycle.LifecycleStart; +import org.apache.druid.metadata.SQLMetadataConnector; +import org.skife.jdbi.v2.Handle; +import org.skife.jdbi.v2.IDBI; +import org.skife.jdbi.v2.Query; +import org.skife.jdbi.v2.ResultIterator; +import org.skife.jdbi.v2.Update; +import org.skife.jdbi.v2.exceptions.CallbackFailedException; +import org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; +import org.skife.jdbi.v2.tweak.HandleCallback; + +import java.util.Deque; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentLinkedDeque; + +@ManageLifecycle +public class SQLCatalogManager implements CatalogManager +{ + private static final String INSERT_TABLE = + "INSERT INTO %s\n" + + " (schemaName, name, owner, creationTime, updateTime, state, payload)\n" + + " VALUES(:schemaName, :name, :owner, :creationTime, :updateTime, :state, :payload)"; + + private static final String UPDATE_HEAD = + "UPDATE %s\n SET\n"; + + private static final String WHERE_TABLE_ID = + "WHERE schemaName = :schemaName\n" + + " AND name = :name\n"; + + private static final String SAFETY_CHECK = + " AND updateTime = :oldVersion"; + + private static final String UPDATE_DEFN_UNSAFE = + UPDATE_HEAD + + " payload = :payload,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String UPDATE_DEFN_SAFE = + UPDATE_DEFN_UNSAFE + + SAFETY_CHECK; + + private static final String UPDATE_STATE = + 
UPDATE_HEAD + + " state = :state,\n" + + " updateTime = :updateTime\n" + + WHERE_TABLE_ID; + + private static final String SELECT_TABLE = + "SELECT owner, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + WHERE_TABLE_ID; + + private static final String SELECT_ALL_TABLES = + "SELECT schemaName, name\n" + + "FROM %s\n" + + "ORDER BY schemaName, name"; + + private static final String SELECT_TABLES_IN_SCHEMA = + "SELECT name\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String SELECT_TABLE_DETAILS_IN_SCHEMA = + "SELECT name, owner, creationTime, updateTime, state, payload\n" + + "FROM %s\n" + + "WHERE schemaName = :schemaName\n" + + "ORDER BY name"; + + private static final String DELETE_TABLE = + "DELETE FROM %s\n" + + WHERE_TABLE_ID; + + private final SQLMetadataConnector connector; + private final ObjectMapper jsonMapper; + private final IDBI dbi; + private final String tableName; + private final Deque listeners = new ConcurrentLinkedDeque<>(); + + @Inject + public SQLCatalogManager(MetastoreManager metastoreManager) + { + if (!metastoreManager.isSql()) { + throw new ISE("SQLCatalogManager only works with SQL based metadata store at this time"); + } + this.connector = metastoreManager.sqlConnector(); + this.dbi = connector.getDBI(); + this.jsonMapper = metastoreManager.jsonMapper(); + this.tableName = metastoreManager.tablesConfig().getTableDefnTable(); + } + + @Override + @LifecycleStart + public void start() + { + createTableDefnTable(); + } + + @Override + public void stop() + { + } + + @Override + public void createTableDefnTable() + { + connector.createTableDefnTable(); + } + + @Override + public long create(TableMetadata table) throws DuplicateKeyException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws DuplicateKeyException + { + long updateTime = System.currentTimeMillis(); + Update stmt = handle.createStatement( 
+ StringUtils.format(INSERT_TABLE, tableName) + ) + .bind("schemaName", table.resolveDbSchema()) + .bind("name", table.name()) + .bind("owner", table.owner()) + .bind("creationTime", updateTime) + .bind("updateTime", updateTime) + .bind("state", TableState.ACTIVE.code()) + .bind("payload", table.spec().toBytes(jsonMapper)); + try { + stmt.execute(); + } + catch (UnableToExecuteStatementException e) { + if (connector.isDuplicateRecordException(e)) { + throw new DuplicateKeyException( + "Tried to insert a duplicate table: " + table.sqlName(), + e); + } else { + throw e; + } + } + sendAddition(table, updateTime); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof DuplicateKeyException) { + throw (DuplicateKeyException) e.getCause(); + } + throw e; + } + } + + @Override + public TableMetadata read(TableId id) + { + return dbi.withHandle( + new HandleCallback() + { + @Override + public TableMetadata withHandle(Handle handle) + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_TABLE, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()) + .bind("schemaName", id.schema()) + .bind("name", id.name()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableMetadata( + id.schema(), + id.name(), + r.getString(1), + r.getLong(2), + r.getLong(3), + TableState.fromCode(r.getString(4)), + TableSpec.fromBytes(jsonMapper, r.getBytes(5)) + )) + .iterator(); + if (resultIterator.hasNext()) { + return resultIterator.next(); + } + return null; + } + } + ); + } + + @Override + public long updateSpec(TableId id, TableSpec defn, long oldVersion) throws OutOfDateException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws OutOfDateException + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_SAFE, tableName)) + .bind("schemaName", 
id.schema()) + .bind("name", id.name()) + .bind("payload", defn.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .bind("oldVersion", oldVersion) + .execute(); + if (updateCount == 0) { + throw new OutOfDateException( + StringUtils.format( + "Table %s: not found or update version does not match DB version", + id.sqlName())); + } + sendUpdate(id); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof OutOfDateException) { + throw (OutOfDateException) e.getCause(); + } + throw e; + } + } + + @Override + public long updateDefn(TableId id, TableSpec defn) throws NotFoundException + { + try { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) throws NotFoundException + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_DEFN_UNSAFE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("payload", defn.toBytes(jsonMapper)) + .bind("updateTime", updateTime) + .execute(); + if (updateCount == 0) { + throw new NotFoundException( + StringUtils.format( + "Table %s: not found", + id.sqlName())); + } + sendUpdate(id); + return updateTime; + } + } + ); + } + catch (CallbackFailedException e) { + if (e.getCause() instanceof NotFoundException) { + throw (NotFoundException) e.getCause(); + } + throw e; + } + } + + @Override + public long markDeleting(TableId id) + { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Long withHandle(Handle handle) + { + long updateTime = System.currentTimeMillis(); + int updateCount = handle.createStatement( + StringUtils.format(UPDATE_STATE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .bind("updateTime", updateTime) + .bind("state", TableState.DELETING.code()) + .execute(); + sendDeletion(id); + return updateCount == 1 ? 
updateTime : 0; + } + } + ); + } + + @Override + public boolean delete(TableId id) + { + return dbi.withHandle( + new HandleCallback() + { + @Override + public Boolean withHandle(Handle handle) + { + int updateCount = handle.createStatement( + StringUtils.format(DELETE_TABLE, tableName)) + .bind("schemaName", id.schema()) + .bind("name", id.name()) + .execute(); + sendDeletion(id); + return updateCount > 0; + } + } + ); + } + + @Override + public List list() + { + return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_ALL_TABLES, tableName) + ) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableId(r.getString(1), r.getString(2))) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public List list(String dbSchema) + { + return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_TABLES_IN_SCHEMA, tableName) + ) + .bind("schemaName", dbSchema) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + r.getString(1)) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public List listDetails(String dbSchema) + { + return dbi.withHandle( + new HandleCallback>() + { + @Override + public List withHandle(Handle handle) + { + Query> query = handle.createQuery( + StringUtils.format(SELECT_TABLE_DETAILS_IN_SCHEMA, tableName) + ) + .bind("schemaName", dbSchema) + .setFetchSize(connector.getStreamingFetchSize()); + final ResultIterator resultIterator = + query.map((index, r, ctx) -> + new TableMetadata( + dbSchema, + r.getString(1), + r.getString(2), + r.getLong(3), + r.getLong(4), + TableState.fromCode(r.getString(5)), + 
TableSpec.fromBytes(jsonMapper, r.getBytes(6)))) + .iterator(); + return Lists.newArrayList(resultIterator); + } + } + ); + } + + @Override + public synchronized void register(Listener listener) + { + listeners.add(listener); + } + + protected synchronized void sendAddition(TableMetadata table, long updateTime) + { + if (listeners.isEmpty()) { + return; + } + TableMetadata newTable = table.fromInsert(table.dbSchema(), updateTime); + for (Listener listener : listeners) { + listener.added(newTable); + } + } + + protected synchronized void sendUpdate(TableId id) + { + if (listeners.isEmpty()) { + return; + } + TableMetadata updatedTable = read(id); + for (Listener listener : listeners) { + listener.updated(updatedTable); + } + } + + protected synchronized void sendDeletion(TableId id) + { + for (Listener listener : listeners) { + listener.deleted(id); + } + } +} diff --git a/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyConnector.java b/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyConnector.java index 19d0c6b04f16..2f6398dcb5f2 100644 --- a/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyConnector.java +++ b/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyConnector.java @@ -33,6 +33,7 @@ import org.apache.druid.metadata.SQLMetadataConnector; import org.skife.jdbi.v2.DBI; import org.skife.jdbi.v2.Handle; +import org.skife.jdbi.v2.exceptions.UnableToExecuteStatementException; import org.skife.jdbi.v2.tweak.HandleCallback; import java.sql.DatabaseMetaData; @@ -181,4 +182,13 @@ public void stop() log.info("Stopping DerbyConnector..."); storage.stop(); } + + @Override + public boolean isDuplicateRecordException(UnableToExecuteStatementException e) + { + // Done using class names to avoid a dependency on Derby for this one + // simple thing. 
+ return e.getCause() != null && + e.getCause().getClass().getSimpleName().equals("DerbySQLIntegrityConstraintViolationException"); + } } diff --git a/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyMetadataStorage.java b/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyMetadataStorage.java index 725c531626ca..c79e62c75b5b 100644 --- a/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyMetadataStorage.java +++ b/server/src/main/java/org/apache/druid/metadata/storage/derby/DerbyMetadataStorage.java @@ -42,7 +42,6 @@ public DerbyMetadataStorage(MetadataStorageConnectorConfig config) catch (Exception e) { throw new RuntimeException(e); } - } @Override @@ -55,6 +54,26 @@ public void start() catch (Exception e) { throw new RuntimeException(e); } + + // It takes a while for the Derby server to start in another + // thread. Ping to ensure it is ready. Saves ugly failure/retry + // loops elsewhere in startup. Those loops look alarming in the + // log file. + while (true) { + try { + server.ping(); + break; + } + catch (Exception e) { + log.info("Derby server not yet ready, still trying..."); + try { + Thread.sleep(100); + } + catch (InterruptedException e1) { + // Ignore + } + } + } } @Override diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java new file mode 100644 index 000000000000..2827e4ccba7d --- /dev/null +++ b/server/src/main/java/org/apache/druid/server/http/CatalogListenerResource.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.server.http; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes; +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.catalog.TableMetadata; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.guice.annotations.Json; +import org.apache.druid.guice.annotations.Smile; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.server.security.Access; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceAction; +import org.apache.druid.server.security.ResourceType; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import java.io.IOException; +import java.io.InputStream; + +@Path(CatalogListenerResource.BASE_URL) +public class CatalogListenerResource +{ + public static final String BASE_URL = "/druid/broker/v1/catalog"; + public static final String SYNC_URL = "/sync"; + + private final CatalogListener listener; + private final AuthorizerMapper authorizerMapper; + private final ObjectMapper smileMapper; + private final ObjectMapper jsonMapper; + + @Inject + 
public CatalogListenerResource( + final CatalogListener listener, + @Smile final ObjectMapper smileMapper, + @Json final ObjectMapper jsonMapper, + final AuthorizerMapper authorizerMapper) + { + this.listener = listener; + this.authorizerMapper = authorizerMapper; + this.smileMapper = smileMapper; + this.jsonMapper = jsonMapper; + } + + @POST + @Path(SYNC_URL) + @Consumes({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE}) + public Response syncTable( + final InputStream inputStream, + @Context final HttpServletRequest req) + { + Response resp = checkAuth(req); + if (resp != null) { + return resp; + } + final String reqContentType = req.getContentType(); + final boolean isSmile = SmileMediaTypes.APPLICATION_JACKSON_SMILE.equals(reqContentType); + final ObjectMapper mapper = isSmile ? smileMapper : jsonMapper; + TableMetadata tableSpec; + try { + tableSpec = mapper.readValue(inputStream, TableMetadata.class); + } + catch (IOException e) { + return Response.serverError().entity(e.getMessage()).build(); + } + TableSpec defn = tableSpec.spec(); + if (defn instanceof TableSpec.Tombstone) { + listener.deleted(tableSpec.id()); + } else { + listener.updated(tableSpec); + } + return Response.status(Response.Status.ACCEPTED).build(); + } + + private Response checkAuth(final HttpServletRequest request) + { + final ResourceAction resourceAction = new ResourceAction( + new Resource("CONFIG", ResourceType.CONFIG), + Action.WRITE + ); + + final Access authResult = AuthorizationUtils.authorizeResourceAction( + request, + resourceAction, + authorizerMapper + ); + + if (authResult.isAllowed()) { + return null; + } + return Response.status(Response.Status.FORBIDDEN) + .type(MediaType.TEXT_PLAIN) + .entity(StringUtils.format("Access-Check-Result: %s", authResult.toString())) + .build(); + } +} diff --git a/server/src/main/java/org/apache/druid/server/http/CatalogResource.java b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java new file mode 
100644 index 000000000000..a6c131661ab2 --- /dev/null +++ b/server/src/main/java/org/apache/druid/server/http/CatalogResource.java @@ -0,0 +1,488 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.server.http; + +import com.google.common.base.Strings; +import org.apache.curator.shaded.com.google.common.collect.Lists; +import org.apache.druid.catalog.Actions; +import org.apache.druid.catalog.CatalogStorage; +import org.apache.druid.catalog.SchemaRegistry.SchemaSpec; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableMetadata; +import org.apache.druid.catalog.TableSpec; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.Pair; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.metadata.catalog.CatalogManager; +import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; +import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; +import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthorizationUtils; +import 
org.apache.druid.server.security.ForbiddenException; +import org.apache.druid.server.security.ResourceType; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * REST endpoint for user and internal catalog actions. Catalog actions + * occur at the global level (all schemas), the schema level, or the + * table level. + * + * @see {@link CatalogListenerResource} for the client-side API. + */ +@Path(CatalogResource.ROOT_PATH) +public class CatalogResource +{ + public static final String ROOT_PATH = "/druid/coordinator/v1/catalog"; + + private final CatalogStorage catalog; + + @Inject + public CatalogResource(CatalogStorage catalog) + { + this.catalog = catalog; + } + + /** + * Create a new table within the indicated schema. + * + * @param table The table specification to create. + * @param ifNew Whether to skip the action if the table already exists. + * This is the same as the SQL IF NOT EXISTS clause. If {@code false}, + * then an error is raised if the table exists. If {@code true}, then + * the action silently does nothing if the table exists. Primarily for + * use in scripts. + * @param req the HTTP request used for authorization. 
+ * @return the version number of the table + */ + @POST + @Path("/tables") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response createTable( + TableMetadata table, + @QueryParam("ifnew") boolean ifNew, + @Context final HttpServletRequest req) + { + String dbSchema = table.resolveDbSchema(); + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + SchemaSpec schema = result.rhs; + if (!schema.writable()) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format("Cannot create tables in schema %s", dbSchema)); + } + table = table.withSchema(dbSchema); + try { + table.validate(); + } + catch (IAE e) { + return Actions.badRequest(Actions.INVALID, e.getMessage()); + } + TableSpec spec = table.spec(); + if (!schema.accepts(spec)) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Cannot create tables of type %s in schema %s", + spec == null ? "null" : spec.getClass().getSimpleName(), + dbSchema)); + } + try { + catalog.authorizer().authorizeTable(schema, table.name(), Action.WRITE, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + try { + long createVersion = catalog.tables().create(table); + return Actions.okWithVersion(createVersion); + } + catch (DuplicateKeyException e) { + if (!ifNew) { + return Actions.badRequest( + Actions.DUPLICATE_ERROR, + StringUtils.format( + "A table of name %s.%s aleady exists", + table.dbSchema(), + table.name())); + } else { + return Actions.okWithVersion(0); + } + } + catch (Exception e) { + return Actions.exception(e); + } + } + + /** + * Update a table within the given schema. + * + * @param dbSchema The name of the Druid schema, which must be writable + * and the user must have at least read access. + * @param name The name of the table definition to modify. The user must + * have write access to the table. + * @param spec The new table definition. 
+ * @param version An optional table version. If provided, the metadata DB + * entry for the table must be at this exact version or the update + * will fail. (Provides "optimistic locking.") If omitted (that is, + * if zero), then no update conflict change is done. + * @param req the HTTP request used for authorization. + * @return the new version number of the table + */ + @POST + @Path("/tables/{dbSchema}/{name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + public Response updateTableDefn( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + TableSpec spec, + @QueryParam("version") long version, + @Context final HttpServletRequest req) + { + try { + if (spec != null) { + spec.validate(); + } + } + catch (IAE e) { + return Actions.badRequest(Actions.INVALID, e.getMessage()); + } + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + if (Strings.isNullOrEmpty(name)) { + return Actions.badRequest(Actions.INVALID, "Table name is required"); + } + SchemaSpec schema = result.rhs; + if (!schema.writable()) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format("Cannot update tables in schema %s", dbSchema)); + } + if (!schema.accepts(spec)) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format( + "Cannot update tables to type %s in schema %s", + spec == null ? 
"null" : spec.getClass().getSimpleName(), + dbSchema)); + } + try { + catalog.authorizer().authorizeTable(schema, name, Action.WRITE, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + try { + CatalogManager tableMgr = catalog.tables(); + TableId tableId = new TableId(dbSchema, name); + long newVersion; + if (version == 0) { + newVersion = tableMgr.updateDefn(tableId, spec); + } else { + newVersion = tableMgr.updateSpec(tableId, spec, version); + } + return Actions.okWithVersion(newVersion); + } + catch (NotFoundException e) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + catch (OutOfDateException e) { + return Response + .status(Response.Status.BAD_REQUEST) + .entity( + Actions.error( + Actions.DUPLICATE_ERROR, + "The table entry not found or is older than the given version: reload and retry")) + .build(); + } + catch (Exception e) { + return Actions.exception(e); + } + } + + /** + * Retrieves the definition of the given table. + *

+ * Returns a 404 (NOT FOUND) error if the table definition does not exist. + * Note that this check is only for the definition; the table (or + * datasource) itself may exist. Similarly, this call may return a definition + * even if there is no datasource of the same name (typically occurs when + * the definition is created before the datasource itself.) + * + * @param dbSchema The Druid schema. The user must have read access. + * @param name The name of the table within the schema. The user must have + * read access. + * @param req the HTTP request used for authorization. + * @return the definition for the table, if any. + */ + @GET + @Path("/tables/{dbSchema}/{name}") + @Produces(MediaType.APPLICATION_JSON) + public Response getTable( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + @Context final HttpServletRequest req) + { + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + if (Strings.isNullOrEmpty(name)) { + return Actions.badRequest(Actions.INVALID, "Table name is required"); + } + try { + catalog.authorizer().authorizeTable(result.rhs, name, Action.READ, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + try { + TableId tableId = new TableId(dbSchema, name); + TableMetadata table = catalog.tables().read(tableId); + if (table == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } + return Response.ok().entity(table).build(); + } + catch (Exception e) { + return Actions.exception(e); + } + } + + /** + * Retrieves the list of all Druid schema names. At present, Druid does + * not impose security on schemas, only tables within schemas. + */ + @GET + @Path("/schemas") + @Produces(MediaType.APPLICATION_JSON) + public Response listSchemas( + @Context final HttpServletRequest req) + { + // No good resource to use: we really need finer-grain control. 
+ catalog.authorizer().authorizeAccess(ResourceType.STATE, "schemas", Action.READ, req); + return Response.ok().entity(catalog.schemaRegistry().names()).build(); + } + + /** + * Retrieves the list of all Druid table names for which the user has at + * least read access. + */ + @GET + @Path("/tables") + @Produces(MediaType.APPLICATION_JSON) + public Response listTables( + @Context final HttpServletRequest req) + { + List tables = catalog.tables().list(); + Iterable filtered = AuthorizationUtils.filterAuthorizedResources( + req, + tables, + tableId -> { + SchemaSpec schema = catalog.resolveSchema(tableId.schema()); + if (schema == null) { + // Should never occur. + return null; + } + return Collections.singletonList( + catalog.authorizer().resourceAction(schema, tableId.name(), Action.READ)); + }, + catalog.authorizer().mapper()); + List filteredList = new ArrayList<>(); + for (TableId tableId : filtered) { + filteredList.add(tableId); + } + return Response.ok().entity(filteredList).build(); + } + + /** + * Retrieves the list of table names within the given schema for which the + * user has at least read access. This returns the list of table definitions + * which will probably differ from the list of actual tables. For example, for + * the read-only schemas, there will be no table definitions. + * + * @param dbSchema The Druid schema to query. The user must have read access. 
+ */ + @GET + @Path("/tables/{dbSchema}") + @Produces(MediaType.APPLICATION_JSON) + public Response listTables( + @PathParam("dbSchema") String dbSchema, + @Context final HttpServletRequest req) + { + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + SchemaSpec schema = result.rhs; + List tables = catalog.tables().list(dbSchema); + Iterable filtered = AuthorizationUtils.filterAuthorizedResources( + req, + tables, + name -> + Collections.singletonList( + catalog.authorizer().resourceAction(schema, name, Action.READ)), + catalog.authorizer().mapper()); + return Response.ok().entity(Lists.newArrayList(filtered)).build(); + } + + /** + * Deletes the table definition (but not the underlying table or datasource) + * for the given schema and table. + * + * @param dbSchema The name of the schema that holds the table. + * @param name The name of the table definition to delete. The user must have + * write access. + * @param ifExists Optional flag. If {@code false} (the default), 404 (NOT FOUND) + * error is returned if the table does not exist. 
If {@code true}, + * then acts like the SQL IF EXISTS clause and does not return an + * error if the table does not exist, + */ + @DELETE + @Path("/tables/{dbSchema}/{name}") + @Produces(MediaType.APPLICATION_JSON) + public Response deleteTable( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + @QueryParam("ifExists") boolean ifExists, + @Context final HttpServletRequest req) + { + TableId tableId = new TableId(dbSchema, name); + Pair result = validateSchema(tableId.schema()); + if (result.lhs != null) { + return result.lhs; + } + SchemaSpec schema = result.rhs; + if (!schema.writable()) { + return Actions.badRequest( + Actions.INVALID, + StringUtils.format("Cannot delete tables from schema %s", tableId.schema())); + } + if (Strings.isNullOrEmpty(name)) { + return Actions.badRequest(Actions.INVALID, "Table name is required"); + } + try { + catalog.authorizer().authorizeTable(schema, tableId.name(), Action.WRITE, req); + } + catch (ForbiddenException e) { + return Actions.forbidden(e); + } + try { + if (!catalog.tables().delete(tableId) && !ifExists) { + return Actions.notFound(tableId.sqlName()); + } + } + catch (Exception e) { + return Actions.exception(e); + } + return Actions.ok(); + } + + public static final String SCHEMA_SYNC = "/sync/{dbSchema}"; + + /** + * Synchronization request from the Broker for a database schema. Requests all + * table definitions known to the catalog. Used to prime a cache on first access. + * After that, the Coordinator will push updates to Brokers. Returns the full + * list of table details. + * + * It is expected that the number of table definitions will be of small or moderate + * size, so no provision is made to handle very large lists. 
+ */ + @GET + @Path(SCHEMA_SYNC) + @Produces(MediaType.APPLICATION_JSON) + public Response syncSchema( + @PathParam("dbSchema") String dbSchema, + @Context final HttpServletRequest req + ) + { + Pair result = validateSchema(dbSchema); + if (result.lhs != null) { + return result.lhs; + } + SchemaSpec schema = result.rhs; + List tables = catalog.tables().listDetails(dbSchema); + Iterable filtered = AuthorizationUtils.filterAuthorizedResources( + req, + tables, + table -> + Collections.singletonList( + catalog.authorizer().resourceAction(schema, table.name(), Action.READ)), + catalog.authorizer().mapper()); + return Response.ok().entity(Lists.newArrayList(filtered)).build(); + } + + public static final String TABLE_SYNC = "/sync/{dbSchema}/{name}"; + + /** + * Synchronization request from the Broker for information about a specific table + * (datasource). Done on first access to the table by any query. After that, the + * Coordinator pushes updates to the Broker on any changes. + */ + @GET + @Path(TABLE_SYNC) + @Produces(MediaType.APPLICATION_JSON) + public Response syncTable( + @PathParam("dbSchema") String dbSchema, + @PathParam("name") String name, + @Context final HttpServletRequest req) + { + return getTable(dbSchema, name, req); + } + + private Pair validateSchema(String dbSchema) + { + if (Strings.isNullOrEmpty(dbSchema)) { + return Pair.of(Actions.badRequest(Actions.INVALID, "Schema name is required"), null); + } + SchemaSpec schema = catalog.resolveSchema(dbSchema); + if (schema == null) { + return Pair.of(Actions.notFound( + StringUtils.format("Unknown schema %s", dbSchema)), + null); + } + return Pair.of(null, schema); + } +} diff --git a/server/src/main/java/org/apache/druid/server/security/Access.java b/server/src/main/java/org/apache/druid/server/security/Access.java index 0c86a42a1a21..d30914fdfc8e 100644 --- a/server/src/main/java/org/apache/druid/server/security/Access.java +++ b/server/src/main/java/org/apache/druid/server/security/Access.java @@ 
-24,6 +24,7 @@ public class Access { public static final Access OK = new Access(true); + public static final Access DENIED = new Access(false); private final boolean allowed; private final String message; diff --git a/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java b/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java index 361cb434a7c6..d41cebfe44c8 100644 --- a/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java +++ b/server/src/main/java/org/apache/druid/server/security/AuthenticationResult.java @@ -29,26 +29,26 @@ public class AuthenticationResult { /** - * the identity of the requester + * Identity of the requester. */ private final String identity; /** - * the name of the Authorizer that should handle the authenticated request. + * Name of the Authorizer that should handle the authenticated request. */ private final String authorizerName; - /** - * Name of authenticator whom created the results + * Name of authenticator whom created the results. * - * If you found your self asking why the authenticatedBy field can be null please read this + * If you found yourself asking why the authenticatedBy field can be null please read this * https://github.com/apache/druid/pull/5706#discussion_r185940889 */ @Nullable private final String authenticatedBy; + /** - * parameter containing additional context information from an Authenticator + * Additional context information from an Authenticator. 
*/ @Nullable private final Map context; diff --git a/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java b/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java index 2b4449826476..2d862f794274 100644 --- a/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java +++ b/server/src/main/java/org/apache/druid/server/security/AuthorizationUtils.java @@ -240,7 +240,7 @@ public static Iterable filterAuthorizedResources( * If every resource-action in the iterable is authorized, the resource will be added to the filtered resources. * * If there is an authorization failure for one of the resource-actions, the resource will not be - * added to the returned filtered resources.. + * added to the returned filtered resources. * * If the resourceActionGenerator returns null for a resource, that resource will not be added to the filtered * resources. @@ -322,7 +322,6 @@ public static Map> filterAuthorizedRes final AuthorizerMapper authorizerMapper ) { - if (request.getAttribute(AuthConfig.DRUID_ALLOW_UNSECURED_PATH) != null) { return unfilteredResources; } diff --git a/server/src/main/java/org/apache/druid/server/security/ForbiddenException.java b/server/src/main/java/org/apache/druid/server/security/ForbiddenException.java index 7de37d677d84..e10c722be5ac 100644 --- a/server/src/main/java/org/apache/druid/server/security/ForbiddenException.java +++ b/server/src/main/java/org/apache/druid/server/security/ForbiddenException.java @@ -27,8 +27,8 @@ import java.util.function.Function; /** - * Throw this when a request is unauthorized and we want to send a 403 response back, Jersey exception mapper will - * take care of sending the response. + * Throw this when a request is unauthorized and we want to send a 403 response back, + * Jersey exception mapper will take care of sending the response. 
*/ public class ForbiddenException extends RuntimeException implements SanitizableException { @@ -48,7 +48,7 @@ public ForbiddenException(@JsonProperty("errorMessage") String msg) @JsonProperty public String getErrorMessage() { - return super.getMessage(); + return getMessage(); } @Override diff --git a/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java b/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java new file mode 100644 index 000000000000..b5ec77caeb8f --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/CacheNotifierTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.druid.catalog.RestUpdateSender.RestSender; +import org.apache.druid.java.util.http.client.response.StatusResponseHolder; +import org.apache.druid.server.DruidNode; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.junit.Test; + +import java.net.URL; +import java.util.Arrays; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.junit.Assert.assertEquals; + +public class CacheNotifierTest +{ + private static class MockSender implements Consumer + { + int sendCount; + + @Override + public void accept(byte[] update) + { + assertEquals(sendCount++, update[0]); + } + } + + @Test + public void testNotifier() + { + MockSender sender = new MockSender(); + CacheNotifier notifier = new CacheNotifier("test", sender); + notifier.start(); + for (int i = 0; i < 100; i++) { + byte[] msg = new byte[] {(byte) i}; + notifier.send(msg); + } + notifier.stopGracefully(); + assertEquals(100, sender.sendCount); + } + + private static class MockRestSender implements RestSender + { + int sendCount; + + @Override + public ListenableFuture send(URL listenerURL, byte[] serializedEntity) + { + sendCount++; + StatusResponseHolder holder = new StatusResponseHolder(HttpResponseStatus.ACCEPTED, new StringBuilder()); + return Futures.immediateFuture(holder); + } + } + + @Test + public void testRestUpdateSender() + { + DruidNode node1 = new DruidNode("service", "host1", true, 1000, 0, true, false); + DruidNode node2 = new DruidNode("service", "host2", true, 1000, 0, true, false); + List nodes = Arrays.asList(node1, node2); + Supplier> nodeSupplier = () -> nodes; + MockRestSender restSender = new MockRestSender(); + RestUpdateSender updateSender = new RestUpdateSender( + "test", + nodeSupplier, + restSender, + "/test/foo", + 1000); + for 
(int i = 0; i < 100; i++) { + byte[] msg = new byte[] {(byte) i}; + updateSender.accept(msg); + } + assertEquals(200, restSender.sendCount); + } + + @Test + public void testStack() + { + DruidNode node1 = new DruidNode("service", "host1", true, 1000, 0, true, false); + DruidNode node2 = new DruidNode("service", "host2", true, 1000, 0, true, false); + List nodes = Arrays.asList(node1, node2); + Supplier> nodeSupplier = () -> nodes; + MockRestSender restSender = new MockRestSender(); + RestUpdateSender updateSender = new RestUpdateSender( + "test", + nodeSupplier, + restSender, + "/test/foo", + 1000); + CacheNotifier notifier = new CacheNotifier("test", updateSender); + notifier.start(); + for (int i = 0; i < 100; i++) { + byte[] msg = new byte[] {(byte) i}; + notifier.send(msg); + } + notifier.stopGracefully(); + assertEquals(200, restSender.sendCount); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java b/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java new file mode 100644 index 000000000000..69fba1b29487 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/CatalogObjectTest.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.metadata.catalog.CatalogManager.TableState; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +public class CatalogObjectTest +{ + @Test + public void testMinimalTable() + { + TableMetadata table = new TableMetadata( + TableId.DRUID_SCHEMA, + "foo", + "bob", + 10, + 20, + TableState.ACTIVE, + null); + table.validate(); + assertEquals(TableId.DRUID_SCHEMA, table.dbSchema()); + assertEquals("foo", table.name()); + assertEquals("bob", table.owner()); + assertEquals(10, table.creationTime()); + assertEquals(20, table.updateTime()); + assertEquals(TableState.ACTIVE, table.state()); + assertNull(table.spec()); + + try { + table = new TableMetadata( + null, + "foo", + "bob", + 10, + 20, + TableState.ACTIVE, + null); + table.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + try { + table = new TableMetadata( + TableId.DRUID_SCHEMA, + null, + "bob", + 10, + 20, + TableState.ACTIVE, + null); + table.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + } + + @Test + public void testSpec() + { + DatasourceSpec spec = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + TableMetadata table = new TableMetadata( + TableId.DRUID_SCHEMA, + "foo", + "bob", + 10, + 20, + TableState.ACTIVE, + spec); + table.validate(); + assertSame(spec, table.spec()); + + // Segment grain is required. 
+ try { + spec = DatasourceSpec.builder() + .build(); + table = new TableMetadata( + "wrong", + "foo", + "bob", + 10, + 20, + TableState.ACTIVE, + spec); + table.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + } + + @Test + public void testConversions() + { + DatasourceSpec spec = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + TableMetadata table = TableMetadata.newSegmentTable( + "ds", + spec); + assertEquals(TableId.datasource("ds"), table.id()); + assertEquals(TableState.ACTIVE, table.state()); + assertEquals(0, table.updateTime()); + assertSame(spec, table.spec()); + + TableMetadata table2 = TableMetadata.newSegmentTable("ds", spec); + assertEquals(table, table2); + + TableMetadata table3 = table2.asUpdate(20); + assertEquals(20, table3.updateTime()); + } + + @Test + public void testEquals() + { + EqualsVerifier.forClass(TableMetadata.class) + .usingGetClass() + .verify(); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java b/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java new file mode 100644 index 000000000000..50dc812068ed --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/CatalogResourceTest.java @@ -0,0 +1,491 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.metadata.TestDerbyConnector; +import org.apache.druid.server.http.CatalogResource; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import javax.ws.rs.core.Response; + +import java.util.List; +import java.util.Map; + +import static org.apache.druid.catalog.DummyRequest.deleteBy; +import static org.apache.druid.catalog.DummyRequest.getBy; +import static org.apache.druid.catalog.DummyRequest.postBy; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Test of REST API operations for the table catalog. + */ +public class CatalogResourceTest +{ + @Rule + public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule(); + + private CatalogTests.DbFixture dbFixture; + private CatalogResource resource; + + @Before + public void setUp() + { + dbFixture = new CatalogTests.DbFixture(derbyConnectorRule); + resource = new CatalogResource(dbFixture.storage); + } + + @After + public void tearDown() + { + CatalogTests.tearDown(dbFixture); + } + + private static long getVersion(Response resp) + { + @SuppressWarnings("unchecked") + Map result = (Map) resp.getEntity(); + return (Long) result.get("version"); + } + + @Test + public void testCreate() + { + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + + // Missing schema name: infer the schema. 
+ String tableName = "create"; + TableMetadata table = TableMetadata.newTable( + null, + "create1", + defn); + Response resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // Blank schema name: infer the schema. + table = TableMetadata.newTable( + "", + "create2", + defn); + resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // Missing table name + table = TableMetadata.newTable(TableId.DRUID_SCHEMA, null, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Unknown schema + table = TableMetadata.newTable("bogus", tableName, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Immutable schema + table = TableMetadata.newTable(TableId.CATALOG_SCHEMA, tableName, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Wrong definition type. 
+ table = TableMetadata.newTable(TableId.INPUT_SCHEMA, tableName, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // No permissions + table = TableMetadata.newTable(TableId.DRUID_SCHEMA, tableName, defn); + resp = resource.createTable(table, false, postBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Read permission + resp = resource.createTable(table, false, postBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Write permission + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getVersion(resp) > 0); + + // Duplicate + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Duplicate, "if not exists" + resp = resource.createTable(table, true, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertEquals(0, getVersion(resp)); + + // Input source + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + InputFormat inputFormat = CatalogTests.csvFormat(); + InputTableSpec inputDefn = InputTableSpec + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .build(); + table = TableMetadata.newTable(TableId.INPUT_SCHEMA, "input", inputDefn); + resp = resource.createTable(table, true, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + } + + @Test + public void testUpdate() + { + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + + // Missing schema name + String tableName = "update"; + Response resp = 
resource.updateTableDefn("", tableName, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Missing table name + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, null, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Unknown schema + resp = resource.updateTableDefn("bogus", tableName, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Immutable schema + resp = resource.updateTableDefn(TableId.CATALOG_SCHEMA, tableName, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // TODO: Wrong definition type. + + // Does not exist + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, 0, postBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Create the table + TableMetadata table = TableMetadata.newTable( + TableId.DRUID_SCHEMA, + "update", + defn); + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + long version = getVersion(resp); + + // No update permission + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, 0, postBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Out-of-date version + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, 10, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Valid version + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, version, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + 
assertTrue(getVersion(resp) > version); + version = getVersion(resp); + + // Overwrite + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, tableName, defn, 0, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getVersion(resp) > version); + } + + @Test + public void testRead() + { + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + + // Missing schema name + String tableName = "read"; + Response resp = resource.getTable("", tableName, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Missing table name + resp = resource.getTable(TableId.DRUID_SCHEMA, null, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Unknown schema + resp = resource.getTable("bogus", tableName, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Does not exist + resp = resource.getTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Create the table + TableMetadata table = TableMetadata.newTable( + TableId.DRUID_SCHEMA, + tableName, + defn); + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + table = table.fromInsert(TableId.DRUID_SCHEMA, getVersion(resp)); + + // No read permission + resp = resource.getTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Valid + resp = resource.getTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + TableMetadata read = (TableMetadata) resp.getEntity(); + 
assertEquals(table, read); + + // Internal sync API + resp = resource.syncTable(TableId.DRUID_SCHEMA, tableName, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + read = (TableMetadata) resp.getEntity(); + assertEquals(table, read); + } + + @SuppressWarnings("unchecked") + private List getTableIdList(Response resp) + { + return (List) resp.getEntity(); + } + + @SuppressWarnings("unchecked") + private List getTableList(Response resp) + { + return (List) resp.getEntity(); + } + + @SuppressWarnings("unchecked") + private List getDetailsList(Response resp) + { + return (List) resp.getEntity(); + } + + @Test + public void testList() + { + // No entries + Response resp = resource.listTables(getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List tableIds = getTableIdList(resp); + assertTrue(tableIds.isEmpty()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List tables = getTableList(resp); + assertTrue(tables.isEmpty()); + + // Missing schema + resp = resource.listTables(null, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Invalid schema + resp = resource.listTables("bogus", getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Create a table + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + TableMetadata table = TableMetadata.newTable(TableId.DRUID_SCHEMA, "list", defn); + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // No read access + resp = resource.listTables(getBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + 
tableIds = getTableIdList(resp); + assertTrue(tableIds.isEmpty()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.DENY_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertTrue(tables.isEmpty()); + + // Read access + resp = resource.listTables(getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tableIds = getTableIdList(resp); + assertEquals(1, tableIds.size()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertEquals(1, tables.size()); + + resp = resource.listTables(TableId.SYSTEM_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertTrue(tables.isEmpty()); + + // Internal sync schema API + resp = resource.syncSchema(TableId.SYSTEM_SCHEMA, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getDetailsList(resp).isEmpty()); + + resp = resource.syncSchema(TableId.DRUID_SCHEMA, getBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List details = getDetailsList(resp); + assertEquals(1, details.size()); + } + + @Test + public void testDelete() + { + // Missing schema name + String tableName = "delete"; + Response resp = resource.deleteTable("", tableName, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Missing table name + resp = resource.deleteTable(TableId.DRUID_SCHEMA, null, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Unknown schema + resp = resource.deleteTable("bogus", tableName, false, 
deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + // Immutable schema + resp = resource.deleteTable(TableId.CATALOG_SCHEMA, tableName, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus()); + + // Does not exist + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, false, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, true, deleteBy(DummyRequest.SUPER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // Create the table + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + TableMetadata table = TableMetadata.newTable( + TableId.DRUID_SCHEMA, + tableName, + defn); + resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // No write permission + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, false, deleteBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.FORBIDDEN.getStatusCode(), resp.getStatus()); + + // Write permission + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, false, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, false, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), resp.getStatus()); + + resp = resource.deleteTable(TableId.DRUID_SCHEMA, tableName, true, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + } + + @Test + public void testLifecycle() + { + // Operations for one table - create + String table1Name = "lifecycle1"; + DatasourceSpec defn = 
DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + TableMetadata table = TableMetadata.newTable(TableId.DRUID_SCHEMA, table1Name, defn); + Response resp = resource.createTable(table, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + long version = getVersion(resp); + table = table.fromInsert(TableId.DRUID_SCHEMA, version); + + // read + resp = resource.getTable(TableId.DRUID_SCHEMA, table1Name, postBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + TableMetadata read = (TableMetadata) resp.getEntity(); + assertEquals(table, read); + + // list + resp = resource.listTables(getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List tableIds = getTableIdList(resp); + assertEquals(1, tableIds.size()); + assertEquals(table.id(), tableIds.get(0)); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + List tables = getTableList(resp); + assertEquals(1, tables.size()); + assertEquals(table.name(), tables.get(0)); + + // update + DatasourceSpec defn2 = DatasourceSpec.builder() + .segmentGranularity("PT1H") + .build(); + resp = resource.updateTableDefn(TableId.DRUID_SCHEMA, table1Name, defn2, version, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + assertTrue(getVersion(resp) > version); + version = getVersion(resp); + + // verify update + resp = resource.getTable(TableId.DRUID_SCHEMA, table1Name, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + read = (TableMetadata) resp.getEntity(); + assertEquals(table.creationTime(), read.creationTime()); + assertEquals(version, read.updateTime()); + assertEquals(defn2, read.spec()); + + // add second table + String table2Name = 
"lifecycle2"; + TableMetadata table2 = TableMetadata.newTable(TableId.DRUID_SCHEMA, table2Name, defn); + resp = resource.createTable(table2, false, postBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + // verify lists + resp = resource.listTables(getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tableIds = getTableIdList(resp); + assertEquals(2, tableIds.size()); + assertEquals(table.id(), tableIds.get(0)); + assertEquals(table2.id(), tableIds.get(1)); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertEquals(2, tables.size()); + assertEquals(table.name(), tables.get(0)); + assertEquals(table2.name(), tables.get(1)); + + // delete and verify + resp = resource.deleteTable(TableId.DRUID_SCHEMA, table1Name, false, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertEquals(1, tables.size()); + + resp = resource.deleteTable(TableId.DRUID_SCHEMA, table2Name, false, deleteBy(DummyRequest.WRITER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + + resp = resource.listTables(TableId.DRUID_SCHEMA, getBy(DummyRequest.READER_USER)); + assertEquals(Response.Status.OK.getStatusCode(), resp.getStatus()); + tables = getTableList(resp); + assertEquals(0, tables.size()); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/CatalogTests.java b/server/src/test/java/org/apache/druid/catalog/CatalogTests.java new file mode 100644 index 000000000000..83599667e7c5 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/CatalogTests.java @@ 
-0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.impl.CsvInputFormat; +import org.apache.druid.jackson.DefaultObjectMapper; +import org.apache.druid.metadata.TestDerbyConnector.DerbyConnectorRule; +import org.apache.druid.metadata.catalog.CatalogManager; +import org.apache.druid.metadata.catalog.SQLCatalogManager; + +import java.util.Arrays; + +public class CatalogTests +{ + public static InputFormat csvFormat() + { + return new CsvInputFormat( + Arrays.asList("x", "y", "z"), + null, // listDelimiter + false, // hasHeaderRow + false, // findColumnsFromHeader + 0 // skipHeaderRows + ); + } + + public static final ObjectMapper JSON_MAPPER = new DefaultObjectMapper(); + + public static class DbFixture + { + public CatalogManager manager; + public CatalogStorage storage; + + public DbFixture(DerbyConnectorRule derbyConnectorRule) + { + MetastoreManager metastoreMgr = new MetastoreManagerImpl( + JSON_MAPPER, + derbyConnectorRule.getConnector(), + () -> derbyConnectorRule.getMetadataConnectorConfig(), + 
derbyConnectorRule.metadataTablesConfigSupplier() + ); + manager = new SQLCatalogManager(metastoreMgr); + manager.start(); + storage = new CatalogStorage(manager, DummyRequest.AUTH_MAPPER); + } + + public void tearDown() + { + if (manager != null) { + manager.stop(); + manager = null; + } + } + } + + public static void tearDown(DbFixture fixture) + { + if (fixture != null) { + fixture.tearDown(); + } + } + +} diff --git a/server/src/test/java/org/apache/druid/catalog/DatasourceSpecTest.java b/server/src/test/java/org/apache/druid/catalog/DatasourceSpecTest.java new file mode 100644 index 000000000000..a8e2fcdfb24a --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/DatasourceSpecTest.java @@ -0,0 +1,205 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.catalog.DatasourceColumnSpec.MeasureSpec; +import org.apache.druid.java.util.common.IAE; +import org.junit.Test; + +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Test of validation and serialization of the catalog table definitions. + */ +public class DatasourceSpecTest +{ + @Test + public void testMinimalBuilder() + { + // Minimum possible definition + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .build(); + + defn.validate(); + assertEquals("PT1D", defn.segmentGranularity()); + assertNull(defn.rollupGranularity()); + assertEquals(0, defn.targetSegmentRows()); + + DatasourceSpec copy = defn.toBuilder().build(); + assertEquals(defn, copy); + } + + @Test + public void testFullBuilder() + { + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + + defn.validate(); + assertEquals("PT1H", defn.segmentGranularity()); + assertEquals("PT1M", defn.rollupGranularity()); + assertEquals(1_000_000, defn.targetSegmentRows()); + + DatasourceSpec copy = defn.toBuilder().build(); + assertEquals(defn, copy); + } + + @Test + public void testProperties() + { + Map props = ImmutableMap.of( + "foo", 10, "bar", "mumble"); + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1M") + .properties(props) + .build(); + + defn.validate(); + assertEquals(props, defn.properties()); + + DatasourceSpec copy = defn.toBuilder().build(); + assertEquals(defn, copy); + } + + @Test + public void testColumns() + { 
+ DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .rollupGranularity("PT1M") + .column("a", null) + .column("b", "VARCHAR") + .measure("c", "BIGINT", "SUM") + .build(); + + defn.validate(); + List columns = defn.columns(); + assertEquals(3, columns.size()); + assertTrue(columns.get(0) instanceof DatasourceColumnSpec); + assertEquals("a", columns.get(0).name()); + assertNull(columns.get(0).sqlType()); + assertTrue(columns.get(1) instanceof DatasourceColumnSpec); + assertEquals("b", columns.get(1).name()); + assertEquals("VARCHAR", columns.get(1).sqlType()); + assertTrue(columns.get(2) instanceof MeasureSpec); + assertEquals("c", columns.get(2).name()); + assertEquals("BIGINT", columns.get(2).sqlType()); + assertEquals("SUM", ((MeasureSpec) columns.get(2)).aggregateFn()); + + DatasourceSpec copy = defn.toBuilder().build(); + assertEquals(defn, copy); + + try { + defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .column("c", "FOO") + .build(); + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + try { + defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .measure("c", "BIGINT", "SUM") + .build(); + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + try { + defn = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .column("a", null) + .column("a", null) + .build(); + defn.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + } + + @Test + public void testValidation() + { + // Ignore rollup grain for detail table + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1H") + .build(); + + assertNull(defn.rollupGranularity()); + assertEquals("PT1H", defn.segmentGranularity()); + + // Negative segment size mapped to 0 + defn = DatasourceSpec.builder() + .segmentGranularity("PT1H") + .targetSegmentRows(-1) + .build(); + assertEquals(0, defn.targetSegmentRows()); + } + + @Test + public void testSerialization() + { + ObjectMapper mapper = new 
ObjectMapper(); + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + + // Round-trip + TableSpec defn2 = TableSpec.fromBytes(mapper, defn.toBytes(mapper)); + assertEquals(defn, defn2); + + // Sanity check of toString, which uses JSON + assertNotNull(defn.toString()); + } + + @Test + public void testEquals() + { + EqualsVerifier.forClass(DatasourceSpec.class) + .usingGetClass() + .verify(); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/DummyRequest.java b/server/src/test/java/org/apache/druid/catalog/DummyRequest.java new file mode 100644 index 000000000000..5866b16528f4 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/DummyRequest.java @@ -0,0 +1,541 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.google.common.collect.ImmutableMap; +import org.apache.druid.server.security.Access; +import org.apache.druid.server.security.Action; +import org.apache.druid.server.security.AuthConfig; +import org.apache.druid.server.security.AuthenticationResult; +import org.apache.druid.server.security.Authorizer; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.server.security.Resource; +import org.apache.druid.server.security.ResourceType; + +import javax.servlet.AsyncContext; +import javax.servlet.DispatcherType; +import javax.servlet.RequestDispatcher; +import javax.servlet.ServletContext; +import javax.servlet.ServletInputStream; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.Cookie; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; +import javax.servlet.http.HttpUpgradeHandler; +import javax.servlet.http.Part; + +import java.io.BufferedReader; +import java.security.Principal; +import java.util.Collection; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * Test-only implementation of an HTTP request. Allows us to control + * aspects of the request without resorting to mocks. 
+ */ +public class DummyRequest implements HttpServletRequest +{ + protected static final String SUPER_USER = "super"; + protected static final String READER_USER = "reader"; + protected static final String WRITER_USER = "writer"; + protected static final String DENY_USER = "denyAll"; + + protected static final String TEST_AUTHORITY = "test"; + + protected static final String GET = "GET"; + protected static final String POST = "POST"; + protected static final String DELETE = "DELETE"; + + private static class TestAuthorizer implements Authorizer + { + @Override + public Access authorize( + AuthenticationResult authenticationResult, + Resource resource, + Action action + ) + { + final String userName = authenticationResult.getIdentity(); + if (DummyRequest.SUPER_USER.equals(userName)) { + return Access.OK; + } + if (ResourceType.DATASOURCE.equals(resource.getType())) { + if ("forbidden".equals(resource.getName())) { + return Access.DENIED; + } + return new Access( + DummyRequest.WRITER_USER.equals(userName) || + DummyRequest.READER_USER.equals(userName) && action == Action.READ); + } + return Access.OK; + } + } + + protected static final AuthorizerMapper AUTH_MAPPER = new AuthorizerMapper( + ImmutableMap.of(DummyRequest.TEST_AUTHORITY, new TestAuthorizer())); + + private final String method; + private final Map attribs = new HashMap<>(); + private final String contentType; + + public DummyRequest(String method, String userName) + { + this(method, userName, null); + } + + public DummyRequest(String method, String userName, String contentType) + { + this.method = method; + AuthenticationResult authResult = + new AuthenticationResult(userName, TEST_AUTHORITY, null, null); + attribs.put(AuthConfig.DRUID_AUTHENTICATION_RESULT, authResult); + this.contentType = contentType; + } + + public static HttpServletRequest postBy(String user) + { + return new DummyRequest(DummyRequest.POST, user); + } + + public static HttpServletRequest getBy(String user) + { + return new 
DummyRequest(DummyRequest.GET, user); + } + + public static HttpServletRequest deleteBy(String user) + { + return new DummyRequest(DummyRequest.DELETE, user); + } + + @Override + public Object getAttribute(String name) + { + return attribs.get(name); + } + + @Override + public Enumeration getAttributeNames() + { + return null; + } + + @Override + public String getCharacterEncoding() + { + return null; + } + + @Override + public void setCharacterEncoding(String env) + { + } + + @Override + public int getContentLength() + { + return 0; + } + + @Override + public long getContentLengthLong() + { + return 0; + } + + @Override + public String getContentType() + { + return contentType; + } + + @Override + public ServletInputStream getInputStream() + { + return null; + } + + @Override + public String getParameter(String name) + { + return null; + } + + @Override + public Enumeration getParameterNames() + { + return null; + } + + @Override + public String[] getParameterValues(String name) + { + return null; + } + + @Override + public Map getParameterMap() + { + return null; + } + + @Override + public String getProtocol() + { + return null; + } + + @Override + public String getScheme() + { + return null; + } + + @Override + public String getServerName() + { + return null; + } + + @Override + public int getServerPort() + { + return 0; + } + + @Override + public BufferedReader getReader() + { + return null; + } + + @Override + public String getRemoteAddr() + { + return null; + } + + @Override + public String getRemoteHost() + { + return null; + } + + @Override + public void setAttribute(String name, Object o) + { + attribs.put(name, o); + } + + @Override + public void removeAttribute(String name) + { + } + + @Override + public Locale getLocale() + { + return null; + } + + @Override + public Enumeration getLocales() + { + return null; + } + + @Override + public boolean isSecure() + { + return false; + } + + @Override + public RequestDispatcher getRequestDispatcher(String path) 
+ { + return null; + } + + @Override + public String getRealPath(String path) + { + return null; + } + + @Override + public int getRemotePort() + { + return 0; + } + + @Override + public String getLocalName() + { + return null; + } + + @Override + public String getLocalAddr() + { + return null; + } + + @Override + public int getLocalPort() + { + return 0; + } + + @Override + public ServletContext getServletContext() + { + return null; + } + + @Override + public AsyncContext startAsync() + { + return null; + } + + @Override + public AsyncContext startAsync(ServletRequest servletRequest, ServletResponse servletResponse) + { + return null; + } + + @Override + public boolean isAsyncStarted() + { + return false; + } + + @Override + public boolean isAsyncSupported() + { + return false; + } + + @Override + public AsyncContext getAsyncContext() + { + return null; + } + + @Override + public DispatcherType getDispatcherType() + { + return null; + } + + @Override + public String getAuthType() + { + return null; + } + + @Override + public Cookie[] getCookies() + { + return null; + } + + @Override + public long getDateHeader(String name) + { + return 0; + } + + @Override + public String getHeader(String name) + { + return null; + } + + @Override + public Enumeration getHeaders(String name) + { + return null; + } + + @Override + public Enumeration getHeaderNames() + { + return null; + } + + @Override + public int getIntHeader(String name) + { + return 0; + } + + @Override + public String getMethod() + { + return method; + } + + @Override + public String getPathInfo() + { + return null; + } + + @Override + public String getPathTranslated() + { + return null; + } + + @Override + public String getContextPath() + { + return null; + } + + @Override + public String getQueryString() + { + return null; + } + + @Override + public String getRemoteUser() + { + return null; + } + + @Override + public boolean isUserInRole(String role) + { + return false; + } + + @Override + public Principal 
getUserPrincipal() + { + return null; + } + + @Override + public String getRequestedSessionId() + { + return null; + } + + @Override + public String getRequestURI() + { + return null; + } + + @Override + public StringBuffer getRequestURL() + { + return null; + } + + @Override + public String getServletPath() + { + return null; + } + + @Override + public HttpSession getSession(boolean create) + { + return null; + } + + @Override + public HttpSession getSession() + { + return null; + } + + @Override + public String changeSessionId() + { + return null; + } + + @Override + public boolean isRequestedSessionIdValid() + { + return false; + } + + @Override + public boolean isRequestedSessionIdFromCookie() + { + return false; + } + + @Override + public boolean isRequestedSessionIdFromURL() + { + return false; + } + + @Override + public boolean isRequestedSessionIdFromUrl() + { + return false; + } + + @Override + public boolean authenticate(HttpServletResponse response) + { + return false; + } + + @Override + public void login(String username, String password) + { + } + + @Override + public void logout() + { + } + + @Override + public Collection getParts() + { + return null; + } + + @Override + public Part getPart(String name) + { + return null; + } + + @Override + public T upgrade(Class handlerClass) + { + return null; + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java b/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java new file mode 100644 index 000000000000..773f4027ee05 --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/InputTableSpecTest.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.java.util.common.IAE; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +public class InputTableSpecTest +{ + @Test + public void testMinimalBuilder() + { + // Minimum possible definition + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + InputFormat inputFormat = CatalogTests.csvFormat(); + InputTableSpec spec = InputTableSpec + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .build(); + + spec.validate(); + assertSame(inputSource, spec.inputSource()); + assertSame(inputFormat, spec.format()); + List columns = spec.columns(); + assertEquals(1, columns.size()); + assertEquals("a", columns.get(0).name()); + assertEquals("varchar", columns.get(0).sqlType()); + + InputTableSpec copy = spec.toBuilder().build(); + assertEquals(spec, copy); + } + + @Test + public void testValidation() + { + InputTableSpec spec = 
InputTableSpec.builder().build(); + try { + spec.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + spec = InputTableSpec + .builder() + .source(inputSource) + .build(); + try { + spec.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + InputFormat inputFormat = CatalogTests.csvFormat(); + spec = InputTableSpec + .builder() + .source(inputSource) + .format(inputFormat) + .build(); + try { + spec.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + try { + spec = InputTableSpec + .builder() + .source(inputSource) + .format(inputFormat) + .column(null, "VARCHAR") + .build(); + spec.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + spec = InputTableSpec + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", null) + .build(); + try { + spec.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + + spec = InputTableSpec + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .column("a", "varchar") + .build(); + try { + spec.validate(); + fail(); + } + catch (IAE e) { + // Expected + } + } + + @Test + public void testSerialization() + { + ObjectMapper mapper = new ObjectMapper(); + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + InputFormat inputFormat = CatalogTests.csvFormat(); + InputTableSpec spec1 = InputTableSpec + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .build(); + + // Round-trip + TableSpec spec2 = TableSpec.fromBytes(mapper, spec1.toBytes(mapper)); + assertEquals(spec1, spec2); + + // Sanity check of toString, which uses JSON + assertNotNull(spec1.toString()); + } + + @Test + public void testEquals() + { + EqualsVerifier.forClass(InputTableSpec.class) + .usingGetClass() + .verify(); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java 
b/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java new file mode 100644 index 000000000000..f275eaf288eb --- /dev/null +++ b/server/src/test/java/org/apache/druid/catalog/MetadataCatalogTest.java @@ -0,0 +1,317 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.smile.SmileFactory; +import org.apache.druid.catalog.ColumnSpec.ColumnKind; +import org.apache.druid.catalog.DatasourceColumnSpec.MeasureSpec; +import org.apache.druid.catalog.TableMetadata.TableType; +import org.apache.druid.data.input.InputFormat; +import org.apache.druid.data.input.InputSource; +import org.apache.druid.data.input.impl.InlineInputSource; +import org.apache.druid.metadata.TestDerbyConnector; +import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; +import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +public class MetadataCatalogTest +{ + @Rule + public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule(); + + private CatalogTests.DbFixture dbFixture; + private CatalogStorage storage; + private ObjectMapper jsonMapper; + private ObjectMapper smileMapper; + + @Before + public void setUp() + { + dbFixture = new CatalogTests.DbFixture(derbyConnectorRule); + storage = dbFixture.storage; + jsonMapper = new ObjectMapper(); + smileMapper = new ObjectMapper(new SmileFactory()); + } + + @After + public void tearDown() + { + CatalogTests.tearDown(dbFixture); + } + + @Test + public void testDirect() throws DuplicateKeyException, OutOfDateException + { + populateCatalog(); + MetadataCatalog catalog = new LocalMetadataCatalog(storage, storage.schemaRegistry); + verifyInitial(catalog); + alterCatalog(); + verifyAltered(catalog); + } + + @Test + public void testCached() throws 
DuplicateKeyException, OutOfDateException + { + populateCatalog(); + CachedMetadataCatalog catalog = new CachedMetadataCatalog(storage, storage.schemaRegistry); + storage.register(catalog); + verifyInitial(catalog); + alterCatalog(); + verifyAltered(catalog); + + // Also test the deletion case + TableId table2 = TableId.datasource("table2"); + storage.tables().delete(table2); + assertNull(storage.tables().read(table2)); + + List tables = catalog.tables(TableId.DRUID_SCHEMA); + assertEquals(2, tables.size()); + assertEquals("table1", tables.get(0).id().name()); + assertEquals("table3", tables.get(1).id().name()); + } + + @Test + public void testRemoteWithJson() throws DuplicateKeyException, OutOfDateException + { + doTestRemote(false); + } + + @Test + public void testRemoteWithSmile() throws DuplicateKeyException, OutOfDateException + { + doTestRemote(true); + } + + private void doTestRemote(boolean useSmile) throws DuplicateKeyException, OutOfDateException + { + populateCatalog(); + MockCatalogSync sync = new MockCatalogSync(storage, jsonMapper, smileMapper, useSmile); + MetadataCatalog catalog = sync.catalog(); + storage.register(sync); + verifyInitial(catalog); + alterCatalog(); + verifyAltered(catalog); + + // Also test the deletion case + TableId table2 = TableId.datasource("table2"); + storage.tables().delete(table2); + assertNull(storage.tables().read(table2)); + + List tables = catalog.tables(TableId.DRUID_SCHEMA); + assertEquals(2, tables.size()); + assertEquals("table1", tables.get(0).id().name()); + assertEquals("table3", tables.get(1).id().name()); + } + + /** + * Populate the catalog with a few items using the REST resource. 
+ * @throws DuplicateKeyException + */ + private void populateCatalog() throws DuplicateKeyException + { + DatasourceSpec spec = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .timeColumn() + .column("a", "VARCHAR") + .build(); + TableMetadata table = TableMetadata.newTable( + TableId.DRUID_SCHEMA, + "table1", + spec); + storage.tables().create(table); + + spec = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .rollupGranularity("PT1H") + .timeColumn() + .column("dim", "VARCHAR") + .measure("measure", "BIGINT", "SUM") + .build(); + table = TableMetadata.newTable( + TableId.DRUID_SCHEMA, + "table2", + spec); + storage.tables().create(table); + + InputSource inputSource = new InlineInputSource("a,b,1\nc,d,2\n"); + InputFormat inputFormat = CatalogTests.csvFormat(); + InputTableSpec inputSpec = InputTableSpec + .builder() + .source(inputSource) + .format(inputFormat) + .column("a", "varchar") + .build(); + table = TableMetadata.newTable( + TableId.INPUT_SCHEMA, + "input", + inputSpec); + storage.tables().create(table); + } + + private void verifyInitial(MetadataCatalog catalog) + { + { + TableId id = TableId.datasource("table1"); + TableMetadata table = catalog.resolveTable(id); + assertEquals(id, table.id()); + assertTrue(table.updateTime() > 0); + assertEquals(TableType.DATASOURCE, table.type()); + + DatasourceSpec dsSpec = (DatasourceSpec) table.spec(); + List cols = dsSpec.columns(); + assertEquals(2, cols.size()); + assertEquals("__time", cols.get(0).name()); + assertEquals("TIMESTAMP", cols.get(0).sqlType()); + assertEquals(ColumnKind.DETAIL, cols.get(0).kind()); + assertEquals("a", cols.get(1).name()); + assertEquals("VARCHAR", cols.get(1).sqlType()); + assertEquals(ColumnKind.DETAIL, cols.get(0).kind()); + + assertEquals("PT1D", dsSpec.segmentGranularity()); + assertTrue(dsSpec.isDetail()); + assertFalse(dsSpec.isRollup()); + assertNull(dsSpec.rollupGranularity()); + } + { + TableId id = TableId.datasource("table2"); + TableMetadata table = 
catalog.resolveTable(id); + assertEquals(id, table.id()); + assertTrue(table.updateTime() > 0); + assertEquals(TableType.DATASOURCE, table.type()); + + DatasourceSpec dsSpec = (DatasourceSpec) table.spec(); + List cols = dsSpec.columns(); + assertEquals(3, cols.size()); + assertEquals("__time", cols.get(0).name()); + assertEquals("TIMESTAMP", cols.get(0).sqlType()); + assertEquals(ColumnKind.DIMENSION, cols.get(0).kind()); + assertEquals("dim", cols.get(1).name()); + assertEquals("VARCHAR", cols.get(1).sqlType()); + assertEquals(ColumnKind.DIMENSION, cols.get(1).kind()); + assertEquals("measure", cols.get(2).name()); + assertEquals("BIGINT", cols.get(2).sqlType()); + assertEquals(ColumnKind.MEASURE, cols.get(2).kind()); + assertEquals("SUM", ((MeasureSpec) cols.get(2)).aggregateFn()); + + assertEquals("PT1D", dsSpec.segmentGranularity()); + assertFalse(dsSpec.isDetail()); + assertTrue(dsSpec.isRollup()); + assertEquals("PT1H", dsSpec.rollupGranularity()); + } + assertNull(catalog.resolveTable(TableId.datasource("table3"))); + { + TableId id = TableId.inputSource("input"); + TableMetadata table = catalog.resolveTable(id); + assertEquals(id, table.id()); + assertTrue(table.updateTime() > 0); + assertEquals(TableType.INPUT, table.type()); + + InputTableSpec inputSpec = (InputTableSpec) table.spec(); + List cols = inputSpec.columns(); + assertEquals(1, cols.size()); + assertEquals("a", cols.get(0).name()); + assertEquals("varchar", cols.get(0).sqlType()); + assertEquals(ColumnKind.INPUT, cols.get(0).kind()); + + assertNotNull(inputSpec.inputSource()); + assertNotNull(inputSpec.format()); + } + + List tables = catalog.tables(TableId.DRUID_SCHEMA); + assertEquals(2, tables.size()); + assertEquals("table1", tables.get(0).id().name()); + assertEquals("table2", tables.get(1).id().name()); + + tables = catalog.tables(TableId.INPUT_SCHEMA); + assertEquals(1, tables.size()); + assertEquals("input", tables.get(0).id().name()); + } + + private void alterCatalog() throws 
DuplicateKeyException, OutOfDateException + { + // Add a column to table 1 + TableId id1 = TableId.datasource("table1"); + TableMetadata table1 = storage.tables().read(id1); + assertNotNull(table1); + + DatasourceSpec spec = (DatasourceSpec) table1.spec(); + spec = spec.toBuilder() + .column("b", "DOUBLE") + .build(); + storage.tables().updateSpec(id1, spec, table1.updateTime()); + + // Create a table 3 + spec = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .timeColumn() + .column("x", "FLOAT") + .build(); + TableMetadata table = TableMetadata.newTable( + TableId.DRUID_SCHEMA, + "table3", + spec); + storage.tables().create(table); + } + + private void verifyAltered(MetadataCatalog catalog) + { + { + TableId id = TableId.datasource("table1"); + TableMetadata table = catalog.resolveTable(id); + + DatasourceSpec dsSpec = (DatasourceSpec) table.spec(); + List cols = dsSpec.columns(); + assertEquals(3, cols.size()); + assertEquals("__time", cols.get(0).name()); + assertEquals("a", cols.get(1).name()); + assertEquals("b", cols.get(2).name()); + assertEquals("DOUBLE", cols.get(2).sqlType()); + assertEquals(ColumnKind.DETAIL, cols.get(2).kind()); + } + { + TableId id = TableId.datasource("table3"); + TableMetadata table = catalog.resolveTable(id); + + DatasourceSpec dsSpec = (DatasourceSpec) table.spec(); + List cols = dsSpec.columns(); + assertEquals(2, cols.size()); + assertEquals("__time", cols.get(0).name()); + assertEquals("x", cols.get(1).name()); + } + + List tables = catalog.tables(TableId.DRUID_SCHEMA); + assertEquals(3, tables.size()); + assertEquals("table1", tables.get(0).id().name()); + assertEquals("table2", tables.get(1).id().name()); + assertEquals("table3", tables.get(2).id().name()); + } +} diff --git a/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java b/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java new file mode 100644 index 000000000000..45f46ca90959 --- /dev/null +++ 
b/server/src/test/java/org/apache/druid/catalog/MockCatalogSync.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes; +import org.apache.druid.catalog.MetadataCatalog.CatalogListener; +import org.apache.druid.server.http.CatalogListenerResource; + +import javax.ws.rs.core.MediaType; + +import java.io.ByteArrayInputStream; + +/** + * Simulates a network sync from catalog (Coordinator) to consumer (Broker). 
+ */ +public class MockCatalogSync implements CatalogListener +{ + private final CatalogListenerResource listenerResource; + private final CachedMetadataCatalog catalog; + private final boolean useSmile; + private final ObjectMapper smileMapper; + private final ObjectMapper jsonMapper; + + public MockCatalogSync( + CatalogStorage storage, + final ObjectMapper smileMapper, + final ObjectMapper jsonMapper, + boolean useSmile) + { + this.catalog = new CachedMetadataCatalog(storage, storage.schemaRegistry); + this.listenerResource = new CatalogListenerResource( + catalog, + smileMapper, + jsonMapper, + storage.authorizer().mapper()); + this.useSmile = useSmile; + this.smileMapper = smileMapper; + this.jsonMapper = jsonMapper; + } + + @Override + public void updated(TableMetadata update) + { + doSync(update); + } + + private void doSync(TableMetadata update) + { + byte[] encoded = update.toBytes(useSmile ? smileMapper : jsonMapper); + listenerResource.syncTable( + new ByteArrayInputStream(encoded), + new DummyRequest( + DummyRequest.POST, + DummyRequest.SUPER_USER, + useSmile ? 
SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON)); + } + + @Override + public void deleted(TableId tableId) + { + TableMetadata spec = TableMetadata.newTable( + tableId.schema(), + tableId.name(), + new TableSpec.Tombstone()); + doSync(spec); + } + + public MetadataCatalog catalog() + { + return catalog; + } +} diff --git a/server/src/test/java/org/apache/druid/metadata/SQLMetadataConnectorTest.java b/server/src/test/java/org/apache/druid/metadata/SQLMetadataConnectorTest.java index 1c192da475d5..ddc5fc321d8d 100644 --- a/server/src/test/java/org/apache/druid/metadata/SQLMetadataConnectorTest.java +++ b/server/src/test/java/org/apache/druid/metadata/SQLMetadataConnectorTest.java @@ -69,6 +69,26 @@ public void testCreateTables() tables.add(tablesConfig.getEntryTable(entryType)); tables.add(tablesConfig.getAuditTable()); tables.add(tablesConfig.getSupervisorTable()); + tables.add(tablesConfig.getPendingSegmentsTable()); + tables.add(tablesConfig.getDataSourceTable()); + + connector.getDBI().withHandle( + new HandleCallback() + { + @Override + public Void withHandle(Handle handle) + { + for (String table : tables) { + Assert.assertFalse( + StringUtils.format("table %s already created!", table), + connector.tableExists(handle, table) + ); + } + + return null; + } + } + ); connector.createSegmentTable(); connector.createConfigTable(); @@ -76,6 +96,8 @@ public void testCreateTables() connector.createTaskTables(); connector.createAuditTable(); connector.createSupervisorsTable(); + connector.createPendingSegmentsTable(); + connector.createDataSourceTable(); connector.getDBI().withHandle( new HandleCallback() diff --git a/server/src/test/java/org/apache/druid/metadata/catalog/TableIdTest.java b/server/src/test/java/org/apache/druid/metadata/catalog/TableIdTest.java new file mode 100644 index 000000000000..61f0c9c48035 --- /dev/null +++ b/server/src/test/java/org/apache/druid/metadata/catalog/TableIdTest.java @@ -0,0 +1,56 @@ +/* + * Licensed to the 
Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.metadata.catalog; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.druid.catalog.TableId; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +/** + * Tests the various catalog table record objects. These are mostly + * just "data objects" that do nothing other than hold data. 
+ */ +public class TableIdTest +{ + @Test + public void testId() + { + TableId id1 = new TableId("schema", "table"); + assertEquals(id1, id1); + assertEquals("schema", id1.schema()); + assertEquals("table", id1.name()); + assertEquals("\"schema\".\"table\"", id1.sqlName()); + assertEquals(id1.sqlName(), id1.toString()); + + TableId id2 = TableId.datasource("ds"); + assertEquals(TableId.DRUID_SCHEMA, id2.schema()); + assertEquals("ds", id2.name()); + } + + @Test + public void testEquals() + { + EqualsVerifier.forClass(TableId.class) + .usingGetClass() + .verify(); + } +} diff --git a/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java new file mode 100644 index 000000000000..e1bd243dd9b5 --- /dev/null +++ b/server/src/test/java/org/apache/druid/metadata/catalog/TableManagerTest.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.metadata.catalog; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.druid.catalog.DatasourceSpec; +import org.apache.druid.catalog.MetastoreManager; +import org.apache.druid.catalog.MetastoreManagerImpl; +import org.apache.druid.catalog.TableId; +import org.apache.druid.catalog.TableMetadata; +import org.apache.druid.jackson.DefaultObjectMapper; +import org.apache.druid.metadata.TestDerbyConnector; +import org.apache.druid.metadata.catalog.CatalogManager.DuplicateKeyException; +import org.apache.druid.metadata.catalog.CatalogManager.NotFoundException; +import org.apache.druid.metadata.catalog.CatalogManager.OutOfDateException; +import org.apache.druid.metadata.catalog.CatalogManager.TableState; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class TableManagerTest +{ + private static final ObjectMapper JSON_MAPPER = new DefaultObjectMapper(); + + @Rule + public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = + new TestDerbyConnector.DerbyConnectorRule(); + private CatalogManager manager; + + @Before + public void setUp() + { + MetastoreManager metastoreMgr = new MetastoreManagerImpl( + JSON_MAPPER, + derbyConnectorRule.getConnector(), + () -> derbyConnectorRule.getMetadataConnectorConfig(), + derbyConnectorRule.metadataTablesConfigSupplier() + ); + manager = new SQLCatalogManager(metastoreMgr); + manager.start(); + } + + @After + public void tearDown() + { + if (manager != null) { + manager.stop(); + manager = null; + } + } + + @Test + public void testCreate() throws DuplicateKeyException + { + DatasourceSpec defn = DatasourceSpec.builder() + 
.segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + TableMetadata table = TableMetadata.newSegmentTable("table1", defn); + + // Table does not exist, read returns nothing. + assertNull(manager.read(table.id())); + + // Create the table + long version = manager.create(table); + TableMetadata created = table.fromInsert(table.dbSchema(), version); + + // Read the record + TableMetadata read = manager.read(table.id()); + assertEquals(created, read); + + // Try to create a second time + try { + manager.create(table); + fail(); + } + catch (DuplicateKeyException e) { + // Expected + } + } + + @Test + public void testUpdate() throws DuplicateKeyException, OutOfDateException, NotFoundException + { + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + TableMetadata table = TableMetadata.newSegmentTable("table1", defn); + long version = manager.create(table); + + // Change the definition + DatasourceSpec defn2 = DatasourceSpec.builder() + .segmentGranularity("PT1D") + .rollupGranularity("PT1H") + .targetSegmentRows(2_000_000) + .build(); + + try { + manager.updateSpec(table.id(), defn2, 3); + fail(); + } + catch (OutOfDateException e) { + // expected + } + + assertEquals(version, manager.read(table.id()).updateTime()); + long newVersion = manager.updateSpec(table.id(), defn2, version); + TableMetadata table3 = manager.read(table.id()); + assertEquals(defn2, table3.spec()); + assertEquals(newVersion, table3.updateTime()); + + // Changing the state requires no version check + assertEquals(TableState.ACTIVE, table3.state()); + newVersion = manager.markDeleting(table.id()); + TableMetadata table4 = manager.read(table.id()); + assertEquals(TableState.DELETING, table4.state()); + assertEquals(newVersion, table4.updateTime()); + + // Update: no version check) + long newerVersion = manager.updateDefn(table.id(), defn2); + 
assertTrue(newerVersion > newVersion); + } + + @Test + public void testDelete() throws DuplicateKeyException + { + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + TableMetadata table = TableMetadata.newSegmentTable("table1", defn); + + assertFalse(manager.delete(table.id())); + manager.create(table); + assertTrue(manager.delete(table.id())); + assertFalse(manager.delete(table.id())); + } + + @Test + public void testList() throws DuplicateKeyException + { + List list = manager.list(); + assertTrue(list.isEmpty()); + + DatasourceSpec defn = DatasourceSpec.builder() + .segmentGranularity("PT1H") + .rollupGranularity("PT1M") + .targetSegmentRows(1_000_000) + .build(); + + // Create tables in inverse order + TableMetadata table2 = TableMetadata.newSegmentTable("table2", defn); + long version = manager.create(table2); + table2 = table2.fromInsert(TableId.DRUID_SCHEMA, version); + TableMetadata table1 = TableMetadata.newSegmentTable("table1", defn); + version = manager.create(table1); + table1 = table1.fromInsert(TableId.DRUID_SCHEMA, version); + + list = manager.list(); + assertEquals(2, list.size()); + TableId id = list.get(0); + assertEquals(TableId.DRUID_SCHEMA, id.schema()); + assertEquals("table1", id.name()); + id = list.get(1); + assertEquals(TableId.DRUID_SCHEMA, id.schema()); + assertEquals("table2", id.name()); + + List names = manager.list(TableId.DRUID_SCHEMA); + assertEquals(2, names.size()); + + names = manager.list(TableId.SYSTEM_SCHEMA); + assertEquals(0, names.size()); + + List details = manager.listDetails(TableId.DRUID_SCHEMA); + assertEquals(Arrays.asList(table1, table2), details); + } +}