diff --git a/docs/querying/sql-metadata-tables.md b/docs/querying/sql-metadata-tables.md index 6d59462fd1e9..09d0db23d96a 100644 --- a/docs/querying/sql-metadata-tables.md +++ b/docs/querying/sql-metadata-tables.md @@ -238,6 +238,7 @@ Servers table lists all discovered servers in the cluster. |start_time|STRING|Timestamp in ISO8601 format when the server was announced in the cluster| |version|VARCHAR|Druid version running on the server| |labels|VARCHAR|Labels for the server configured using the property [`druid.labels`](../configuration/index.md)| + To retrieve information about all servers, use the query: ```sql @@ -315,3 +316,21 @@ For example, to retrieve supervisor tasks information filtered by health status, ```sql SELECT * FROM sys.supervisors WHERE healthy=0; ``` + +### SERVER_PROPERTIES table + +The `server_properties` table exposes the runtime properties configured for each Druid server. Each row represents a single property key-value pair associated with a specific server. + +|Column|Type|Notes| +|------|-----|-----| +|server|VARCHAR|Host and port of the server, in the form `host:port`| +|service_name|VARCHAR|Service name of the server, as defined by `druid.service`| +|node_roles|VARCHAR|Comma-separated list of roles that the server performs. 
For example, `[coordinator,overlord]` if the server functions as both a Coordinator and an Overlord.| +|property|VARCHAR|Name of the property| +|value|VARCHAR|Value of the property| + +For example, to retrieve properties for a specific server, use the query: + +```sql +SELECT * FROM sys.server_properties WHERE server='192.168.1.1:8081'; +``` \ No newline at end of file diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java new file mode 100644 index 000000000000..d5f062e0d47d --- /dev/null +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.testing.embedded.schema; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.collect.ImmutableList; +import org.apache.druid.discovery.NodeRole; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.rpc.RequestBuilder; +import org.apache.druid.testing.embedded.EmbeddedBroker; +import org.apache.druid.testing.embedded.EmbeddedCoordinator; +import org.apache.druid.testing.embedded.EmbeddedDruidCluster; +import org.apache.druid.testing.embedded.EmbeddedOverlord; +import org.apache.druid.testing.embedded.junit5.EmbeddedClusterTestBase; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Map; + +public class SystemServerPropertiesTableTest extends EmbeddedClusterTestBase +{ + private static final String BROKER_PORT = "9082"; + private static final String BROKER_SERVICE = "test/broker"; + private static final String OVERLORD_PORT = "9090"; + private static final String OVERLORD_SERVICE = "test/overlord"; + private static final String COORDINATOR_PORT = "9081"; + private static final String COORDINATOR_SERVICE = "test/coordinator"; + + private final EmbeddedBroker broker = new EmbeddedBroker() + .addProperty("druid.service", BROKER_SERVICE) + .addProperty("druid.plaintextPort", BROKER_PORT) + .addProperty("test.onlyBroker", "brokerValue") + .addProperty("test.nonUniqueProperty", "brokerNonUniqueValue") + .addProperty("password", "brokerPassword"); + + private final EmbeddedOverlord overlord = new EmbeddedOverlord() + .addProperty("druid.service", OVERLORD_SERVICE) + .addProperty("druid.plaintextPort", OVERLORD_PORT) + .addProperty("test.onlyOverlord", "overlordValue") + .addProperty("test.nonUniqueProperty", "overlordNonUniqueValue"); + + private final EmbeddedCoordinator coordinator = new EmbeddedCoordinator() + .addProperty("druid.service", 
COORDINATOR_SERVICE) + .addProperty("druid.plaintextPort", COORDINATOR_PORT) + .addProperty("test.onlyCoordinator", "coordinatorValue") + .addProperty("test.nonUniqueProperty", "coordinatorNonUniqueValue"); + + @Override + protected EmbeddedDruidCluster createCluster() + { + return EmbeddedDruidCluster + .withZookeeper() + .addServer(coordinator) + .addServer(overlord) + .addServer(broker) + .addCommonProperty("commonProperty", "commonValue"); + } + + @Test + public void test_serverPropertiesTable_brokerServer() + { + final Map brokerProps = cluster.callApi().serviceClient().onAnyBroker( + mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), + new TypeReference<>(){} + ); + verifyPropertiesForServer(brokerProps, BROKER_SERVICE, StringUtils.format("localhost:%s", BROKER_PORT), NodeRole.BROKER_JSON_NAME); + } + + @Test + public void test_serverPropertiesTable_overlordServer() + { + final Map overlordProps = cluster.callApi().serviceClient().onLeaderOverlord( + mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), + new TypeReference<>(){} + ); + verifyPropertiesForServer(overlordProps, OVERLORD_SERVICE, StringUtils.format("localhost:%s", OVERLORD_PORT), NodeRole.OVERLORD_JSON_NAME); + } + + @Test + public void test_serverPropertiesTable_coordinatorServer() + { + final Map coordinatorProps = cluster.callApi().serviceClient().onLeaderCoordinator( + mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), + new TypeReference<>(){} + ); + verifyPropertiesForServer(coordinatorProps, COORDINATOR_SERVICE, StringUtils.format("localhost:%s", COORDINATOR_PORT), NodeRole.COORDINATOR_JSON_NAME); + } + + @Test + public void test_serverPropertiesTable_specificProperty() + { + Assertions.assertEquals( + "brokerValue", + cluster.runSql("SELECT \"value\" FROM sys.server_properties WHERE server = 'localhost:%s' AND property = 'test.onlyBroker'", BROKER_PORT) + ); + + Assertions.assertEquals( + "brokerValue", + cluster.runSql("SELECT \"value\" 
FROM sys.server_properties WHERE service_name = '%s' AND property = 'test.onlyBroker'", BROKER_SERVICE) + ); + + Assertions.assertEquals( + StringUtils.format("localhost:%s,%s,[%s],test.onlyBroker,brokerValue", BROKER_PORT, BROKER_SERVICE, NodeRole.BROKER_JSON_NAME), + cluster.runSql("SELECT * FROM sys.server_properties WHERE server = 'localhost:%s' AND property = 'test.onlyBroker'", BROKER_PORT) + ); + + String[] expectedRows = new String[] { + StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,brokerNonUniqueValue", BROKER_PORT, BROKER_SERVICE, NodeRole.BROKER_JSON_NAME), + StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,overlordNonUniqueValue", OVERLORD_PORT, OVERLORD_SERVICE, NodeRole.OVERLORD_JSON_NAME), + StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,coordinatorNonUniqueValue", COORDINATOR_PORT, COORDINATOR_SERVICE, NodeRole.COORDINATOR_JSON_NAME), + }; + Arrays.sort(expectedRows, String::compareTo); + final String result = cluster.runSql("SELECT * FROM sys.server_properties WHERE property='test.nonUniqueProperty'"); + String[] actualRows = result.split("\n"); + Arrays.sort(actualRows, String::compareTo); + Assertions.assertArrayEquals(expectedRows, actualRows); + + } + + @Test + public void test_serverPropertiesTable_hiddenProperties() + { + final Map brokerProps = cluster.callApi().serviceClient().onAnyBroker( + mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), + new TypeReference<>(){} + ); + Assertions.assertFalse(brokerProps.containsKey("password")); + } + + private void verifyPropertiesForServer(Map properties, String serviceName, String hostAndPort, String nodeRole) + { + String[] expectedRows = properties.entrySet().stream().map(entry -> String.join( + ",", + escapeCsvField(hostAndPort), + escapeCsvField(serviceName), + escapeCsvField(ImmutableList.of(nodeRole).toString()), + escapeCsvField(entry.getKey()), + escapeCsvField(entry.getValue()) + )).toArray(String[]::new); + 
Arrays.sort(expectedRows, String::compareTo); + final String result = cluster.runSql("SELECT * FROM sys.server_properties WHERE server='%s'", hostAndPort); + String[] actualRows = result.split("\n"); + Arrays.sort(actualRows, String::compareTo); + Assertions.assertArrayEquals(expectedRows, actualRows); + } + + /** + * Escapes a field value for CSV format. + */ + private String escapeCsvField(String field) + { + if (field == null) { + return ""; + } + if (field.contains(",") || field.contains("\"") || field.contains("\n") || field.contains("\r")) { + return "\"" + StringUtils.replace(field, "\"", "\"\"") + "\""; + } + return field; + } +} diff --git a/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java b/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java index ece5fa2be4ac..3fd6b41031be 100644 --- a/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java +++ b/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java @@ -24,7 +24,6 @@ import com.google.inject.Inject; import com.google.inject.Injector; import com.google.inject.Key; -import com.google.inject.servlet.GuiceFilter; import org.apache.druid.guice.annotations.Json; import org.apache.druid.server.http.OverlordProxyServlet; import org.apache.druid.server.http.RedirectFilter; @@ -110,7 +109,7 @@ public void initialize(Server server, Injector injector) // add some paths not to be redirected to leader. 
final FilterHolder guiceFilterHolder = JettyServerInitUtils.getGuiceFilterHolder(injector); - root.addFilter(GuiceFilter.class, "/status/*", null); + root.addFilter(guiceFilterHolder, "/status/*", null); root.addFilter(guiceFilterHolder, "/druid-internal/*", null); // redirect anything other than status to the current lead diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index 1343ace28fe4..6278506051ca 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -53,12 +53,14 @@ import org.apache.druid.discovery.DiscoveryDruidNode; import org.apache.druid.discovery.DruidNodeDiscoveryProvider; import org.apache.druid.discovery.NodeRole; +import org.apache.druid.guice.annotations.EscalatedClient; import org.apache.druid.indexer.TaskStatusPlus; import org.apache.druid.indexing.overlord.supervisor.SupervisorStatus; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.jackson.JacksonUtils; import org.apache.druid.java.util.common.parsers.CloseableIterator; +import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.rpc.indexing.OverlordClient; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; @@ -234,7 +236,8 @@ public SystemSchema( final CoordinatorClient coordinatorClient, final OverlordClient overlordClient, final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider, - final ObjectMapper jsonMapper + final ObjectMapper jsonMapper, + @EscalatedClient final HttpClient httpClient ) { Preconditions.checkNotNull(serverView, "serverView"); @@ -255,7 +258,9 @@ public SystemSchema( TASKS_TABLE, new TasksTable(overlordClient, authorizerMapper), SUPERVISOR_TABLE, - new SupervisorsTable(overlordClient, 
authorizerMapper) + new SupervisorsTable(overlordClient, authorizerMapper), + SystemServerPropertiesTable.TABLE_NAME, + new SystemServerPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper) ); } @@ -741,13 +746,6 @@ private static DruidServer toDruidServer(DiscoveryDruidNode discoveryDruidNode) } } - private static Iterator getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider) - { - return Arrays.stream(NodeRole.values()) - .flatMap(nodeRole -> druidNodeDiscoveryProvider.getForNodeRole(nodeRole).getAllNodes().stream()) - .collect(Collectors.toList()) - .iterator(); - } } /** @@ -1105,7 +1103,7 @@ private static String toStringOrNull(@Nullable final Object object) /** * Checks if an authenticated user has the STATE READ permissions needed to view server information. */ - private static void checkStateReadAccessForServers( + public static void checkStateReadAccessForServers( AuthenticationResult authenticationResult, AuthorizerMapper authorizerMapper ) @@ -1121,6 +1119,17 @@ private static void checkStateReadAccessForServers( } } + /** + * Returns an iterator over all discoverable Druid nodes in the cluster. + */ + public static Iterator getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider) + { + return Arrays.stream(NodeRole.values()) + .flatMap(nodeRole -> druidNodeDiscoveryProvider.getForNodeRole(nodeRole).getAllNodes().stream()) + .collect(Collectors.toList()) + .iterator(); + } + /** * Project a row using "projects" from {@link SegmentsTable#scan(DataContext, List, int[])}. *

diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java new file mode 100644 index 000000000000..df8b313c8e1c --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.sql.calcite.schema; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Preconditions; +import org.apache.calcite.DataContext; +import org.apache.calcite.linq4j.Enumerable; +import org.apache.calcite.linq4j.Linq4j; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.ScannableTable; +import org.apache.calcite.schema.Schema; +import org.apache.calcite.schema.impl.AbstractTable; +import org.apache.druid.discovery.DiscoveryDruidNode; +import org.apache.druid.discovery.DruidNodeDiscoveryProvider; +import org.apache.druid.error.InternalServerError; +import org.apache.druid.java.util.common.RE; +import org.apache.druid.java.util.http.client.HttpClient; +import org.apache.druid.java.util.http.client.Request; +import org.apache.druid.java.util.http.client.response.StringFullResponseHandler; +import org.apache.druid.java.util.http.client.response.StringFullResponseHolder; +import org.apache.druid.segment.column.ColumnType; +import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.server.DruidNode; +import org.apache.druid.server.security.AuthenticationResult; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.sql.calcite.planner.PlannerContext; +import org.apache.druid.sql.calcite.table.RowSignatures; +import org.jboss.netty.handler.codec.http.HttpMethod; + +import javax.servlet.http.HttpServletResponse; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * System schema table {@code sys.server_properties} that contains the properties of all Druid servers. 
+ * Each row contains the value of a single property. If a server has multiple node roles, all the rows for + * that server would have multiple values in the column {@code node_roles} rather than duplicating all the + * rows. + */ +public class SystemServerPropertiesTable extends AbstractTable implements ScannableTable +{ + public static final String TABLE_NAME = "server_properties"; + + static final RowSignature ROW_SIGNATURE = RowSignature + .builder() + .add("server", ColumnType.STRING) + .add("service_name", ColumnType.STRING) + .add("node_roles", ColumnType.STRING) + .add("property", ColumnType.STRING) + .add("value", ColumnType.STRING) + .build(); + + private final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider; + private final AuthorizerMapper authorizerMapper; + private final HttpClient httpClient; + private final ObjectMapper jsonMapper; + + public SystemServerPropertiesTable( + DruidNodeDiscoveryProvider druidNodeDiscoveryProvider, + AuthorizerMapper authorizerMapper, + HttpClient httpClient, + ObjectMapper jsonMapper + ) + { + this.druidNodeDiscoveryProvider = druidNodeDiscoveryProvider; + this.authorizerMapper = authorizerMapper; + this.httpClient = httpClient; + this.jsonMapper = jsonMapper; + } + + @Override + public RelDataType getRowType(RelDataTypeFactory typeFactory) + { + return RowSignatures.toRelDataType(ROW_SIGNATURE, typeFactory); + } + + @Override + public Schema.TableType getJdbcTableType() + { + return Schema.TableType.SYSTEM_TABLE; + } + + @Override + public Enumerable scan(DataContext root) + { + final AuthenticationResult authenticationResult = (AuthenticationResult) Preconditions.checkNotNull( + root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT), + "authenticationResult in dataContext" + ); + SystemSchema.checkStateReadAccessForServers(authenticationResult, authorizerMapper); + final Iterator druidServers = SystemSchema.getDruidServers(druidNodeDiscoveryProvider); + + final Map serverToPropertiesMap = new HashMap<>(); + 
druidServers.forEachRemaining(discoveryDruidNode -> { + final DruidNode druidNode = discoveryDruidNode.getDruidNode(); + final Map propertiesMap = getProperties(druidNode); + if (serverToPropertiesMap.containsKey(druidNode.getHostAndPortToUse())) { + ServerProperties serverProperties = serverToPropertiesMap.get(druidNode.getHostAndPortToUse()); + serverProperties.addNodeRole(discoveryDruidNode.getNodeRole().getJsonName()); + } else { + serverToPropertiesMap.put( + druidNode.getHostAndPortToUse(), + new ServerProperties( + druidNode.getServiceName(), + druidNode.getHostAndPortToUse(), + new ArrayList<>(Arrays.asList(discoveryDruidNode.getNodeRole().getJsonName())), + propertiesMap + ) + ); + } + }); + ArrayList rows = new ArrayList<>(); + for (ServerProperties serverProperties : serverToPropertiesMap.values()) { + rows.addAll(serverProperties.toRows()); + } + return Linq4j.asEnumerable(rows); + } + + private Map getProperties(DruidNode druidNode) + { + final String url = druidNode.getUriToUse().resolve("/status/properties").toString(); + try { + final Request request = new Request(HttpMethod.GET, new URL(url)); + final StringFullResponseHolder response; + response = httpClient + .go(request, new StringFullResponseHandler(StandardCharsets.UTF_8)) + .get(); + + if (response.getStatus().getCode() != HttpServletResponse.SC_OK) { + throw new RE( + "Failed to get properties from node[%s]. 
Error code[%d], description[%s].", + url, + response.getStatus().getCode(), + response.getStatus().getReasonPhrase() + ); + } + return jsonMapper.readValue( + response.getContent(), + new TypeReference<>(){} + ); + } + catch (Exception e) { + throw InternalServerError.exception(e, "HTTP request to[%s] failed", url); + } + } + + private static class ServerProperties + { + final String serviceName; + final String server; + final List nodeRoles; + final Map properties; + + public ServerProperties(String serviceName, String server, List nodeRoles, Map properties) + { + this.serviceName = serviceName; + this.server = server; + this.nodeRoles = nodeRoles; + this.properties = properties; + } + + public void addNodeRole(String nodeRole) + { + nodeRoles.add(nodeRole); + } + + public List toRows() + { + String nodeRolesString = nodeRoles.toString(); + return properties.entrySet().stream().map(entry -> new Object[]{server, serviceName, nodeRolesString, entry.getKey(), entry.getValue()}).collect(Collectors.toList()); + } + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index 269f58633741..af9d827af031 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -208,6 +208,7 @@ public void testInformationSchemaTables() .add(new Object[]{"lookup", "lookyloo-chain", "TABLE", "YES", "YES"}) .add(new Object[]{"lookup", "lookyloo121", "TABLE", "YES", "YES"}) .add(new Object[]{"sys", "segments", "SYSTEM_TABLE", "NO", "NO"}) + .add(new Object[]{"sys", "server_properties", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "server_segments", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "servers", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "supervisors", "SYSTEM_TABLE", "NO", "NO"}) @@ -253,6 +254,7 @@ public void testInformationSchemaTables() .add(new 
Object[]{"lookup", "lookyloo-chain", "TABLE", "YES", "YES"}) .add(new Object[]{"lookup", "lookyloo121", "TABLE", "YES", "YES"}) .add(new Object[]{"sys", "segments", "SYSTEM_TABLE", "NO", "NO"}) + .add(new Object[]{"sys", "server_properties", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "server_segments", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "servers", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "supervisors", "SYSTEM_TABLE", "NO", "NO"}) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java index b0e707d00b86..7027a5de9755 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java @@ -199,12 +199,14 @@ public void testSysTables() testSysTable("SELECT * FROM sys.server_segments", null, PLANNER_CONFIG_DEFAULT); testSysTable("SELECT * FROM sys.tasks", null, PLANNER_CONFIG_DEFAULT); testSysTable("SELECT * FROM sys.supervisors", null, PLANNER_CONFIG_DEFAULT); + testSysTable("SELECT * FROM sys.server_properties", null, PLANNER_CONFIG_DEFAULT); testSysTable("SELECT * FROM sys.segments", "segments", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); testSysTable("SELECT * FROM sys.servers", "servers", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); testSysTable("SELECT * FROM sys.server_segments", "server_segments", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); testSysTable("SELECT * FROM sys.tasks", "tasks", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); testSysTable("SELECT * FROM sys.supervisors", "supervisors", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); + testSysTable("SELECT * FROM sys.server_properties", "server_properties", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); } private void testSysTable(String sql, String name, PlannerConfig plannerConfig) diff --git 
a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java index eb0ab607e82d..55e72a7a682a 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java @@ -37,8 +37,10 @@ import org.apache.druid.discovery.DruidNodeDiscoveryProvider; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.LifecycleModule; +import org.apache.druid.guice.annotations.EscalatedClient; import org.apache.druid.guice.annotations.Json; import org.apache.druid.java.util.emitter.service.ServiceEmitter; +import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider; import org.apache.druid.query.lookup.LookupReferencesManager; import org.apache.druid.rpc.indexing.NoopOverlordClient; @@ -95,6 +97,8 @@ public class DruidCalciteSchemaModuleTest extends CalciteTestBase private SegmentManager segmentManager; @Mock private DruidOperatorTable druidOperatorTable; + @Mock + private HttpClient httpClient; private DruidCalciteSchemaModule target; private Injector injector; @@ -128,6 +132,8 @@ public void setUp() binder.bind(CoordinatorClient.class).to(NoopCoordinatorClient.class); binder.bind(CentralizedDatasourceSchemaConfig.class) .toInstance(CentralizedDatasourceSchemaConfig.create()); + binder.bind(HttpClient.class).toInstance(httpClient); + binder.bind(HttpClient.class).annotatedWith(EscalatedClient.class).toInstance(httpClient); }, new LifecycleModule(), target); diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index dd5af77e92f9..7c35d16e9e58 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -26,6 +26,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import junitparams.converters.Nullable; import org.apache.calcite.DataContext; import org.apache.calcite.adapter.java.JavaTypeFactory; @@ -64,9 +65,11 @@ import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.io.Closer; +import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.java.util.http.client.Request; import org.apache.druid.java.util.http.client.response.HttpResponseHandler; import org.apache.druid.java.util.http.client.response.InputStreamFullResponseHolder; +import org.apache.druid.java.util.http.client.response.StringFullResponseHandler; import org.apache.druid.java.util.http.client.response.StringFullResponseHolder; import org.apache.druid.query.QueryRunnerFactoryConglomerate; import org.apache.druid.query.aggregation.CountAggregatorFactory; @@ -112,7 +115,10 @@ import org.apache.druid.timeline.partition.NumberedShardSpec; import org.apache.druid.timeline.partition.ShardSpec; import org.easymock.EasyMock; +import org.jboss.netty.handler.codec.http.DefaultHttpResponse; import org.jboss.netty.handler.codec.http.HttpResponse; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.jboss.netty.handler.codec.http.HttpVersion; import org.joda.time.DateTime; import org.junit.Assert; import org.junit.jupiter.api.AfterAll; @@ -183,6 +189,7 @@ public class SystemSchemaTest extends CalciteTestBase private MetadataSegmentView metadataView; private DruidNodeDiscoveryProvider druidNodeDiscoveryProvider; private FilteredServerInventoryView serverInventoryView; + private HttpClient httpClient; @BeforeAll public static void setUpClass() @@ 
-261,6 +268,7 @@ public void setUp(@TempDir File tmpDir) throws Exception metadataView = EasyMock.createMock(MetadataSegmentView.class); druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class); serverInventoryView = EasyMock.createMock(FilteredServerInventoryView.class); + httpClient = EasyMock.createMock(HttpClient.class); schema = new SystemSchema( druidSchema, metadataView, @@ -270,7 +278,8 @@ public void setUp(@TempDir File tmpDir) throws Exception coordinatorClient, overlordClient, druidNodeDiscoveryProvider, - MAPPER + MAPPER, + httpClient ); } @@ -532,13 +541,13 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT public void testGetTableMap() { Assert.assertEquals( - ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"), + ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors", "server_properties"), schema.getTableNames() ); final Map tableMap = schema.getTableMap(); Assert.assertEquals( - ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"), + ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors", "server_properties"), tableMap.keySet() ); final SystemSchema.SegmentsTable segmentsTable = (SystemSchema.SegmentsTable) schema.getTableMap().get("segments"); @@ -561,6 +570,12 @@ public void testGetTableMap() Assert.assertEquals(12, serverFields.size()); Assert.assertEquals("server", serverFields.get(0).getName()); Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName()); + + final SystemServerPropertiesTable propertiesTable = (SystemServerPropertiesTable) schema.getTableMap() + .get("server_properties"); + final RelDataType propertiesRowType = propertiesTable.getRowType(new JavaTypeFactoryImpl()); + final List propertiesFields = propertiesRowType.getFieldList(); + Assert.assertEquals(5, propertiesFields.size()); } @Test @@ -1448,6 +1463,111 @@ public void 
testSupervisorTableAuth() // verifyTypes(rows, SystemSchema.SUPERVISOR_SIGNATURE); } + @Test + public void testPropertiesTable() + { + SystemServerPropertiesTable propertiesTable = EasyMock.createMockBuilder(SystemServerPropertiesTable.class) + .withConstructor(druidNodeDiscoveryProvider, authMapper, httpClient, MAPPER) + .createMock(); + + EasyMock.replay(propertiesTable); + + List expectedRows = new ArrayList<>(); + + mockNodeDiscovery(NodeRole.BROKER); + mockNodeDiscovery(NodeRole.ROUTER); + mockNodeDiscovery(NodeRole.HISTORICAL); + mockNodeDiscovery(NodeRole.OVERLORD); + mockNodeDiscovery(NodeRole.PEON); + mockNodeDiscovery(NodeRole.INDEXER); + + mockNodeDiscovery(NodeRole.COORDINATOR, coordinator, coordinator2); + HttpResponse coordinatorHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + StringFullResponseHolder coordinatorResponseHolder = new StringFullResponseHolder(coordinatorHttpResponse, StandardCharsets.UTF_8); + String coordinatorJson = "{\"druid.test-key\": \"test-value\"}"; + coordinatorResponseHolder.addChunk(coordinatorJson); + expectedRows.add(new Object[]{ + coordinator.getDruidNode().getHostAndPortToUse(), + coordinator.getDruidNode().getServiceName(), + ImmutableList.of(coordinator.getNodeRole().getJsonName()).toString(), + "druid.test-key", "test-value" + }); + + HttpResponse coordinator2HttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + StringFullResponseHolder coordinator2ResponseHolder = new StringFullResponseHolder(coordinator2HttpResponse, StandardCharsets.UTF_8); + String coordinator2Json = "{\"druid.test-key3\": \"test-value3\"}"; + coordinator2ResponseHolder.addChunk(coordinator2Json); + expectedRows + .add(new Object[]{ + coordinator2.getDruidNode().getHostAndPortToUse(), + coordinator2.getDruidNode().getServiceName(), + ImmutableList.of(coordinator2.getNodeRole().getJsonName()).toString(), + "druid.test-key3", "test-value3" + }); + + 
mockNodeDiscovery(NodeRole.MIDDLE_MANAGER, middleManager); + HttpResponse middleManagerHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + StringFullResponseHolder middleManagerResponseHolder = new StringFullResponseHolder(middleManagerHttpResponse, StandardCharsets.UTF_8); + String middleManagerJson = "{\n" + + "\"druid.test-key\": \"test-value\",\n" + + "\"druid.test-key2\": \"test-value2\"\n" + + "}"; + middleManagerResponseHolder.addChunk(middleManagerJson); + expectedRows + .add(new Object[]{ + middleManager.getDruidNode().getHostAndPortToUse(), + middleManager.getDruidNode().getServiceName(), + ImmutableList.of(middleManager.getNodeRole().getJsonName()).toString(), + "druid.test-key", "test-value" + }); + expectedRows + .add(new Object[]{ + middleManager.getDruidNode().getHostAndPortToUse(), + middleManager.getDruidNode().getServiceName(), + ImmutableList.of(middleManager.getNodeRole().getJsonName()).toString(), + "druid.test-key2", "test-value2" + }); + + Map<String, ListenableFuture<StringFullResponseHolder>> urlToResponse = ImmutableMap.of( + getStatusPropertiesUrl(coordinator), Futures.immediateFuture(coordinatorResponseHolder), + getStatusPropertiesUrl(coordinator2), Futures.immediateFuture(coordinator2ResponseHolder), + getStatusPropertiesUrl(middleManager), Futures.immediateFuture(middleManagerResponseHolder) + ); + + EasyMock.expect( + httpClient.go( + EasyMock.isA(Request.class), + EasyMock.isA(StringFullResponseHandler.class) + ) + ).andAnswer(() -> { + Request req = (Request) EasyMock.getCurrentArguments()[0]; + String url = req.getUrl().toString(); + + ListenableFuture<StringFullResponseHolder> future = urlToResponse.get(url); + if (future != null) { + return future; + } + return Futures.immediateFailedFuture(new AssertionError("Unexpected URL: " + url)); + }).times(3); + + EasyMock.replay(druidNodeDiscoveryProvider, responseHandler, httpClient); + + DataContext dataContext = createDataContext(Users.SUPER); + final List<Object[]> rows = propertiesTable.scan(dataContext).toList(); + 
expectedRows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0])); + rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0])); + Assert.assertEquals(expectedRows.size(), rows.size()); + for (int i = 0; i < expectedRows.size(); i++) { + Assert.assertArrayEquals(expectedRows.get(i), rows.get(i)); + } + + } + + private String getStatusPropertiesUrl(DiscoveryDruidNode discoveryDruidNode) + { + return discoveryDruidNode.getDruidNode().getUriToUse().resolve("/status/properties").toString(); + } + /** * Creates a response holder that contains the given json. */ @@ -1585,6 +1705,15 @@ private static void verifyTypes(final List rows, final RowSignature si } } + private DruidNodeDiscovery mockNodeDiscovery(NodeRole nodeRole, DiscoveryDruidNode... discoveryDruidNodes) + { + final DruidNodeDiscovery druidNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(nodeRole)).andReturn(druidNodeDiscovery).once(); + EasyMock.expect(druidNodeDiscovery.getAllNodes()).andReturn(ImmutableList.copyOf(discoveryDruidNodes)).once(); + EasyMock.replay(druidNodeDiscovery); + return druidNodeDiscovery; + } + /** * Usernames to be used in tests. 
*/ diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java index eef6d87d3ae0..59151d352e2c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java @@ -418,7 +418,8 @@ private TaskStatusPlus createTaskStatus(String id, String datasource, Long durat coordinatorClient, overlordClient, provider, - getJsonMapper() + getJsonMapper(), + new FakeHttpClient() ); } diff --git a/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java b/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java index f0057eb9c5d9..38c344d75709 100644 --- a/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java @@ -39,10 +39,12 @@ import org.apache.druid.guice.LifecycleModule; import org.apache.druid.guice.PolyBind; import org.apache.druid.guice.ServerModule; +import org.apache.druid.guice.annotations.EscalatedClient; import org.apache.druid.guice.security.PolicyModule; import org.apache.druid.initialization.DruidModule; import org.apache.druid.jackson.JacksonModule; import org.apache.druid.java.util.emitter.service.ServiceEmitter; +import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.DefaultQueryConfig; import org.apache.druid.query.GenericQueryMetricsFactory; @@ -120,6 +122,9 @@ public class SqlModuleTest @Mock private QueryRunnerFactoryConglomerate conglomerate; + @Mock + private HttpClient httpClient; + private Injector injector; @Before @@ -135,7 +140,8 @@ public void setUp() queryToolChestWarehouse, lookupExtractorFactoryContainerProvider, joinableFactory, - segmentCacheManager + segmentCacheManager, + httpClient ); } @@ -215,6 +221,7 @@ private Injector makeInjectorWithProperties(final Properties props) 
binder.bind(CentralizedDatasourceSchemaConfig.class) .toInstance(CentralizedDatasourceSchemaConfig.enabled(false)); binder.bind(DefaultQueryConfig.class).toInstance(DefaultQueryConfig.NIL); + binder.bind(HttpClient.class).annotatedWith(EscalatedClient.class).toInstance(httpClient); }, sqlModule, new TestViewManagerModule() diff --git a/website/.spelling b/website/.spelling index 2eaa410bace9..92207fe38f59 100644 --- a/website/.spelling +++ b/website/.spelling @@ -447,6 +447,7 @@ namespaced namespaces natively netflow +node_roles nondescriptive nonfinalized non-null @@ -539,6 +540,7 @@ searchable secondaryPartitionPruning seekable seekable-stream +service_name servlet setProcessingThreadNames sigterm @@ -677,6 +679,7 @@ PT5M SCHEMA_NAME SCHEMA_OWNER +SERVER_PROPERTIES SERVER_SEGMENTS SMALLINT SQL_PATH STRING_AGG