From 412eb62d7629d460c8ec34048237a09717864a7f Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Thu, 23 Oct 2025 17:52:09 +0800 Subject: [PATCH 01/28] Add properties table --- .../calcite/schema/SystemPropertiesTable.java | 163 ++++++++++++++++++ .../sql/calcite/schema/SystemSchema.java | 59 ++++--- .../sql/calcite/schema/SystemSchemaTest.java | 144 +++++++++++++++- .../druid/sql/calcite/util/CalciteTests.java | 3 +- 4 files changed, 337 insertions(+), 32 deletions(-) create mode 100644 sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java new file mode 100644 index 000000000000..17110d3df50e --- /dev/null +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.sql.calcite.schema; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Preconditions; +import com.google.common.collect.FluentIterable; +import org.apache.calcite.DataContext; +import org.apache.calcite.linq4j.Enumerable; +import org.apache.calcite.linq4j.Linq4j; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.ScannableTable; +import org.apache.calcite.schema.Schema; +import org.apache.calcite.schema.impl.AbstractTable; +import org.apache.druid.discovery.DiscoveryDruidNode; +import org.apache.druid.discovery.DruidNodeDiscoveryProvider; +import org.apache.druid.java.util.common.RE; +import org.apache.druid.java.util.http.client.HttpClient; +import org.apache.druid.java.util.http.client.Request; +import org.apache.druid.java.util.http.client.response.StringFullResponseHandler; +import org.apache.druid.java.util.http.client.response.StringFullResponseHolder; +import org.apache.druid.segment.column.ColumnType; +import org.apache.druid.segment.column.RowSignature; +import org.apache.druid.server.DruidNode; +import org.apache.druid.server.security.AuthenticationResult; +import org.apache.druid.server.security.AuthorizerMapper; +import org.apache.druid.sql.calcite.planner.PlannerContext; +import org.apache.druid.sql.calcite.table.RowSignatures; +import org.jboss.netty.handler.codec.http.HttpMethod; + +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +public final class SystemPropertiesTable extends AbstractTable implements ScannableTable +{ + public static final String PROPERTIES_TABLE = "properties"; + + static final RowSignature 
PROPERTIES_SIGNATURE = RowSignature + .builder() + .add("service", ColumnType.STRING) + .add("host", ColumnType.STRING) + .add("server_type", ColumnType.STRING) + .add("property", ColumnType.STRING) + .add("value", ColumnType.STRING) + .build(); + + private final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider; + private final AuthorizerMapper authorizerMapper; + private final HttpClient httpClient; + private final ObjectMapper jsonMapper; + + public SystemPropertiesTable( + DruidNodeDiscoveryProvider druidNodeDiscoveryProvider, + AuthorizerMapper authorizerMapper, + HttpClient httpClient, + ObjectMapper jsonMapper + ) + { + this.druidNodeDiscoveryProvider = druidNodeDiscoveryProvider; + this.authorizerMapper = authorizerMapper; + this.httpClient = httpClient; + this.jsonMapper = jsonMapper; + } + + @Override + public RelDataType getRowType(RelDataTypeFactory typeFactory) + { + return RowSignatures.toRelDataType(PROPERTIES_SIGNATURE, typeFactory); + } + + @Override + public Schema.TableType getJdbcTableType() + { + return Schema.TableType.SYSTEM_TABLE; + } + + @Override + public Enumerable scan(DataContext root) + { + final AuthenticationResult authenticationResult = (AuthenticationResult) Preconditions.checkNotNull( + root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT), + "authenticationResult in dataContext" + ); + SystemSchema.checkStateReadAccessForServers(authenticationResult, authorizerMapper); + final Iterator druidServers = SystemSchema.getDruidServers(druidNodeDiscoveryProvider); + + final FluentIterable results = FluentIterable + .from(() -> druidServers) + .transformAndConcat((DiscoveryDruidNode discoveryDruidNode) -> { + final DruidNode druidNode = discoveryDruidNode.getDruidNode(); + final Map propertiesMap = getProperties(druidNode); + return propertiesMap.entrySet().stream() + .map(entry -> new Object[]{ + druidNode.getServiceName(), + druidNode.getHost(), + discoveryDruidNode.getNodeRole().getJsonName(), + entry.getKey(), + 
entry.getValue() + }) + .collect(Collectors.toList()); + }); + return Linq4j.asEnumerable(results); + } + + private Map getProperties(DruidNode druidNode) + { + try { + final String url = druidNode.getUriToUse().resolve("/status/properties").toString(); + final Request request = new Request(HttpMethod.GET, new URL(url)); + final StringFullResponseHolder response; + try { + response = httpClient + .go(request, new StringFullResponseHandler(StandardCharsets.UTF_8)) + .get(); + } + catch (ExecutionException e) { + throw new RE(e, "HTTP request to[%s] failed", request.getUrl()); + } + + if (response.getStatus().getCode() != HttpServletResponse.SC_OK) { + throw new RE( + "Failed to get properties from node at [%s]. Error code [%d], description [%s].", + url, + response.getStatus().getCode(), + response.getStatus().getReasonPhrase() + ); + } + return jsonMapper.readValue( + response.getContent(), new TypeReference>() + { + } + ); + } + catch (IOException | InterruptedException e) { + throw new RuntimeException(e); + } + } +} diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index 1343ace28fe4..5896b36caa0c 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -53,12 +53,14 @@ import org.apache.druid.discovery.DiscoveryDruidNode; import org.apache.druid.discovery.DruidNodeDiscoveryProvider; import org.apache.druid.discovery.NodeRole; +import org.apache.druid.guice.annotations.EscalatedClient; import org.apache.druid.indexer.TaskStatusPlus; import org.apache.druid.indexing.overlord.supervisor.SupervisorStatus; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.jackson.JacksonUtils; import org.apache.druid.java.util.common.parsers.CloseableIterator; +import 
org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.rpc.indexing.OverlordClient; import org.apache.druid.segment.column.ColumnType; import org.apache.druid.segment.column.RowSignature; @@ -234,29 +236,29 @@ public SystemSchema( final CoordinatorClient coordinatorClient, final OverlordClient overlordClient, final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider, - final ObjectMapper jsonMapper + final ObjectMapper jsonMapper, + final @EscalatedClient HttpClient httpClient ) { Preconditions.checkNotNull(serverView, "serverView"); - this.tableMap = ImmutableMap.of( - SEGMENTS_TABLE, - new SegmentsTable(druidSchema, metadataView, jsonMapper, authorizerMapper), - SERVERS_TABLE, - new ServersTable( - druidNodeDiscoveryProvider, - serverInventoryView, - authorizerMapper, - overlordClient, - coordinatorClient, - jsonMapper - ), - SERVER_SEGMENTS_TABLE, - new ServerSegmentsTable(serverView, authorizerMapper), - TASKS_TABLE, - new TasksTable(overlordClient, authorizerMapper), - SUPERVISOR_TABLE, - new SupervisorsTable(overlordClient, authorizerMapper) - ); + this.tableMap = ImmutableMap.builder() + .put(SEGMENTS_TABLE, new SegmentsTable(druidSchema, metadataView, jsonMapper, authorizerMapper)) + .put( + SERVERS_TABLE, + new ServersTable( + druidNodeDiscoveryProvider, + serverInventoryView, + authorizerMapper, + overlordClient, + coordinatorClient, + jsonMapper + ) + ) + .put(SERVER_SEGMENTS_TABLE, new ServerSegmentsTable(serverView, authorizerMapper)) + .put(TASKS_TABLE, new TasksTable(overlordClient, authorizerMapper)) + .put(SUPERVISOR_TABLE, new SupervisorsTable(overlordClient, authorizerMapper)) + .put(SystemPropertiesTable.PROPERTIES_TABLE, new SystemPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper)) + .build(); } @Override @@ -741,13 +743,6 @@ private static DruidServer toDruidServer(DiscoveryDruidNode discoveryDruidNode) } } - private static Iterator getDruidServers(DruidNodeDiscoveryProvider 
druidNodeDiscoveryProvider) - { - return Arrays.stream(NodeRole.values()) - .flatMap(nodeRole -> druidNodeDiscoveryProvider.getForNodeRole(nodeRole).getAllNodes().stream()) - .collect(Collectors.toList()) - .iterator(); - } } /** @@ -1105,7 +1100,7 @@ private static String toStringOrNull(@Nullable final Object object) /** * Checks if an authenticated user has the STATE READ permissions needed to view server information. */ - private static void checkStateReadAccessForServers( + public static void checkStateReadAccessForServers( AuthenticationResult authenticationResult, AuthorizerMapper authorizerMapper ) @@ -1121,6 +1116,14 @@ private static void checkStateReadAccessForServers( } } + public static Iterator getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider) + { + return Arrays.stream(NodeRole.values()) + .flatMap(nodeRole -> druidNodeDiscoveryProvider.getForNodeRole(nodeRole).getAllNodes().stream()) + .collect(Collectors.toList()) + .iterator(); + } + /** * Project a row using "projects" from {@link SegmentsTable#scan(DataContext, List, int[])}. *

diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index dd5af77e92f9..0907d00b3007 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -26,6 +26,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import junitparams.converters.Nullable; import org.apache.calcite.DataContext; import org.apache.calcite.adapter.java.JavaTypeFactory; @@ -64,9 +65,11 @@ import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.io.Closer; +import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.java.util.http.client.Request; import org.apache.druid.java.util.http.client.response.HttpResponseHandler; import org.apache.druid.java.util.http.client.response.InputStreamFullResponseHolder; +import org.apache.druid.java.util.http.client.response.StringFullResponseHandler; import org.apache.druid.java.util.http.client.response.StringFullResponseHolder; import org.apache.druid.query.QueryRunnerFactoryConglomerate; import org.apache.druid.query.aggregation.CountAggregatorFactory; @@ -112,7 +115,10 @@ import org.apache.druid.timeline.partition.NumberedShardSpec; import org.apache.druid.timeline.partition.ShardSpec; import org.easymock.EasyMock; +import org.jboss.netty.handler.codec.http.DefaultHttpResponse; import org.jboss.netty.handler.codec.http.HttpResponse; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.jboss.netty.handler.codec.http.HttpVersion; import org.joda.time.DateTime; import org.junit.Assert; import org.junit.jupiter.api.AfterAll; @@ -183,6 +189,7 @@ 
public class SystemSchemaTest extends CalciteTestBase private MetadataSegmentView metadataView; private DruidNodeDiscoveryProvider druidNodeDiscoveryProvider; private FilteredServerInventoryView serverInventoryView; + private HttpClient httpClient; @BeforeAll public static void setUpClass() @@ -261,6 +268,7 @@ public void setUp(@TempDir File tmpDir) throws Exception metadataView = EasyMock.createMock(MetadataSegmentView.class); druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class); serverInventoryView = EasyMock.createMock(FilteredServerInventoryView.class); + httpClient = EasyMock.createMock(HttpClient.class); schema = new SystemSchema( druidSchema, metadataView, @@ -270,7 +278,8 @@ public void setUp(@TempDir File tmpDir) throws Exception coordinatorClient, overlordClient, druidNodeDiscoveryProvider, - MAPPER + MAPPER, + httpClient ); } @@ -532,13 +541,13 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT public void testGetTableMap() { Assert.assertEquals( - ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"), + ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors", "properties"), schema.getTableNames() ); final Map tableMap = schema.getTableMap(); Assert.assertEquals( - ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"), + ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors", "properties"), tableMap.keySet() ); final SystemSchema.SegmentsTable segmentsTable = (SystemSchema.SegmentsTable) schema.getTableMap().get("segments"); @@ -561,6 +570,12 @@ public void testGetTableMap() Assert.assertEquals(12, serverFields.size()); Assert.assertEquals("server", serverFields.get(0).getName()); Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName()); + + final SystemPropertiesTable propertiesTable = (SystemPropertiesTable) schema.getTableMap() + .get("properties"); + 
final RelDataType propertiesRowType = propertiesTable.getRowType(new JavaTypeFactoryImpl()); + final List propertiesFields = propertiesRowType.getFieldList(); + Assert.assertEquals(5, propertiesFields.size()); } @Test @@ -1448,6 +1463,129 @@ public void testSupervisorTableAuth() // verifyTypes(rows, SystemSchema.SUPERVISOR_SIGNATURE); } + @Test + public void testPropertiesTable() throws Exception + { + SystemPropertiesTable propertiesTable = EasyMock.createMockBuilder(SystemPropertiesTable.class) + .withConstructor(druidNodeDiscoveryProvider, authMapper, httpClient, MAPPER) + .createMock(); + + EasyMock.replay(propertiesTable); + + final DruidNodeDiscovery coordinatorNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + final DruidNodeDiscovery middleManagerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + final DruidNodeDiscovery brokerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + final DruidNodeDiscovery routerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + final DruidNodeDiscovery historicalNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + final DruidNodeDiscovery overlordNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + final DruidNodeDiscovery peonNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + final DruidNodeDiscovery indexerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.COORDINATOR)).andReturn(coordinatorNodeDiscovery).once(); + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.MIDDLE_MANAGER)).andReturn(middleManagerNodeDiscovery).once(); + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER)).andReturn(brokerNodeDiscovery).once(); + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.ROUTER)).andReturn(routerNodeDiscovery).once(); + 
EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.HISTORICAL)).andReturn(historicalNodeDiscovery).once(); + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.OVERLORD)).andReturn(overlordNodeDiscovery).once(); + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.PEON)).andReturn(peonNodeDiscovery).once(); + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.INDEXER)).andReturn(indexerNodeDiscovery).once(); + + EasyMock.expect(coordinatorNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(coordinator, coordinator2)).once(); + EasyMock.expect(middleManagerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(middleManager)).once(); + EasyMock.expect(brokerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); + EasyMock.expect(routerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); + EasyMock.expect(historicalNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); + EasyMock.expect(overlordNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); + EasyMock.expect(peonNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); + EasyMock.expect(indexerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); + + HttpResponse coordinatorHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + StringFullResponseHolder coordinatorResponseHolder = new StringFullResponseHolder(coordinatorHttpResponse, StandardCharsets.UTF_8); + String coordinatorJson = "{\"druid.test-key\": \"test-value\"}"; + coordinatorResponseHolder.addChunk(coordinatorJson); + + HttpResponse coordinator2HttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + StringFullResponseHolder coordinator2ResponseHolder = new StringFullResponseHolder(coordinator2HttpResponse, StandardCharsets.UTF_8); + String coordinator2Json = "{\"druid.test-key3\": \"test-value3\"}"; + 
coordinator2ResponseHolder.addChunk(coordinator2Json); + + HttpResponse middleManagerHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + StringFullResponseHolder middleManagerResponseHolder = new StringFullResponseHolder(middleManagerHttpResponse, StandardCharsets.UTF_8); + String middleManagerJson = "{\n" + + "\"druid.test-key\": \"test-value\",\n" + + "\"druid.test-key2\": \"test-value2\"\n" + + "}"; + middleManagerResponseHolder.addChunk(middleManagerJson); + + Map> urlToResponse = ImmutableMap.of( + getStatusPropertiesUrl(coordinator), Futures.immediateFuture(coordinatorResponseHolder), + getStatusPropertiesUrl(coordinator2), Futures.immediateFuture(coordinator2ResponseHolder), + getStatusPropertiesUrl(middleManager), Futures.immediateFuture(middleManagerResponseHolder) + ); + + EasyMock.expect( + httpClient.go( + EasyMock.isA(Request.class), + EasyMock.isA(StringFullResponseHandler.class) + ) + ).andAnswer(() -> { + Request req = (Request) EasyMock.getCurrentArguments()[0]; + String url = req.getUrl().toString(); + + ListenableFuture future = urlToResponse.get(url); + if (future != null) { + return future; + } + return Futures.immediateFailedFuture(new AssertionError("Unexpected URL: " + url)); + }).times(3); + + EasyMock.replay(druidNodeDiscoveryProvider, responseHandler, httpClient); + EasyMock.replay( + coordinatorNodeDiscovery, + middleManagerNodeDiscovery, + brokerNodeDiscovery, + routerNodeDiscovery, + historicalNodeDiscovery, + overlordNodeDiscovery, + peonNodeDiscovery, + indexerNodeDiscovery + ); + + List expectedRows = new ArrayList<>(); + expectedRows.add(new Object[]{ + coordinator.getDruidNode().getServiceName(), coordinator.getDruidNode().getHost(), + coordinator.getNodeRole().getJsonName(), "druid.test-key", "test-value" + }); + expectedRows + .add(new Object[]{ + coordinator2.getDruidNode().getServiceName(), coordinator2.getDruidNode().getHost(), + coordinator2.getNodeRole().getJsonName(), "druid.test-key3", 
"test-value3" + }); + expectedRows + .add(new Object[]{ + middleManager.getDruidNode().getServiceName(), middleManager.getDruidNode().getHost(), + middleManager.getNodeRole().getJsonName(), "druid.test-key", "test-value" + }); + expectedRows + .add(new Object[]{ + middleManager.getDruidNode().getServiceName(), middleManager.getDruidNode().getHost(), + middleManager.getNodeRole().getJsonName(), "druid.test-key2", "test-value2" + }); + + DataContext dataContext = createDataContext(Users.SUPER); + final List rows = propertiesTable.scan(dataContext).toList(); + Assert.assertEquals(expectedRows.size(), rows.size()); + for (int i = 0; i < expectedRows.size(); i++) { + Assert.assertArrayEquals(expectedRows.get(i), rows.get(i)); + } + + } + + private String getStatusPropertiesUrl(DiscoveryDruidNode discoveryDruidNode) + { + return discoveryDruidNode.getDruidNode().getUriToUse().resolve("/status/properties").toString(); + } + /** * Creates a response holder that contains the given json. */ diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java index eef6d87d3ae0..59151d352e2c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/CalciteTests.java @@ -418,7 +418,8 @@ private TaskStatusPlus createTaskStatus(String id, String datasource, Long durat coordinatorClient, overlordClient, provider, - getJsonMapper() + getJsonMapper(), + new FakeHttpClient() ); } From 8f87895f10c0c5ee291fe429d8c2145cb956aaa3 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:07:56 +0800 Subject: [PATCH 02/28] Fix based on feedback --- .../calcite/schema/SystemPropertiesTable.java | 73 +++++++++++-------- .../sql/calcite/schema/SystemSchema.java | 3 + .../schema/DruidCalciteSchemaModuleTest.java | 4 + .../apache/druid/sql/guice/SqlModuleTest.java | 8 
+- 4 files changed, 56 insertions(+), 32 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java index 17110d3df50e..d62d5eaae755 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java @@ -22,7 +22,6 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; -import com.google.common.collect.FluentIterable; import org.apache.calcite.DataContext; import org.apache.calcite.linq4j.Enumerable; import org.apache.calcite.linq4j.Linq4j; @@ -33,6 +32,7 @@ import org.apache.calcite.schema.impl.AbstractTable; import org.apache.druid.discovery.DiscoveryDruidNode; import org.apache.druid.discovery.DruidNodeDiscoveryProvider; +import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.RE; import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.java.util.http.client.Request; @@ -51,20 +51,25 @@ import java.io.IOException; import java.net.URL; import java.nio.charset.StandardCharsets; +import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; +import java.util.stream.Stream; +/** + * This table contains row per property. It contains all the properties of all druid servers. 
+ */ public final class SystemPropertiesTable extends AbstractTable implements ScannableTable { - public static final String PROPERTIES_TABLE = "properties"; + public static final String PROPERTIES_TABLE = "server_properties"; static final RowSignature PROPERTIES_SIGNATURE = RowSignature .builder() - .add("service", ColumnType.STRING) - .add("host", ColumnType.STRING) - .add("server_type", ColumnType.STRING) + .add("service_name", ColumnType.STRING) + .add("server", ColumnType.STRING) + .add("node_roles", ColumnType.STRING) .add("property", ColumnType.STRING) .add("value", ColumnType.STRING) .build(); @@ -109,38 +114,40 @@ public Enumerable scan(DataContext root) SystemSchema.checkStateReadAccessForServers(authenticationResult, authorizerMapper); final Iterator druidServers = SystemSchema.getDruidServers(druidNodeDiscoveryProvider); - final FluentIterable results = FluentIterable - .from(() -> druidServers) - .transformAndConcat((DiscoveryDruidNode discoveryDruidNode) -> { - final DruidNode druidNode = discoveryDruidNode.getDruidNode(); - final Map propertiesMap = getProperties(druidNode); - return propertiesMap.entrySet().stream() - .map(entry -> new Object[]{ - druidNode.getServiceName(), - druidNode.getHost(), - discoveryDruidNode.getNodeRole().getJsonName(), - entry.getKey(), - entry.getValue() - }) - .collect(Collectors.toList()); - }); - return Linq4j.asEnumerable(results); + // ! 
TODO use string builder instead and issue: there are unique service names for each service so combining node_roles we lose the service names + final Map>> serverToPropertiesMap = new HashMap<>(); + druidServers.forEachRemaining(discoveryDruidNode -> { + final DruidNode druidNode = discoveryDruidNode.getDruidNode(); + final Map propertiesMap = getProperties(druidNode); + if (serverToPropertiesMap.containsKey(druidNode.getHostAndPortToUse())) { + Pair> pair = serverToPropertiesMap.get(druidNode.getHostAndPortToUse()); + serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), Pair.of(pair.lhs + "," + discoveryDruidNode.getNodeRole().getJsonName(), pair.rhs)); + } + else { + serverToPropertiesMap.put( + druidNode.getHostAndPortToUse(), Pair.of(discoveryDruidNode.getNodeRole().getJsonName(), propertiesMap.entrySet().stream() + .map(entry -> new Object[]{ + druidNode.getServiceName(), + druidNode.getHostAndPortToUse(), + discoveryDruidNode.getNodeRole().getJsonName(), + entry.getKey(), + entry.getValue() + })) + ); + } + }); + return Linq4j.asEnumerable(serverToPropertiesMap.values().stream().flatMap(pair -> pair.rhs.map(entry -> new Object[]{entry[0], entry[1], pair.lhs, entry[3], entry[4]})).collect(Collectors.toList())); } private Map getProperties(DruidNode druidNode) { + final String url = druidNode.getUriToUse().resolve("/status/properties").toString(); try { - final String url = druidNode.getUriToUse().resolve("/status/properties").toString(); final Request request = new Request(HttpMethod.GET, new URL(url)); final StringFullResponseHolder response; - try { - response = httpClient - .go(request, new StringFullResponseHandler(StandardCharsets.UTF_8)) - .get(); - } - catch (ExecutionException e) { - throw new RE(e, "HTTP request to[%s] failed", request.getUrl()); - } + response = httpClient + .go(request, new StringFullResponseHandler(StandardCharsets.UTF_8)) + .get(); if (response.getStatus().getCode() != HttpServletResponse.SC_OK) { throw new RE( @@ -151,7 
+158,8 @@ private Map getProperties(DruidNode druidNode) ); } return jsonMapper.readValue( - response.getContent(), new TypeReference>() + response.getContent(), + new TypeReference>() { } ); @@ -159,5 +167,8 @@ private Map getProperties(DruidNode druidNode) catch (IOException | InterruptedException e) { throw new RuntimeException(e); } + catch (ExecutionException e) { + throw new RE(e, "HTTP request to[%s] failed", url); + } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index 5896b36caa0c..c72621211f8f 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -1116,6 +1116,9 @@ public static void checkStateReadAccessForServers( } } + /** + * Returns an iterator over all discoverable Druid nodes in the cluster. + */ public static Iterator getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider) { return Arrays.stream(NodeRole.values()) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java index 23335615409f..fc711d8a110d 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java @@ -37,6 +37,7 @@ import org.apache.druid.guice.LifecycleModule; import org.apache.druid.guice.annotations.Json; import org.apache.druid.java.util.emitter.service.ServiceEmitter; +import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider; import org.apache.druid.query.lookup.LookupReferencesManager; import org.apache.druid.rpc.indexing.NoopOverlordClient; @@ -93,6 +94,8 @@ public class 
DruidCalciteSchemaModuleTest extends CalciteTestBase private SegmentManager segmentManager; @Mock private DruidOperatorTable druidOperatorTable; + @Mock + private HttpClient httpClient; private DruidCalciteSchemaModule target; private Injector injector; @@ -125,6 +128,7 @@ public void setUp() binder.bind(CoordinatorClient.class).to(NoopCoordinatorClient.class); binder.bind(CentralizedDatasourceSchemaConfig.class) .toInstance(CentralizedDatasourceSchemaConfig.create()); + binder.bind(HttpClient.class).toInstance(httpClient); }, new LifecycleModule(), target); diff --git a/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java b/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java index 28e8923f634e..408e18544095 100644 --- a/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java @@ -42,6 +42,7 @@ import org.apache.druid.initialization.DruidModule; import org.apache.druid.jackson.JacksonModule; import org.apache.druid.java.util.emitter.service.ServiceEmitter; +import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.math.expr.ExprMacroTable; import org.apache.druid.query.DefaultQueryConfig; import org.apache.druid.query.GenericQueryMetricsFactory; @@ -119,6 +120,9 @@ public class SqlModuleTest @Mock private QueryRunnerFactoryConglomerate conglomerate; + @Mock + private HttpClient httpClient; + private Injector injector; @Before @@ -134,7 +138,8 @@ public void setUp() queryToolChestWarehouse, lookupExtractorFactoryContainerProvider, joinableFactory, - segmentCacheManager + segmentCacheManager, + httpClient ); } @@ -213,6 +218,7 @@ private Injector makeInjectorWithProperties(final Properties props) binder.bind(CentralizedDatasourceSchemaConfig.class) .toInstance(CentralizedDatasourceSchemaConfig.enabled(false)); binder.bind(DefaultQueryConfig.class).toInstance(DefaultQueryConfig.NIL); + binder.bind(HttpClient.class).toInstance(httpClient); }, 
sqlModule, new TestViewManagerModule() From e3eabcc97c2a0885942eb178b7681ce0312b06d5 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 28 Oct 2025 11:33:20 +0800 Subject: [PATCH 03/28] Combine rows from servers with same hostAndPort --- .../calcite/schema/SystemPropertiesTable.java | 48 +++++++++++-------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java index d62d5eaae755..2fd90dbbcf59 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java @@ -34,6 +34,7 @@ import org.apache.druid.discovery.DruidNodeDiscoveryProvider; import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.RE; +import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.java.util.http.client.Request; import org.apache.druid.java.util.http.client.response.StringFullResponseHandler; @@ -61,15 +62,14 @@ /** * This table contains row per property. It contains all the properties of all druid servers. 
*/ -public final class SystemPropertiesTable extends AbstractTable implements ScannableTable +public class SystemPropertiesTable extends AbstractTable implements ScannableTable { public static final String PROPERTIES_TABLE = "server_properties"; static final RowSignature PROPERTIES_SIGNATURE = RowSignature .builder() - .add("service_name", ColumnType.STRING) + .add("service_name_and_role", ColumnType.STRING) .add("server", ColumnType.STRING) - .add("node_roles", ColumnType.STRING) .add("property", ColumnType.STRING) .add("value", ColumnType.STRING) .build(); @@ -114,29 +114,37 @@ public Enumerable scan(DataContext root) SystemSchema.checkStateReadAccessForServers(authenticationResult, authorizerMapper); final Iterator druidServers = SystemSchema.getDruidServers(druidNodeDiscoveryProvider); - // ! TODO use string builder instead and issue: there are unique service names for each service so combining node_roles we lose the service names - final Map>> serverToPropertiesMap = new HashMap<>(); + final Map>> serverToPropertiesMap = new HashMap<>(); druidServers.forEachRemaining(discoveryDruidNode -> { final DruidNode druidNode = discoveryDruidNode.getDruidNode(); final Map propertiesMap = getProperties(druidNode); if (serverToPropertiesMap.containsKey(druidNode.getHostAndPortToUse())) { - Pair> pair = serverToPropertiesMap.get(druidNode.getHostAndPortToUse()); - serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), Pair.of(pair.lhs + "," + discoveryDruidNode.getNodeRole().getJsonName(), pair.rhs)); + Pair> pair = serverToPropertiesMap.get(druidNode.getHostAndPortToUse()); + serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), Pair.of(addServiceNameAndNodeRole(pair.lhs, discoveryDruidNode.getDruidNode().getServiceName(), discoveryDruidNode.getNodeRole().getJsonName(), false), pair.rhs)); + } else { + final StringBuilder builder = new StringBuilder(); + addServiceNameAndNodeRole(builder, discoveryDruidNode.getDruidNode().getServiceName(), 
discoveryDruidNode.getNodeRole().getJsonName(), true); + serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), Pair.of(builder, propertiesMap.entrySet().stream() + .map(entry -> new Object[]{ + druidNode.getHostAndPortToUse(), + entry.getKey(), + entry.getValue() + }) + ) + ); } - else { - serverToPropertiesMap.put( - druidNode.getHostAndPortToUse(), Pair.of(discoveryDruidNode.getNodeRole().getJsonName(), propertiesMap.entrySet().stream() - .map(entry -> new Object[]{ - druidNode.getServiceName(), - druidNode.getHostAndPortToUse(), - discoveryDruidNode.getNodeRole().getJsonName(), - entry.getKey(), - entry.getValue() - })) - ); - } }); - return Linq4j.asEnumerable(serverToPropertiesMap.values().stream().flatMap(pair -> pair.rhs.map(entry -> new Object[]{entry[0], entry[1], pair.lhs, entry[3], entry[4]})).collect(Collectors.toList())); + return Linq4j.asEnumerable(serverToPropertiesMap.values().stream().flatMap(pair -> pair.rhs.map(entry -> new Object[]{pair.lhs.toString(), entry[0], entry[1], entry[2]})).collect(Collectors.toList())); + } + + private static StringBuilder addServiceNameAndNodeRole(StringBuilder builder, String serviceName, String nodeRole, boolean isFirstEntry) + { + if (isFirstEntry) { + builder.append(StringUtils.format("[%s,%s]", serviceName, nodeRole)); + } else { + builder.append(StringUtils.format(",[%s,%s]", serviceName, nodeRole)); + } + return builder; } private Map getProperties(DruidNode druidNode) From cd005de22432c8de9cb7ad7ed40cb336b0172985 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 28 Oct 2025 15:03:52 +0800 Subject: [PATCH 04/28] Update tests --- .../sql/calcite/schema/SystemSchemaTest.java | 105 ++++++++---------- 1 file changed, 45 insertions(+), 60 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index 0907d00b3007..7c08c5beee23 100644 
--- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -541,13 +541,13 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT public void testGetTableMap() { Assert.assertEquals( - ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors", "properties"), + ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors", "server_properties"), schema.getTableNames() ); final Map tableMap = schema.getTableMap(); Assert.assertEquals( - ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors", "properties"), + ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors", "server_properties"), tableMap.keySet() ); final SystemSchema.SegmentsTable segmentsTable = (SystemSchema.SegmentsTable) schema.getTableMap().get("segments"); @@ -572,10 +572,10 @@ public void testGetTableMap() Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName()); final SystemPropertiesTable propertiesTable = (SystemPropertiesTable) schema.getTableMap() - .get("properties"); + .get("server_properties"); final RelDataType propertiesRowType = propertiesTable.getRowType(new JavaTypeFactoryImpl()); final List propertiesFields = propertiesRowType.getFieldList(); - Assert.assertEquals(5, propertiesFields.size()); + Assert.assertEquals(4, propertiesFields.size()); } @Test @@ -1472,43 +1472,38 @@ public void testPropertiesTable() throws Exception EasyMock.replay(propertiesTable); - final DruidNodeDiscovery coordinatorNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); - final DruidNodeDiscovery middleManagerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); - final DruidNodeDiscovery brokerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); - final DruidNodeDiscovery routerNodeDiscovery = 
EasyMock.createMock(DruidNodeDiscovery.class); - final DruidNodeDiscovery historicalNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); - final DruidNodeDiscovery overlordNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); - final DruidNodeDiscovery peonNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); - final DruidNodeDiscovery indexerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); - - EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.COORDINATOR)).andReturn(coordinatorNodeDiscovery).once(); - EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.MIDDLE_MANAGER)).andReturn(middleManagerNodeDiscovery).once(); - EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER)).andReturn(brokerNodeDiscovery).once(); - EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.ROUTER)).andReturn(routerNodeDiscovery).once(); - EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.HISTORICAL)).andReturn(historicalNodeDiscovery).once(); - EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.OVERLORD)).andReturn(overlordNodeDiscovery).once(); - EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.PEON)).andReturn(peonNodeDiscovery).once(); - EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.INDEXER)).andReturn(indexerNodeDiscovery).once(); + List expectedRows = new ArrayList<>(); - EasyMock.expect(coordinatorNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(coordinator, coordinator2)).once(); - EasyMock.expect(middleManagerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(middleManager)).once(); - EasyMock.expect(brokerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); - EasyMock.expect(routerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); - EasyMock.expect(historicalNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); - 
EasyMock.expect(overlordNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); - EasyMock.expect(peonNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); - EasyMock.expect(indexerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of()).once(); + mockNodeDiscovery(NodeRole.BROKER); + mockNodeDiscovery(NodeRole.ROUTER); + mockNodeDiscovery(NodeRole.HISTORICAL); + mockNodeDiscovery(NodeRole.OVERLORD); + mockNodeDiscovery(NodeRole.PEON); + mockNodeDiscovery(NodeRole.INDEXER); + mockNodeDiscovery(NodeRole.COORDINATOR, coordinator, coordinator2); HttpResponse coordinatorHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); StringFullResponseHolder coordinatorResponseHolder = new StringFullResponseHolder(coordinatorHttpResponse, StandardCharsets.UTF_8); String coordinatorJson = "{\"druid.test-key\": \"test-value\"}"; coordinatorResponseHolder.addChunk(coordinatorJson); + expectedRows.add(new Object[]{ + StringUtils.format("[%s,%s]", coordinator.getDruidNode().getServiceName(), coordinator.getNodeRole().getJsonName()), + coordinator.getDruidNode().getHostAndPortToUse(), + "druid.test-key", "test-value" + }); HttpResponse coordinator2HttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); StringFullResponseHolder coordinator2ResponseHolder = new StringFullResponseHolder(coordinator2HttpResponse, StandardCharsets.UTF_8); String coordinator2Json = "{\"druid.test-key3\": \"test-value3\"}"; coordinator2ResponseHolder.addChunk(coordinator2Json); + expectedRows + .add(new Object[]{ + StringUtils.format("[%s,%s]", coordinator2.getDruidNode().getServiceName(), coordinator2.getNodeRole().getJsonName()), + coordinator2.getDruidNode().getHostAndPortToUse(), + "druid.test-key3", "test-value3" + }); + mockNodeDiscovery(NodeRole.MIDDLE_MANAGER, middleManager); HttpResponse middleManagerHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); StringFullResponseHolder 
middleManagerResponseHolder = new StringFullResponseHolder(middleManagerHttpResponse, StandardCharsets.UTF_8); String middleManagerJson = "{\n" @@ -1516,6 +1511,16 @@ public void testPropertiesTable() throws Exception + "\"druid.test-key2\": \"test-value2\"\n" + "}"; middleManagerResponseHolder.addChunk(middleManagerJson); + expectedRows + .add(new Object[]{ + StringUtils.format("[%s,%s]", middleManager.getDruidNode().getServiceName(), middleManager.getNodeRole().getJsonName()), + middleManager.getDruidNode().getHostAndPortToUse(), "druid.test-key", "test-value" + }); + expectedRows + .add(new Object[]{ + StringUtils.format("[%s,%s]", middleManager.getDruidNode().getServiceName(), middleManager.getNodeRole().getJsonName()), + middleManager.getDruidNode().getHostAndPortToUse(), "druid.test-key2", "test-value2" + }); Map> urlToResponse = ImmutableMap.of( getStatusPropertiesUrl(coordinator), Futures.immediateFuture(coordinatorResponseHolder), @@ -1540,40 +1545,11 @@ public void testPropertiesTable() throws Exception }).times(3); EasyMock.replay(druidNodeDiscoveryProvider, responseHandler, httpClient); - EasyMock.replay( - coordinatorNodeDiscovery, - middleManagerNodeDiscovery, - brokerNodeDiscovery, - routerNodeDiscovery, - historicalNodeDiscovery, - overlordNodeDiscovery, - peonNodeDiscovery, - indexerNodeDiscovery - ); - - List expectedRows = new ArrayList<>(); - expectedRows.add(new Object[]{ - coordinator.getDruidNode().getServiceName(), coordinator.getDruidNode().getHost(), - coordinator.getNodeRole().getJsonName(), "druid.test-key", "test-value" - }); - expectedRows - .add(new Object[]{ - coordinator2.getDruidNode().getServiceName(), coordinator2.getDruidNode().getHost(), - coordinator2.getNodeRole().getJsonName(), "druid.test-key3", "test-value3" - }); - expectedRows - .add(new Object[]{ - middleManager.getDruidNode().getServiceName(), middleManager.getDruidNode().getHost(), - middleManager.getNodeRole().getJsonName(), "druid.test-key", "test-value" - }); - 
expectedRows - .add(new Object[]{ - middleManager.getDruidNode().getServiceName(), middleManager.getDruidNode().getHost(), - middleManager.getNodeRole().getJsonName(), "druid.test-key2", "test-value2" - }); DataContext dataContext = createDataContext(Users.SUPER); final List rows = propertiesTable.scan(dataContext).toList(); + expectedRows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[1]).compareTo(row2[1])); + rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[1]).compareTo(row2[1])); Assert.assertEquals(expectedRows.size(), rows.size()); for (int i = 0; i < expectedRows.size(); i++) { Assert.assertArrayEquals(expectedRows.get(i), rows.get(i)); @@ -1723,6 +1699,15 @@ private static void verifyTypes(final List rows, final RowSignature si } } + private DruidNodeDiscovery mockNodeDiscovery(NodeRole nodeRole, DiscoveryDruidNode... discoveryDruidNodes) + { + final DruidNodeDiscovery druidNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class); + EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(nodeRole)).andReturn(druidNodeDiscovery).once(); + EasyMock.expect(druidNodeDiscovery.getAllNodes()).andReturn(ImmutableList.copyOf(discoveryDruidNodes)).once(); + EasyMock.replay(druidNodeDiscovery); + return druidNodeDiscovery; + } + /** * Usernames to be used in tests. 
*/ From d4ad33533162a1f562539223be05191588f02089 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 28 Oct 2025 16:24:34 +0800 Subject: [PATCH 05/28] Fix tests --- .../java/org/apache/druid/sql/calcite/CalciteQueryTest.java | 2 ++ .../druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java | 2 ++ .../druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java | 2 ++ .../test/java/org/apache/druid/sql/guice/SqlModuleTest.java | 3 ++- 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java index edd78ae2007e..f00410465008 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/CalciteQueryTest.java @@ -208,6 +208,7 @@ public void testInformationSchemaTables() .add(new Object[]{"lookup", "lookyloo-chain", "TABLE", "YES", "YES"}) .add(new Object[]{"lookup", "lookyloo121", "TABLE", "YES", "YES"}) .add(new Object[]{"sys", "segments", "SYSTEM_TABLE", "NO", "NO"}) + .add(new Object[]{"sys", "server_properties", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "server_segments", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "servers", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "supervisors", "SYSTEM_TABLE", "NO", "NO"}) @@ -253,6 +254,7 @@ public void testInformationSchemaTables() .add(new Object[]{"lookup", "lookyloo-chain", "TABLE", "YES", "YES"}) .add(new Object[]{"lookup", "lookyloo121", "TABLE", "YES", "YES"}) .add(new Object[]{"sys", "segments", "SYSTEM_TABLE", "NO", "NO"}) + .add(new Object[]{"sys", "server_properties", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "server_segments", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "servers", "SYSTEM_TABLE", "NO", "NO"}) .add(new Object[]{"sys", "supervisors", "SYSTEM_TABLE", "NO", "NO"}) diff --git 
a/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java index b0e707d00b86..7027a5de9755 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/DruidPlannerResourceAnalyzeTest.java @@ -199,12 +199,14 @@ public void testSysTables() testSysTable("SELECT * FROM sys.server_segments", null, PLANNER_CONFIG_DEFAULT); testSysTable("SELECT * FROM sys.tasks", null, PLANNER_CONFIG_DEFAULT); testSysTable("SELECT * FROM sys.supervisors", null, PLANNER_CONFIG_DEFAULT); + testSysTable("SELECT * FROM sys.server_properties", null, PLANNER_CONFIG_DEFAULT); testSysTable("SELECT * FROM sys.segments", "segments", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); testSysTable("SELECT * FROM sys.servers", "servers", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); testSysTable("SELECT * FROM sys.server_segments", "server_segments", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); testSysTable("SELECT * FROM sys.tasks", "tasks", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); testSysTable("SELECT * FROM sys.supervisors", "supervisors", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); + testSysTable("SELECT * FROM sys.server_properties", "server_properties", PLANNER_CONFIG_AUTHORIZE_SYS_TABLES); } private void testSysTable(String sql, String name, PlannerConfig plannerConfig) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java index fc711d8a110d..54baec57225c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java @@ -38,6 +38,7 @@ import org.apache.druid.guice.annotations.Json; import org.apache.druid.java.util.emitter.service.ServiceEmitter; import 
org.apache.druid.java.util.http.client.HttpClient; +import org.apache.druid.guice.annotations.EscalatedClient; import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider; import org.apache.druid.query.lookup.LookupReferencesManager; import org.apache.druid.rpc.indexing.NoopOverlordClient; @@ -129,6 +130,7 @@ public void setUp() binder.bind(CentralizedDatasourceSchemaConfig.class) .toInstance(CentralizedDatasourceSchemaConfig.create()); binder.bind(HttpClient.class).toInstance(httpClient); + binder.bind(HttpClient.class).annotatedWith(EscalatedClient.class).toInstance(httpClient); }, new LifecycleModule(), target); diff --git a/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java b/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java index 408e18544095..90a3c4ab7b3e 100644 --- a/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/guice/SqlModuleTest.java @@ -38,6 +38,7 @@ import org.apache.druid.guice.LifecycleModule; import org.apache.druid.guice.PolyBind; import org.apache.druid.guice.ServerModule; +import org.apache.druid.guice.annotations.EscalatedClient; import org.apache.druid.guice.security.PolicyModule; import org.apache.druid.initialization.DruidModule; import org.apache.druid.jackson.JacksonModule; @@ -218,7 +219,7 @@ private Injector makeInjectorWithProperties(final Properties props) binder.bind(CentralizedDatasourceSchemaConfig.class) .toInstance(CentralizedDatasourceSchemaConfig.enabled(false)); binder.bind(DefaultQueryConfig.class).toInstance(DefaultQueryConfig.NIL); - binder.bind(HttpClient.class).toInstance(httpClient); + binder.bind(HttpClient.class).annotatedWith(EscalatedClient.class).toInstance(httpClient); }, sqlModule, new TestViewManagerModule() From 3de794c0b054a2ffb9fca5a299faa918f21629bd Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 28 Oct 2025 17:02:03 +0800 Subject: [PATCH 06/28] Revert 
tableMap to use ImmutableMap.of --- .../sql/calcite/schema/SystemSchema.java | 39 ++++++++++--------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index c72621211f8f..bbb4d7f4f5d4 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -241,24 +241,27 @@ public SystemSchema( ) { Preconditions.checkNotNull(serverView, "serverView"); - this.tableMap = ImmutableMap.builder() - .put(SEGMENTS_TABLE, new SegmentsTable(druidSchema, metadataView, jsonMapper, authorizerMapper)) - .put( - SERVERS_TABLE, - new ServersTable( - druidNodeDiscoveryProvider, - serverInventoryView, - authorizerMapper, - overlordClient, - coordinatorClient, - jsonMapper - ) - ) - .put(SERVER_SEGMENTS_TABLE, new ServerSegmentsTable(serverView, authorizerMapper)) - .put(TASKS_TABLE, new TasksTable(overlordClient, authorizerMapper)) - .put(SUPERVISOR_TABLE, new SupervisorsTable(overlordClient, authorizerMapper)) - .put(SystemPropertiesTable.PROPERTIES_TABLE, new SystemPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper)) - .build(); + this.tableMap = ImmutableMap.of( + SEGMENTS_TABLE, + new SegmentsTable(druidSchema, metadataView, jsonMapper, authorizerMapper), + SERVERS_TABLE, + new ServersTable( + druidNodeDiscoveryProvider, + serverInventoryView, + authorizerMapper, + overlordClient, + coordinatorClient, + jsonMapper + ), + SERVER_SEGMENTS_TABLE, + new ServerSegmentsTable(serverView, authorizerMapper), + TASKS_TABLE, + new TasksTable(overlordClient, authorizerMapper), + SUPERVISOR_TABLE, + new SupervisorsTable(overlordClient, authorizerMapper), + SystemPropertiesTable.PROPERTIES_TABLE, + new SystemPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper) + ); } @Override 
From 22e915420fc6013918eae869ddd0e9e2cdf1a39c Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 28 Oct 2025 17:46:09 +0800 Subject: [PATCH 07/28] Separate service_name and node_roles column --- .../calcite/schema/SystemPropertiesTable.java | 20 ++++++------------- .../sql/calcite/schema/SystemSchemaTest.java | 20 ++++++++++++------- 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java index 2fd90dbbcf59..d31196f5bb1e 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java @@ -68,8 +68,9 @@ public class SystemPropertiesTable extends AbstractTable implements ScannableTab static final RowSignature PROPERTIES_SIGNATURE = RowSignature .builder() - .add("service_name_and_role", ColumnType.STRING) + .add("service_name", ColumnType.STRING) .add("server", ColumnType.STRING) + .add("node_roles", ColumnType.STRING) .add("property", ColumnType.STRING) .add("value", ColumnType.STRING) .build(); @@ -120,12 +121,13 @@ public Enumerable scan(DataContext root) final Map propertiesMap = getProperties(druidNode); if (serverToPropertiesMap.containsKey(druidNode.getHostAndPortToUse())) { Pair> pair = serverToPropertiesMap.get(druidNode.getHostAndPortToUse()); - serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), Pair.of(addServiceNameAndNodeRole(pair.lhs, discoveryDruidNode.getDruidNode().getServiceName(), discoveryDruidNode.getNodeRole().getJsonName(), false), pair.rhs)); + pair.lhs.append(StringUtils.format(",%s", discoveryDruidNode.getNodeRole().getJsonName())); } else { final StringBuilder builder = new StringBuilder(); - addServiceNameAndNodeRole(builder, discoveryDruidNode.getDruidNode().getServiceName(), 
discoveryDruidNode.getNodeRole().getJsonName(), true); + builder.append(StringUtils.format("%s", discoveryDruidNode.getNodeRole().getJsonName())); serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), Pair.of(builder, propertiesMap.entrySet().stream() .map(entry -> new Object[]{ + druidNode.getServiceName(), druidNode.getHostAndPortToUse(), entry.getKey(), entry.getValue() @@ -134,17 +136,7 @@ public Enumerable scan(DataContext root) ); } }); - return Linq4j.asEnumerable(serverToPropertiesMap.values().stream().flatMap(pair -> pair.rhs.map(entry -> new Object[]{pair.lhs.toString(), entry[0], entry[1], entry[2]})).collect(Collectors.toList())); - } - - private static StringBuilder addServiceNameAndNodeRole(StringBuilder builder, String serviceName, String nodeRole, boolean isFirstEntry) - { - if (isFirstEntry) { - builder.append(StringUtils.format("[%s,%s]", serviceName, nodeRole)); - } else { - builder.append(StringUtils.format(",[%s,%s]", serviceName, nodeRole)); - } - return builder; + return Linq4j.asEnumerable(serverToPropertiesMap.values().stream().flatMap(pair -> pair.rhs.map(entry -> new Object[]{entry[0], entry[1], pair.lhs.toString(), entry[2], entry[3]})).collect(Collectors.toList())); } private Map getProperties(DruidNode druidNode) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index 7c08c5beee23..17f2baa67b4c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -575,7 +575,7 @@ public void testGetTableMap() .get("server_properties"); final RelDataType propertiesRowType = propertiesTable.getRowType(new JavaTypeFactoryImpl()); final List propertiesFields = propertiesRowType.getFieldList(); - Assert.assertEquals(4, propertiesFields.size()); + Assert.assertEquals(5, propertiesFields.size()); } @Test @@ -1487,8 
+1487,9 @@ public void testPropertiesTable() throws Exception String coordinatorJson = "{\"druid.test-key\": \"test-value\"}"; coordinatorResponseHolder.addChunk(coordinatorJson); expectedRows.add(new Object[]{ - StringUtils.format("[%s,%s]", coordinator.getDruidNode().getServiceName(), coordinator.getNodeRole().getJsonName()), + coordinator.getDruidNode().getServiceName(), coordinator.getDruidNode().getHostAndPortToUse(), + coordinator.getNodeRole().getJsonName(), "druid.test-key", "test-value" }); @@ -1498,8 +1499,9 @@ public void testPropertiesTable() throws Exception coordinator2ResponseHolder.addChunk(coordinator2Json); expectedRows .add(new Object[]{ - StringUtils.format("[%s,%s]", coordinator2.getDruidNode().getServiceName(), coordinator2.getNodeRole().getJsonName()), + coordinator2.getDruidNode().getServiceName(), coordinator2.getDruidNode().getHostAndPortToUse(), + coordinator2.getNodeRole().getJsonName(), "druid.test-key3", "test-value3" }); @@ -1513,13 +1515,17 @@ public void testPropertiesTable() throws Exception middleManagerResponseHolder.addChunk(middleManagerJson); expectedRows .add(new Object[]{ - StringUtils.format("[%s,%s]", middleManager.getDruidNode().getServiceName(), middleManager.getNodeRole().getJsonName()), - middleManager.getDruidNode().getHostAndPortToUse(), "druid.test-key", "test-value" + middleManager.getDruidNode().getServiceName(), + middleManager.getDruidNode().getHostAndPortToUse(), + middleManager.getNodeRole().getJsonName(), + "druid.test-key", "test-value" }); expectedRows .add(new Object[]{ - StringUtils.format("[%s,%s]", middleManager.getDruidNode().getServiceName(), middleManager.getNodeRole().getJsonName()), - middleManager.getDruidNode().getHostAndPortToUse(), "druid.test-key2", "test-value2" + middleManager.getDruidNode().getServiceName(), + middleManager.getDruidNode().getHostAndPortToUse(), + middleManager.getNodeRole().getJsonName(), + "druid.test-key2", "test-value2" }); Map> urlToResponse = ImmutableMap.of( From 
a9b29a26979d31008c01f9546236160162c0cce8 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 11:50:39 +0800 Subject: [PATCH 08/28] Add embedded test --- .../schema/SystemPropertiesTableTest.java | 122 ++++++++++++++++++ .../schema/DruidCalciteSchemaModuleTest.java | 2 +- 2 files changed, 123 insertions(+), 1 deletion(-) create mode 100644 embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java new file mode 100644 index 000000000000..ad4fd244d893 --- /dev/null +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.testing.embedded.schema; + +import com.fasterxml.jackson.core.type.TypeReference; +import org.apache.druid.discovery.NodeRole; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.rpc.RequestBuilder; +import org.apache.druid.testing.embedded.EmbeddedBroker; +import org.apache.druid.testing.embedded.EmbeddedCoordinator; +import org.apache.druid.testing.embedded.EmbeddedDruidCluster; +import org.apache.druid.testing.embedded.EmbeddedOverlord; +import org.apache.druid.testing.embedded.junit5.EmbeddedClusterTestBase; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Map; + +public class SystemPropertiesTableTest extends EmbeddedClusterTestBase +{ + private static final String HOST = "localhost"; + private static final String BROKER_PORT = "8082"; + private static final String OVERLORD_PORT = "8090"; + private static final String BROKER_SERVICE = "test/broker"; + private static final String OVERLORD_SERVICE = "test/overlord"; + + private final EmbeddedBroker broker = new EmbeddedBroker() + .addProperty("test.onlyBroker", "brokerValue") + .addProperty("mytestbrokerproperty", "mytestbrokervalue") + .addProperty("druid.host", HOST) + .addProperty("druid.plaintextPort", BROKER_PORT) + .addProperty("druid.service", BROKER_SERVICE); + + private final EmbeddedOverlord overlord = new EmbeddedOverlord() + .addProperty("druid.service", OVERLORD_SERVICE) + .addProperty("druid.host", HOST) + .addProperty("druid.plaintextPort", OVERLORD_PORT) + .addProperty("test.onlyOverlord", "overlordValue"); + + + @Override + protected EmbeddedDruidCluster createCluster() + { + return EmbeddedDruidCluster + .withZookeeper() + .addServer(new EmbeddedCoordinator()) + .addServer(overlord) + .addServer(broker) + .addCommonProperty("commonProperty", "commonValue"); + } + + @Test + public void 
test_serverPropertiesTable() + { + final Map overlordProps = cluster.callApi().serviceClient().onLeaderOverlord( + mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), + new TypeReference<>(){} + ); + verifyPropertiesForServer(overlordProps, OVERLORD_SERVICE, getHostAndPort(HOST, OVERLORD_PORT), NodeRole.OVERLORD_JSON_NAME); + + final Map brokerProps = cluster.callApi().serviceClient().onAnyBroker( + mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), + new TypeReference<>(){} + ); + verifyPropertiesForServer(brokerProps, BROKER_SERVICE, getHostAndPort(HOST, BROKER_PORT), NodeRole.BROKER_JSON_NAME); + final String test = cluster.runSql("SELECT * FROM sys.server_properties"); + } + + private void verifyPropertiesForServer(Map properties, String serivceName, String hostAndPort, String nodeRole) + { + String[] expectedRows = properties.entrySet().stream().map(entry -> String.join( + ",", + escapeCsvField(serivceName), + escapeCsvField(hostAndPort), + escapeCsvField(nodeRole), + escapeCsvField(entry.getKey()), + escapeCsvField(entry.getValue()) + )).toArray(String[]::new); + Arrays.sort(expectedRows, String::compareTo); + String[] actualRows = Arrays.stream(cluster.runSql("SELECT * FROM sys.server_properties WHERE server='%s'", hostAndPort).split("\n")).map(entry -> StringUtils.replace(entry, "...", "\"\"")).toArray(String[]::new); + Arrays.sort(actualRows, String::compareTo); + Assertions.assertArrayEquals(expectedRows, actualRows); + } + + /** + * Escapes a field value for CSV format. 
+ */ + private String escapeCsvField(String field) + { + if (field == null) { + return ""; + } + if (field.contains(",") || field.contains("\"") || field.contains("\n") || field.contains("\r")) { + return "\"" + field.replace("\"", "\"\"") + "\""; + } + return field; + } + + private String getHostAndPort(String host, String port) + { + return StringUtils.format("%s:%s", host, port); + } +} diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java index 54baec57225c..ca2a82db82e0 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidCalciteSchemaModuleTest.java @@ -35,10 +35,10 @@ import org.apache.druid.discovery.DruidNodeDiscoveryProvider; import org.apache.druid.guice.LazySingleton; import org.apache.druid.guice.LifecycleModule; +import org.apache.druid.guice.annotations.EscalatedClient; import org.apache.druid.guice.annotations.Json; import org.apache.druid.java.util.emitter.service.ServiceEmitter; import org.apache.druid.java.util.http.client.HttpClient; -import org.apache.druid.guice.annotations.EscalatedClient; import org.apache.druid.query.lookup.LookupExtractorFactoryContainerProvider; import org.apache.druid.query.lookup.LookupReferencesManager; import org.apache.druid.rpc.indexing.NoopOverlordClient; From 191e4ea359df63d01f53b462c3402baa10293ea3 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 12:02:27 +0800 Subject: [PATCH 09/28] Use StringUtils.replace instead --- .../testing/embedded/schema/SystemPropertiesTableTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java 
b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java index ad4fd244d893..19f0703af83e 100644 --- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java @@ -110,7 +110,7 @@ private String escapeCsvField(String field) return ""; } if (field.contains(",") || field.contains("\"") || field.contains("\n") || field.contains("\r")) { - return "\"" + field.replace("\"", "\"\"") + "\""; + return "\"" + StringUtils.replace(field, "\"", "\"\"") + "\""; } return field; } From 5617d72ada617f014ec2fd3b96de56e413207aa9 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 15:54:46 +0800 Subject: [PATCH 10/28] Fix embedded test --- .../schema/SystemPropertiesTableTest.java | 42 ++++++++++--------- .../CoordinatorJettyServerInitializer.java | 3 +- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java index 19f0703af83e..2664569d57b8 100644 --- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java @@ -37,32 +37,34 @@ public class SystemPropertiesTableTest extends EmbeddedClusterTestBase { - private static final String HOST = "localhost"; - private static final String BROKER_PORT = "8082"; - private static final String OVERLORD_PORT = "8090"; + private static final String BROKER_PORT = "9082"; private static final String BROKER_SERVICE = "test/broker"; + private static final String OVERLORD_PORT = "9090"; private static final String OVERLORD_SERVICE = 
"test/overlord"; + private static final String COORDINATOR_PORT = "9081"; + private static final String COORDINATOR_SERVICE = "test/coordinator"; private final EmbeddedBroker broker = new EmbeddedBroker() - .addProperty("test.onlyBroker", "brokerValue") - .addProperty("mytestbrokerproperty", "mytestbrokervalue") - .addProperty("druid.host", HOST) + .addProperty("druid.service", BROKER_SERVICE) .addProperty("druid.plaintextPort", BROKER_PORT) - .addProperty("druid.service", BROKER_SERVICE); + .addProperty("test.onlyBroker", "brokerValue"); private final EmbeddedOverlord overlord = new EmbeddedOverlord() - .addProperty("druid.service", OVERLORD_SERVICE) - .addProperty("druid.host", HOST) - .addProperty("druid.plaintextPort", OVERLORD_PORT) - .addProperty("test.onlyOverlord", "overlordValue"); + .addProperty("druid.service", OVERLORD_SERVICE) + .addProperty("druid.plaintextPort", OVERLORD_PORT) + .addProperty("test.onlyOverlord", "overlordValue"); + private final EmbeddedCoordinator coordinator = new EmbeddedCoordinator() + .addProperty("druid.service", COORDINATOR_SERVICE) + .addProperty("druid.plaintextPort", COORDINATOR_PORT) + .addProperty("test.onlyCoordinator", "coordinatorValue"); @Override protected EmbeddedDruidCluster createCluster() { return EmbeddedDruidCluster .withZookeeper() - .addServer(new EmbeddedCoordinator()) + .addServer(coordinator) .addServer(overlord) .addServer(broker) .addCommonProperty("commonProperty", "commonValue"); @@ -75,14 +77,19 @@ public void test_serverPropertiesTable() mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), new TypeReference<>(){} ); - verifyPropertiesForServer(overlordProps, OVERLORD_SERVICE, getHostAndPort(HOST, OVERLORD_PORT), NodeRole.OVERLORD_JSON_NAME); + verifyPropertiesForServer(overlordProps, OVERLORD_SERVICE, StringUtils.format("localhost:%s", OVERLORD_PORT), NodeRole.OVERLORD_JSON_NAME); final Map brokerProps = cluster.callApi().serviceClient().onAnyBroker( mapper -> new 
RequestBuilder(HttpMethod.GET, "/status/properties"), new TypeReference<>(){} ); - verifyPropertiesForServer(brokerProps, BROKER_SERVICE, getHostAndPort(HOST, BROKER_PORT), NodeRole.BROKER_JSON_NAME); - final String test = cluster.runSql("SELECT * FROM sys.server_properties"); + verifyPropertiesForServer(brokerProps, BROKER_SERVICE, StringUtils.format("localhost:%s", BROKER_PORT), NodeRole.BROKER_JSON_NAME); + + final Map coordinatorProps = cluster.callApi().serviceClient().onLeaderCoordinator( + mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), + new TypeReference<>(){} + ); + verifyPropertiesForServer(coordinatorProps, COORDINATOR_SERVICE, StringUtils.format("localhost:%s", COORDINATOR_PORT), NodeRole.COORDINATOR_JSON_NAME); } private void verifyPropertiesForServer(Map properties, String serivceName, String hostAndPort, String nodeRole) @@ -114,9 +121,4 @@ private String escapeCsvField(String field) } return field; } - - private String getHostAndPort(String host, String port) - { - return StringUtils.format("%s:%s", host, port); - } } diff --git a/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java b/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java index ece5fa2be4ac..3fd6b41031be 100644 --- a/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java +++ b/services/src/main/java/org/apache/druid/cli/CoordinatorJettyServerInitializer.java @@ -24,7 +24,6 @@ import com.google.inject.Inject; import com.google.inject.Injector; import com.google.inject.Key; -import com.google.inject.servlet.GuiceFilter; import org.apache.druid.guice.annotations.Json; import org.apache.druid.server.http.OverlordProxyServlet; import org.apache.druid.server.http.RedirectFilter; @@ -110,7 +109,7 @@ public void initialize(Server server, Injector injector) // add some paths not to be redirected to leader. 
final FilterHolder guiceFilterHolder = JettyServerInitUtils.getGuiceFilterHolder(injector); - root.addFilter(GuiceFilter.class, "/status/*", null); + root.addFilter(guiceFilterHolder, "/status/*", null); root.addFilter(guiceFilterHolder, "/druid-internal/*", null); // redirect anything other than status to the current lead From 78be93cffce93ce679468e6df76f0396e4634d4b Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 15:55:43 +0800 Subject: [PATCH 11/28] Fix based on feedback --- .../calcite/schema/SystemPropertiesTable.java | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java index d31196f5bb1e..0866c102d7e3 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java @@ -60,13 +60,16 @@ import java.util.stream.Stream; /** - * This table contains row per property. It contains all the properties of all druid servers. + * System schema table {@code sys.server_properties} that contains the properties of all Druid servers. + * Each row contains the value of a single property. If a server has multiple node roles, all the rows for + * that server would have multiple values in the column {@code node_roles} rather than duplicating all the + * rows. 
*/ public class SystemPropertiesTable extends AbstractTable implements ScannableTable { - public static final String PROPERTIES_TABLE = "server_properties"; + public static final String TABLE_NAME = "server_properties"; - static final RowSignature PROPERTIES_SIGNATURE = RowSignature + static final RowSignature ROW_SIGNATURE = RowSignature .builder() .add("service_name", ColumnType.STRING) .add("server", ColumnType.STRING) @@ -96,7 +99,7 @@ public SystemPropertiesTable( @Override public RelDataType getRowType(RelDataTypeFactory typeFactory) { - return RowSignatures.toRelDataType(PROPERTIES_SIGNATURE, typeFactory); + return RowSignatures.toRelDataType(ROW_SIGNATURE, typeFactory); } @Override @@ -151,7 +154,7 @@ private Map getProperties(DruidNode druidNode) if (response.getStatus().getCode() != HttpServletResponse.SC_OK) { throw new RE( - "Failed to get properties from node at [%s]. Error code [%d], description [%s].", + "Failed to get properties from node[%s]. Error code[%d], description[%s].", url, response.getStatus().getCode(), response.getStatus().getReasonPhrase() @@ -159,11 +162,14 @@ private Map getProperties(DruidNode druidNode) } return jsonMapper.readValue( response.getContent(), - new TypeReference>() - { - } + new TypeReference>(){} ); } + catch (Exception e) { + throw DruidException.forPersona(DruidException.Persona.USER) + .ofCategory(DruidException.Category.UNCATEGORIZED) + .build(e, "HTTP request to[%s] failed", url); + } catch (IOException | InterruptedException e) { throw new RuntimeException(e); } From 3d31b1d68a07cff61a0740482093c60ff7e22e0a Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:08:15 +0800 Subject: [PATCH 12/28] Refactor to use ServerProperties class --- .../calcite/schema/SystemPropertiesTable.java | 56 +++++++++++-------- .../sql/calcite/schema/SystemSchema.java | 4 +- 2 files changed, 35 insertions(+), 25 deletions(-) diff --git 
a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java index 0866c102d7e3..8a8be4675e48 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java @@ -32,9 +32,8 @@ import org.apache.calcite.schema.impl.AbstractTable; import org.apache.druid.discovery.DiscoveryDruidNode; import org.apache.druid.discovery.DruidNodeDiscoveryProvider; -import org.apache.druid.java.util.common.Pair; +import org.apache.druid.error.DruidException; import org.apache.druid.java.util.common.RE; -import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.java.util.http.client.Request; import org.apache.druid.java.util.http.client.response.StringFullResponseHandler; @@ -49,13 +48,14 @@ import org.jboss.netty.handler.codec.http.HttpMethod; import javax.servlet.http.HttpServletResponse; -import java.io.IOException; import java.net.URL; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -118,28 +118,18 @@ public Enumerable scan(DataContext root) SystemSchema.checkStateReadAccessForServers(authenticationResult, authorizerMapper); final Iterator druidServers = SystemSchema.getDruidServers(druidNodeDiscoveryProvider); - final Map>> serverToPropertiesMap = new HashMap<>(); + final Map serverToPropertiesMap = new HashMap<>(); druidServers.forEachRemaining(discoveryDruidNode -> { final DruidNode druidNode = discoveryDruidNode.getDruidNode(); final Map propertiesMap = getProperties(druidNode); if 
(serverToPropertiesMap.containsKey(druidNode.getHostAndPortToUse())) { - Pair> pair = serverToPropertiesMap.get(druidNode.getHostAndPortToUse()); - pair.lhs.append(StringUtils.format(",%s", discoveryDruidNode.getNodeRole().getJsonName())); + ServerProperties serverProperties = serverToPropertiesMap.get(druidNode.getHostAndPortToUse()); + serverProperties.addNodeRole(discoveryDruidNode.getNodeRole().getJsonName()); } else { - final StringBuilder builder = new StringBuilder(); - builder.append(StringUtils.format("%s", discoveryDruidNode.getNodeRole().getJsonName())); - serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), Pair.of(builder, propertiesMap.entrySet().stream() - .map(entry -> new Object[]{ - druidNode.getServiceName(), - druidNode.getHostAndPortToUse(), - entry.getKey(), - entry.getValue() - }) - ) - ); + serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), new ServerProperties(druidNode.getServiceName(), druidNode.getHostAndPortToUse(), new ArrayList<>(Arrays.asList(discoveryDruidNode.getNodeRole().getJsonName())), propertiesMap)); } }); - return Linq4j.asEnumerable(serverToPropertiesMap.values().stream().flatMap(pair -> pair.rhs.map(entry -> new Object[]{entry[0], entry[1], pair.lhs.toString(), entry[2], entry[3]})).collect(Collectors.toList())); + return Linq4j.asEnumerable(serverToPropertiesMap.values().stream().flatMap(ServerProperties::toRows).collect(Collectors.toList())); } private Map getProperties(DruidNode druidNode) @@ -170,11 +160,31 @@ private Map getProperties(DruidNode druidNode) .ofCategory(DruidException.Category.UNCATEGORIZED) .build(e, "HTTP request to[%s] failed", url); } - catch (IOException | InterruptedException e) { - throw new RuntimeException(e); + } + + private static class ServerProperties + { + final String serviceName; + final String server; + final List nodeRoles; + final Map properties; + + public ServerProperties(String serviceName, String server, List nodeRoles, Map properties) + { + this.serviceName = 
serviceName; + this.server = server; + this.nodeRoles = nodeRoles; + this.properties = properties; + } + + public void addNodeRole(String nodeRole) + { + nodeRoles.add(nodeRole); } - catch (ExecutionException e) { - throw new RE(e, "HTTP request to[%s] failed", url); + + public Stream toRows() + { + return properties.entrySet().stream().map(entry -> new Object[]{serviceName, server, nodeRoles.toString(), entry.getKey(), entry.getValue()}); } } } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index bbb4d7f4f5d4..56900f41fbb2 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -237,7 +237,7 @@ public SystemSchema( final OverlordClient overlordClient, final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider, final ObjectMapper jsonMapper, - final @EscalatedClient HttpClient httpClient + @EscalatedClient final HttpClient httpClient ) { Preconditions.checkNotNull(serverView, "serverView"); @@ -259,7 +259,7 @@ public SystemSchema( new TasksTable(overlordClient, authorizerMapper), SUPERVISOR_TABLE, new SupervisorsTable(overlordClient, authorizerMapper), - SystemPropertiesTable.PROPERTIES_TABLE, + SystemPropertiesTable.TABLE_NAME, new SystemPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper) ); } From 2d4ca97db5b6e9103660d14640526fd0bb0547fd Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:13:00 +0800 Subject: [PATCH 13/28] Revert formatting --- .../sql/calcite/schema/SystemSchema.java | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index 56900f41fbb2..6a6d39ca7b8e 
100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -242,25 +242,25 @@ public SystemSchema( { Preconditions.checkNotNull(serverView, "serverView"); this.tableMap = ImmutableMap.of( - SEGMENTS_TABLE, - new SegmentsTable(druidSchema, metadataView, jsonMapper, authorizerMapper), - SERVERS_TABLE, - new ServersTable( - druidNodeDiscoveryProvider, - serverInventoryView, - authorizerMapper, - overlordClient, - coordinatorClient, - jsonMapper - ), - SERVER_SEGMENTS_TABLE, - new ServerSegmentsTable(serverView, authorizerMapper), - TASKS_TABLE, - new TasksTable(overlordClient, authorizerMapper), - SUPERVISOR_TABLE, - new SupervisorsTable(overlordClient, authorizerMapper), - SystemPropertiesTable.TABLE_NAME, - new SystemPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper) + SEGMENTS_TABLE, + new SegmentsTable(druidSchema, metadataView, jsonMapper, authorizerMapper), + SERVERS_TABLE, + new ServersTable( + druidNodeDiscoveryProvider, + serverInventoryView, + authorizerMapper, + overlordClient, + coordinatorClient, + jsonMapper + ), + SERVER_SEGMENTS_TABLE, + new ServerSegmentsTable(serverView, authorizerMapper), + TASKS_TABLE, + new TasksTable(overlordClient, authorizerMapper), + SUPERVISOR_TABLE, + new SupervisorsTable(overlordClient, authorizerMapper), + SystemPropertiesTable.TABLE_NAME, + new SystemPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper) ); } From 3eb02c50a30eae01f358aa19eea294ed9a653373 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:44:50 +0800 Subject: [PATCH 14/28] Remove unnecessary throw Exception --- .../org/apache/druid/sql/calcite/schema/SystemSchemaTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java 
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index 17f2baa67b4c..98353b159b06 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -1464,7 +1464,7 @@ public void testSupervisorTableAuth() } @Test - public void testPropertiesTable() throws Exception + public void testPropertiesTable() { SystemPropertiesTable propertiesTable = EasyMock.createMockBuilder(SystemPropertiesTable.class) .withConstructor(druidNodeDiscoveryProvider, authMapper, httpClient, MAPPER) From c01a5bfa21fc045cd1fdf753e0f307768371f36e Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 17:32:15 +0800 Subject: [PATCH 15/28] Update test to use list for node roles --- .../apache/druid/sql/calcite/schema/SystemSchemaTest.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index 98353b159b06..f505566aa519 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -1489,7 +1489,7 @@ public void testPropertiesTable() expectedRows.add(new Object[]{ coordinator.getDruidNode().getServiceName(), coordinator.getDruidNode().getHostAndPortToUse(), - coordinator.getNodeRole().getJsonName(), + ImmutableList.of(coordinator.getNodeRole().getJsonName()).toString(), "druid.test-key", "test-value" }); @@ -1501,7 +1501,7 @@ public void testPropertiesTable() .add(new Object[]{ coordinator2.getDruidNode().getServiceName(), coordinator2.getDruidNode().getHostAndPortToUse(), - coordinator2.getNodeRole().getJsonName(), + ImmutableList.of(coordinator2.getNodeRole().getJsonName()).toString(), "druid.test-key3", "test-value3" 
}); @@ -1517,14 +1517,14 @@ public void testPropertiesTable() .add(new Object[]{ middleManager.getDruidNode().getServiceName(), middleManager.getDruidNode().getHostAndPortToUse(), - middleManager.getNodeRole().getJsonName(), + ImmutableList.of(middleManager.getNodeRole().getJsonName()).toString(), "druid.test-key", "test-value" }); expectedRows .add(new Object[]{ middleManager.getDruidNode().getServiceName(), middleManager.getDruidNode().getHostAndPortToUse(), - middleManager.getNodeRole().getJsonName(), + ImmutableList.of(middleManager.getNodeRole().getJsonName()).toString(), "druid.test-key2", "test-value2" }); From ec7c943e5d57e34d75099866895791702f0917c6 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Wed, 29 Oct 2025 18:23:01 +0800 Subject: [PATCH 16/28] Store nodeRole in array to match SQL return format --- .../testing/embedded/schema/SystemPropertiesTableTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java index 2664569d57b8..cea2baff754a 100644 --- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java @@ -98,7 +98,7 @@ private void verifyPropertiesForServer(Map properties, String se ",", escapeCsvField(serivceName), escapeCsvField(hostAndPort), - escapeCsvField(nodeRole), + escapeCsvField(ImmutableList.of(nodeRole).toString()), escapeCsvField(entry.getKey()), escapeCsvField(entry.getValue()) )).toArray(String[]::new); From 5055b9e5e04c2dd52fef8e00c6b6ecb3a7e710f2 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Thu, 30 Oct 2025 09:29:48 +0800 Subject: [PATCH 17/28] Add missing import 
--- .../druid/testing/embedded/schema/SystemPropertiesTableTest.java | 1 + 1 file changed, 1 insertion(+) diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java index cea2baff754a..cbbaa7c37688 100644 --- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java @@ -20,6 +20,7 @@ package org.apache.druid.testing.embedded.schema; import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.collect.ImmutableList; import org.apache.druid.discovery.NodeRole; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.rpc.RequestBuilder; From 6eefbd03cc4f45f7f67d10f742a334a95ea7ffbc Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Fri, 31 Oct 2025 18:29:07 +0800 Subject: [PATCH 18/28] Refactor out nodeRoles.toString --- .../apache/druid/sql/calcite/schema/SystemPropertiesTable.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java index 8a8be4675e48..e29b540e7078 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java @@ -184,7 +184,8 @@ public void addNodeRole(String nodeRole) public Stream toRows() { - return properties.entrySet().stream().map(entry -> new Object[]{serviceName, server, nodeRoles.toString(), entry.getKey(), entry.getValue()}); + String nodeRolesString = nodeRoles.toString(); + return properties.entrySet().stream().map(entry -> new Object[]{serviceName, 
server, nodeRolesString, entry.getKey(), entry.getValue()}); } } } From c55ca1e27fd69952199741311aed3bbf2486cac7 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 10:00:29 +0800 Subject: [PATCH 19/28] Refactor based on feedback --- .../schema/SystemPropertiesTableTest.java | 5 ++-- .../calcite/schema/SystemPropertiesTable.java | 30 ++++++++++++------- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java index cbbaa7c37688..cfb68ad05694 100644 --- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java @@ -97,14 +97,15 @@ private void verifyPropertiesForServer(Map properties, String se { String[] expectedRows = properties.entrySet().stream().map(entry -> String.join( ",", - escapeCsvField(serivceName), escapeCsvField(hostAndPort), + escapeCsvField(serivceName), escapeCsvField(ImmutableList.of(nodeRole).toString()), escapeCsvField(entry.getKey()), escapeCsvField(entry.getValue()) )).toArray(String[]::new); Arrays.sort(expectedRows, String::compareTo); - String[] actualRows = Arrays.stream(cluster.runSql("SELECT * FROM sys.server_properties WHERE server='%s'", hostAndPort).split("\n")).map(entry -> StringUtils.replace(entry, "...", "\"\"")).toArray(String[]::new); + final String result = cluster.runSql("SELECT * FROM sys.server_properties WHERE server='%s'", hostAndPort); + String[] actualRows = result.split("\n"); Arrays.sort(actualRows, String::compareTo); Assertions.assertArrayEquals(expectedRows, actualRows); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java 
b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java index e29b540e7078..f499bfc94411 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java @@ -32,7 +32,7 @@ import org.apache.calcite.schema.impl.AbstractTable; import org.apache.druid.discovery.DiscoveryDruidNode; import org.apache.druid.discovery.DruidNodeDiscoveryProvider; -import org.apache.druid.error.DruidException; +import org.apache.druid.error.InternalServerError; import org.apache.druid.java.util.common.RE; import org.apache.druid.java.util.http.client.HttpClient; import org.apache.druid.java.util.http.client.Request; @@ -71,8 +71,8 @@ public class SystemPropertiesTable extends AbstractTable implements ScannableTab static final RowSignature ROW_SIGNATURE = RowSignature .builder() - .add("service_name", ColumnType.STRING) .add("server", ColumnType.STRING) + .add("service_name", ColumnType.STRING) .add("node_roles", ColumnType.STRING) .add("property", ColumnType.STRING) .add("value", ColumnType.STRING) @@ -126,10 +126,22 @@ public Enumerable scan(DataContext root) ServerProperties serverProperties = serverToPropertiesMap.get(druidNode.getHostAndPortToUse()); serverProperties.addNodeRole(discoveryDruidNode.getNodeRole().getJsonName()); } else { - serverToPropertiesMap.put(druidNode.getHostAndPortToUse(), new ServerProperties(druidNode.getServiceName(), druidNode.getHostAndPortToUse(), new ArrayList<>(Arrays.asList(discoveryDruidNode.getNodeRole().getJsonName())), propertiesMap)); + serverToPropertiesMap.put( + druidNode.getHostAndPortToUse(), + new ServerProperties( + druidNode.getServiceName(), + druidNode.getHostAndPortToUse(), + new ArrayList<>(Arrays.asList(discoveryDruidNode.getNodeRole().getJsonName())), + propertiesMap + ) + ); } }); - return 
Linq4j.asEnumerable(serverToPropertiesMap.values().stream().flatMap(ServerProperties::toRows).collect(Collectors.toList())); + ArrayList rows = new ArrayList<>(); + for (ServerProperties serverProperties : serverToPropertiesMap.values()) { + rows.addAll(serverProperties.toRows()); + } + return Linq4j.asEnumerable(rows); } private Map getProperties(DruidNode druidNode) @@ -152,13 +164,11 @@ private Map getProperties(DruidNode druidNode) } return jsonMapper.readValue( response.getContent(), - new TypeReference>(){} + new TypeReference<>(){} ); } catch (Exception e) { - throw DruidException.forPersona(DruidException.Persona.USER) - .ofCategory(DruidException.Category.UNCATEGORIZED) - .build(e, "HTTP request to[%s] failed", url); + throw InternalServerError.exception(e, "HTTP request to[%s] failed", url); } } @@ -182,10 +192,10 @@ public void addNodeRole(String nodeRole) nodeRoles.add(nodeRole); } - public Stream toRows() + public List toRows() { String nodeRolesString = nodeRoles.toString(); - return properties.entrySet().stream().map(entry -> new Object[]{serviceName, server, nodeRolesString, entry.getKey(), entry.getValue()}); + return properties.entrySet().stream().map(entry -> new Object[]{server, serviceName, nodeRolesString, entry.getKey(), entry.getValue()}).collect(Collectors.toList()); } } } From 935c1703e58b8c4b45a65bdaa2a0ae318e356d2f Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 10:23:06 +0800 Subject: [PATCH 20/28] Add more test cases --- .../schema/SystemPropertiesTableTest.java | 62 ++++++++++++++++--- 1 file changed, 52 insertions(+), 10 deletions(-) diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java index cfb68ad05694..c0621a0d098b 100644 --- 
a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java @@ -48,17 +48,20 @@ public class SystemPropertiesTableTest extends EmbeddedClusterTestBase private final EmbeddedBroker broker = new EmbeddedBroker() .addProperty("druid.service", BROKER_SERVICE) .addProperty("druid.plaintextPort", BROKER_PORT) - .addProperty("test.onlyBroker", "brokerValue"); + .addProperty("test.onlyBroker", "brokerValue") + .addProperty("test.nonUniqueProperty", "brokerNonUniqueValue"); private final EmbeddedOverlord overlord = new EmbeddedOverlord() - .addProperty("druid.service", OVERLORD_SERVICE) - .addProperty("druid.plaintextPort", OVERLORD_PORT) - .addProperty("test.onlyOverlord", "overlordValue"); + .addProperty("druid.service", OVERLORD_SERVICE) + .addProperty("druid.plaintextPort", OVERLORD_PORT) + .addProperty("test.onlyOverlord", "overlordValue") + .addProperty("test.nonUniqueProperty", "overlordNonUniqueValue"); private final EmbeddedCoordinator coordinator = new EmbeddedCoordinator() .addProperty("druid.service", COORDINATOR_SERVICE) .addProperty("druid.plaintextPort", COORDINATOR_PORT) - .addProperty("test.onlyCoordinator", "coordinatorValue"); + .addProperty("test.onlyCoordinator", "coordinatorValue") + .addProperty("test.nonUniqueProperty", "coordinatorNonUniqueValue"); @Override protected EmbeddedDruidCluster createCluster() @@ -72,20 +75,28 @@ protected EmbeddedDruidCluster createCluster() } @Test - public void test_serverPropertiesTable() + public void test_serverPropertiesTable_brokerServer() { - final Map overlordProps = cluster.callApi().serviceClient().onLeaderOverlord( + final Map brokerProps = cluster.callApi().serviceClient().onAnyBroker( mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), new TypeReference<>(){} ); - verifyPropertiesForServer(overlordProps, OVERLORD_SERVICE, 
StringUtils.format("localhost:%s", OVERLORD_PORT), NodeRole.OVERLORD_JSON_NAME); + verifyPropertiesForServer(brokerProps, BROKER_SERVICE, StringUtils.format("localhost:%s", BROKER_PORT), NodeRole.BROKER_JSON_NAME); + } - final Map brokerProps = cluster.callApi().serviceClient().onAnyBroker( + @Test + public void test_serverPropertiesTable_overlordServer() + { + final Map overlordProps = cluster.callApi().serviceClient().onLeaderOverlord( mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), new TypeReference<>(){} ); - verifyPropertiesForServer(brokerProps, BROKER_SERVICE, StringUtils.format("localhost:%s", BROKER_PORT), NodeRole.BROKER_JSON_NAME); + verifyPropertiesForServer(overlordProps, OVERLORD_SERVICE, StringUtils.format("localhost:%s", OVERLORD_PORT), NodeRole.OVERLORD_JSON_NAME); + } + @Test + public void test_serverPropertiesTable_coordinatorServer() + { final Map coordinatorProps = cluster.callApi().serviceClient().onLeaderCoordinator( mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), new TypeReference<>(){} @@ -93,6 +104,37 @@ public void test_serverPropertiesTable() verifyPropertiesForServer(coordinatorProps, COORDINATOR_SERVICE, StringUtils.format("localhost:%s", COORDINATOR_PORT), NodeRole.COORDINATOR_JSON_NAME); } + @Test + public void test_serverPropertiesTable_specificProperty() + { + Assertions.assertEquals( + "brokerValue", + cluster.runSql("SELECT \"value\" FROM sys.server_properties WHERE server = 'localhost:%s' AND property = 'test.onlyBroker'", BROKER_PORT) + ); + + Assertions.assertEquals( + "brokerValue", + cluster.runSql("SELECT \"value\" FROM sys.server_properties WHERE service_name = '%s' AND property = 'test.onlyBroker'", BROKER_SERVICE) + ); + + Assertions.assertEquals( + StringUtils.format("localhost:%s,%s,[%s],test.onlyBroker,brokerValue", BROKER_PORT, BROKER_SERVICE, NodeRole.BROKER_JSON_NAME), + cluster.runSql("SELECT * FROM sys.server_properties WHERE server = 'localhost:%s' AND property = 
'test.onlyBroker'", BROKER_PORT) + ); + + String[] expectedRows = new String[] { + StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,brokerNonUniqueValue", BROKER_PORT, BROKER_SERVICE, NodeRole.BROKER_JSON_NAME), + StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,overlordNonUniqueValue", OVERLORD_PORT, OVERLORD_SERVICE, NodeRole.OVERLORD_JSON_NAME), + StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,coordinatorNonUniqueValue", COORDINATOR_PORT, COORDINATOR_SERVICE, NodeRole.COORDINATOR_JSON_NAME), + }; + Arrays.sort(expectedRows, String::compareTo); + final String result = cluster.runSql("SELECT * FROM sys.server_properties WHERE property='test.nonUniqueProperty'"); + String[] actualRows = result.split("\n"); + Arrays.sort(actualRows, String::compareTo); + Assertions.assertArrayEquals(expectedRows, actualRows); + + } + private void verifyPropertiesForServer(Map properties, String serivceName, String hostAndPort, String nodeRole) { String[] expectedRows = properties.entrySet().stream().map(entry -> String.join( From 698894d038b4a28714961e14a9d7450ef4140c29 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 10:45:12 +0800 Subject: [PATCH 21/28] Update docs --- docs/querying/sql-metadata-tables.md | 19 +++++++++++++++++++ website/.spelling | 3 +++ 2 files changed, 22 insertions(+) diff --git a/docs/querying/sql-metadata-tables.md b/docs/querying/sql-metadata-tables.md index 6d59462fd1e9..5cf31eaaea90 100644 --- a/docs/querying/sql-metadata-tables.md +++ b/docs/querying/sql-metadata-tables.md @@ -238,6 +238,7 @@ Servers table lists all discovered servers in the cluster. 
|start_time|STRING|Timestamp in ISO8601 format when the server was announced in the cluster| |version|VARCHAR|Druid version running on the server| |labels|VARCHAR|Labels for the server configured using the property [`druid.labels`](../configuration/index.md)| + To retrieve information about all servers, use the query: ```sql @@ -315,3 +316,21 @@ For example, to retrieve supervisor tasks information filtered by health status, ```sql SELECT * FROM sys.supervisors WHERE healthy=0; ``` + +### SERVER_PROPERTIES table + +The `server_properties` table exposes configuration properties for each Druid server. Each row represents a single property key-value pair associated with a specific server. + +|Column|Type|Notes| +|------|-----|-----| +|server|VARCHAR|Host and port of the server, in the form host:port| +|service_name|VARCHAR|Service name of the server, as defined by `druid.service`| +|node_roles|VARCHAR|Comma-separated list of roles that the server performs. For example, `[coordinator,overlord]` if the server functions as both a Coordinator and an Overlord.| +|property|VARCHAR|Name of the property| +|value|VARCHAR|Value of the property| + +For example, to retrieve properties for a specific server, use the query + +```sql +SELECT * FROM sys.server_properties WHERE server='123.456.789.1:8081' +``` \ No newline at end of file diff --git a/website/.spelling b/website/.spelling index 2eaa410bace9..92207fe38f59 100644 --- a/website/.spelling +++ b/website/.spelling @@ -447,6 +447,7 @@ namespaced namespaces natively netflow +node_roles nondescriptive nonfinalized non-null @@ -539,6 +540,7 @@ searchable secondaryPartitionPruning seekable seekable-stream +service_name servlet setProcessingThreadNames sigterm @@ -677,6 +679,7 @@ PT5M SCHEMA_NAME SCHEMA_OWNER SERVER_SEGMENTS +SERVER_PROPERTIES SMALLINT SQL_PATH STRING_AGG From 0152adce0c33401bdffafbeab2b34355deaddbd3 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 
10:48:07 +0800 Subject: [PATCH 22/28] Rename to SystemServerPropertiesTable --- ...esTableTest.java => SystemServerPropertiesTableTest.java} | 2 +- .../org/apache/druid/sql/calcite/schema/SystemSchema.java | 4 ++-- ...PropertiesTable.java => SystemServerPropertiesTable.java} | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) rename embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/{SystemPropertiesTableTest.java => SystemServerPropertiesTableTest.java} (98%) rename sql/src/main/java/org/apache/druid/sql/calcite/schema/{SystemPropertiesTable.java => SystemServerPropertiesTable.java} (98%) diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java similarity index 98% rename from embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java rename to embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java index c0621a0d098b..df157db57feb 100644 --- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemPropertiesTableTest.java +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java @@ -36,7 +36,7 @@ import java.util.Arrays; import java.util.Map; -public class SystemPropertiesTableTest extends EmbeddedClusterTestBase +public class SystemServerPropertiesTableTest extends EmbeddedClusterTestBase { private static final String BROKER_PORT = "9082"; private static final String BROKER_SERVICE = "test/broker"; diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index 6a6d39ca7b8e..6278506051ca 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -259,8 +259,8 @@ public SystemSchema( new TasksTable(overlordClient, authorizerMapper), SUPERVISOR_TABLE, new SupervisorsTable(overlordClient, authorizerMapper), - SystemPropertiesTable.TABLE_NAME, - new SystemPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper) + SystemServerPropertiesTable.TABLE_NAME, + new SystemServerPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper) ); } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java similarity index 98% rename from sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java rename to sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java index f499bfc94411..df8b313c8e1c 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemPropertiesTable.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemServerPropertiesTable.java @@ -57,7 +57,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * System schema table {@code sys.server_properties} that contains the properties of all Druid servers. @@ -65,7 +64,7 @@ * that server would have multiple values in the column {@code node_roles} rather than duplicating all the * rows. 
*/ -public class SystemPropertiesTable extends AbstractTable implements ScannableTable +public class SystemServerPropertiesTable extends AbstractTable implements ScannableTable { public static final String TABLE_NAME = "server_properties"; @@ -83,7 +82,7 @@ public class SystemPropertiesTable extends AbstractTable implements ScannableTab private final HttpClient httpClient; private final ObjectMapper jsonMapper; - public SystemPropertiesTable( + public SystemServerPropertiesTable( DruidNodeDiscoveryProvider druidNodeDiscoveryProvider, AuthorizerMapper authorizerMapper, HttpClient httpClient, From fccb86afde1ea29831429d48686f31c143387ede Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 10:55:36 +0800 Subject: [PATCH 23/28] Update naming --- .../druid/sql/calcite/schema/SystemSchemaTest.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index f505566aa519..88fa713020c3 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -571,8 +571,8 @@ public void testGetTableMap() Assert.assertEquals("server", serverFields.get(0).getName()); Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName()); - final SystemPropertiesTable propertiesTable = (SystemPropertiesTable) schema.getTableMap() - .get("server_properties"); + final SystemServerPropertiesTable propertiesTable = (SystemServerPropertiesTable) schema.getTableMap() + .get("server_properties"); final RelDataType propertiesRowType = propertiesTable.getRowType(new JavaTypeFactoryImpl()); final List propertiesFields = propertiesRowType.getFieldList(); Assert.assertEquals(5, propertiesFields.size()); @@ -1466,9 +1466,9 @@ public void 
testSupervisorTableAuth() @Test public void testPropertiesTable() { - SystemPropertiesTable propertiesTable = EasyMock.createMockBuilder(SystemPropertiesTable.class) - .withConstructor(druidNodeDiscoveryProvider, authMapper, httpClient, MAPPER) - .createMock(); + SystemServerPropertiesTable propertiesTable = EasyMock.createMockBuilder(SystemServerPropertiesTable.class) + .withConstructor(druidNodeDiscoveryProvider, authMapper, httpClient, MAPPER) + .createMock(); EasyMock.replay(propertiesTable); From c60fee3837f870f16daa33b8fbd83cf37bd5ecdf Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 11:35:50 +0800 Subject: [PATCH 24/28] Update test --- .../druid/sql/calcite/schema/SystemSchemaTest.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index 88fa713020c3..7c35d16e9e58 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -1487,8 +1487,8 @@ public void testPropertiesTable() String coordinatorJson = "{\"druid.test-key\": \"test-value\"}"; coordinatorResponseHolder.addChunk(coordinatorJson); expectedRows.add(new Object[]{ - coordinator.getDruidNode().getServiceName(), coordinator.getDruidNode().getHostAndPortToUse(), + coordinator.getDruidNode().getServiceName(), ImmutableList.of(coordinator.getNodeRole().getJsonName()).toString(), "druid.test-key", "test-value" }); @@ -1499,8 +1499,8 @@ public void testPropertiesTable() coordinator2ResponseHolder.addChunk(coordinator2Json); expectedRows .add(new Object[]{ - coordinator2.getDruidNode().getServiceName(), coordinator2.getDruidNode().getHostAndPortToUse(), + coordinator2.getDruidNode().getServiceName(), 
ImmutableList.of(coordinator2.getNodeRole().getJsonName()).toString(), "druid.test-key3", "test-value3" }); @@ -1515,15 +1515,15 @@ public void testPropertiesTable() middleManagerResponseHolder.addChunk(middleManagerJson); expectedRows .add(new Object[]{ - middleManager.getDruidNode().getServiceName(), middleManager.getDruidNode().getHostAndPortToUse(), + middleManager.getDruidNode().getServiceName(), ImmutableList.of(middleManager.getNodeRole().getJsonName()).toString(), "druid.test-key", "test-value" }); expectedRows .add(new Object[]{ - middleManager.getDruidNode().getServiceName(), middleManager.getDruidNode().getHostAndPortToUse(), + middleManager.getDruidNode().getServiceName(), ImmutableList.of(middleManager.getNodeRole().getJsonName()).toString(), "druid.test-key2", "test-value2" }); @@ -1554,8 +1554,8 @@ public void testPropertiesTable() DataContext dataContext = createDataContext(Users.SUPER); final List rows = propertiesTable.scan(dataContext).toList(); - expectedRows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[1]).compareTo(row2[1])); - rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[1]).compareTo(row2[1])); + expectedRows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0])); + rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0])); Assert.assertEquals(expectedRows.size(), rows.size()); for (int i = 0; i < expectedRows.size(); i++) { Assert.assertArrayEquals(expectedRows.get(i), rows.get(i)); From 774510cb11fc4ffd262779ccd4babe95819195db Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 11:35:57 +0800 Subject: [PATCH 25/28] Update docs --- docs/querying/sql-metadata-tables.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/querying/sql-metadata-tables.md b/docs/querying/sql-metadata-tables.md index 5cf31eaaea90..3bc340f06031 100644 --- a/docs/querying/sql-metadata-tables.md 
+++ b/docs/querying/sql-metadata-tables.md @@ -319,11 +319,11 @@ SELECT * FROM sys.supervisors WHERE healthy=0; ### SERVER_PROPERTIES table -The `server_properties` table exposes configuration properties for each Druid server. Each row represents a single property key-value pair associated with a specific server. +The `server_properties` table exposes th runtime properties configured on for each Druid server. Each row represents a single property key-value pair associated with a specific server. |Column|Type|Notes| |------|-----|-----| -|server|VARCHAR|Host and port of the server, in the form host:port| +|server|VARCHAR|Host and port of the server, in the form `host:port`| |service_name|VARCHAR|Service name of the server, as defined by `druid.service`| |node_roles|VARCHAR|Comma-separated list of roles that the server performs. For example, `[coordinator,overlord]` if the server functions as both a Coordinator and an Overlord.| |property|VARCHAR|Name of the property| From cdc5aa1505d18e319abd3ab899faca9a995a2905 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 13:14:52 +0800 Subject: [PATCH 26/28] Test hidden properties --- .../schema/SystemServerPropertiesTableTest.java | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java index df157db57feb..d5f062e0d47d 100644 --- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java +++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/schema/SystemServerPropertiesTableTest.java @@ -49,7 +49,8 @@ public class SystemServerPropertiesTableTest extends EmbeddedClusterTestBase .addProperty("druid.service", BROKER_SERVICE) .addProperty("druid.plaintextPort", 
BROKER_PORT) .addProperty("test.onlyBroker", "brokerValue") - .addProperty("test.nonUniqueProperty", "brokerNonUniqueValue"); + .addProperty("test.nonUniqueProperty", "brokerNonUniqueValue") + .addProperty("password", "brokerPassword"); private final EmbeddedOverlord overlord = new EmbeddedOverlord() .addProperty("druid.service", OVERLORD_SERVICE) @@ -135,6 +136,16 @@ public void test_serverPropertiesTable_specificProperty() } + @Test + public void test_serverPropertiesTable_hiddenProperties() + { + final Map brokerProps = cluster.callApi().serviceClient().onAnyBroker( + mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"), + new TypeReference<>(){} + ); + Assertions.assertFalse(brokerProps.containsKey("password")); + } + private void verifyPropertiesForServer(Map properties, String serivceName, String hostAndPort, String nodeRole) { String[] expectedRows = properties.entrySet().stream().map(entry -> String.join( From 61d5f610f48625dd9f6b12a3cf1ad542af01e734 Mon Sep 17 00:00:00 2001 From: Gabriel Chang <77312579+GabrielCWT@users.noreply.github.com> Date: Tue, 4 Nov 2025 13:41:09 +0800 Subject: [PATCH 27/28] Fix typo --- docs/querying/sql-metadata-tables.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/sql-metadata-tables.md b/docs/querying/sql-metadata-tables.md index 3bc340f06031..dea794170894 100644 --- a/docs/querying/sql-metadata-tables.md +++ b/docs/querying/sql-metadata-tables.md @@ -319,7 +319,7 @@ SELECT * FROM sys.supervisors WHERE healthy=0; ### SERVER_PROPERTIES table -The `server_properties` table exposes th runtime properties configured on for each Druid server. Each row represents a single property key-value pair associated with a specific server. +The `server_properties` table exposes the runtime properties configured for each Druid server. Each row represents a single property key-value pair associated with a specific server. 
|Column|Type|Notes| |------|-----|-----| From ec4d324968ccb0ce7064a7489188420fbb2bb394 Mon Sep 17 00:00:00 2001 From: Frank Chen Date: Thu, 6 Nov 2025 16:25:24 +0800 Subject: [PATCH 28/28] Update docs/querying/sql-metadata-tables.md --- docs/querying/sql-metadata-tables.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/sql-metadata-tables.md b/docs/querying/sql-metadata-tables.md index dea794170894..09d0db23d96a 100644 --- a/docs/querying/sql-metadata-tables.md +++ b/docs/querying/sql-metadata-tables.md @@ -332,5 +332,5 @@ The `server_properties` table exposes the runtime properties configured on for e For example, to retrieve properties for a specific server, use the query ```sql -SELECT * FROM sys.server_properties WHERE server='123.456.789.1:8081' +SELECT * FROM sys.server_properties WHERE server='192.168.1.1:8081' ``` \ No newline at end of file