Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
412eb62
Add properties table
GabrielCWT Oct 23, 2025
8f87895
Fix based on feedback
GabrielCWT Oct 28, 2025
e3eabcc
Combine rows from servers with same hostAndPort
GabrielCWT Oct 28, 2025
cd005de
Update tests
GabrielCWT Oct 28, 2025
d4ad335
Fix tests
GabrielCWT Oct 28, 2025
3de794c
Revert tableMap to use ImmutableMap.of
GabrielCWT Oct 28, 2025
22e9154
Separate service_name and node_roles column
GabrielCWT Oct 28, 2025
a9b29a2
Add embedded test
GabrielCWT Oct 29, 2025
191e4ea
Use StringUtils.replace instead
GabrielCWT Oct 29, 2025
5617d72
Fix embedded test
GabrielCWT Oct 29, 2025
78be93c
Fix based on feedback
GabrielCWT Oct 29, 2025
3d31b1d
Refactor to use ServerProperties class
GabrielCWT Oct 29, 2025
2d4ca97
Revert formatting
GabrielCWT Oct 29, 2025
3eb02c5
Remove unnecessary throw Exception
GabrielCWT Oct 29, 2025
c01a5bf
Update test to use list for node roles
GabrielCWT Oct 29, 2025
ec7c943
Store nodeRole in array to match SQL return format
GabrielCWT Oct 29, 2025
5055b9e
Add missing import
GabrielCWT Oct 30, 2025
6eefbd0
Refactor out nodeRoles.toString
GabrielCWT Oct 31, 2025
c55ca1e
Refactor based on feedback
GabrielCWT Nov 4, 2025
935c170
Add more test cases
GabrielCWT Nov 4, 2025
698894d
Update docs
GabrielCWT Nov 4, 2025
0152adc
Rename to SystemServerPropertiesTable
GabrielCWT Nov 4, 2025
fccb86a
Update naming
GabrielCWT Nov 4, 2025
c60fee3
Update test
GabrielCWT Nov 4, 2025
774510c
Update docs
GabrielCWT Nov 4, 2025
cdc5aa1
Test hidden properties
GabrielCWT Nov 4, 2025
61d5f61
Fix typo
GabrielCWT Nov 4, 2025
ec4d324
Update docs/querying/sql-metadata-tables.md
FrankChen021 Nov 6, 2025
aa10bde
Merge branch 'master' into gh-65-add-sys-properties
GabrielCWT Nov 19, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions docs/querying/sql-metadata-tables.md
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,7 @@ Servers table lists all discovered servers in the cluster.
|start_time|STRING|Timestamp in ISO8601 format when the server was announced in the cluster|
|version|VARCHAR|Druid version running on the server|
|labels|VARCHAR|Labels for the server configured using the property [`druid.labels`](../configuration/index.md)|

To retrieve information about all servers, use the query:

```sql
Expand Down Expand Up @@ -315,3 +316,21 @@ For example, to retrieve supervisor tasks information filtered by health status,
```sql
SELECT * FROM sys.supervisors WHERE healthy=0;
```

### SERVER_PROPERTIES table

The `server_properties` table exposes the runtime properties configured for each Druid server. Each row represents a single property key-value pair associated with a specific server.

|Column|Type|Notes|
|------|-----|-----|
|server|VARCHAR|Host and port of the server, in the form `host:port`|
|service_name|VARCHAR|Service name of the server, as defined by `druid.service`|
|node_roles|VARCHAR|Comma-separated list of roles that the server performs. For example, `[coordinator,overlord]` if the server functions as both a Coordinator and an Overlord.|
|property|VARCHAR|Name of the property|
|value|VARCHAR|Value of the property|

For example, to retrieve properties for a specific server, use the query:

```sql
SELECT * FROM sys.server_properties WHERE server='192.168.1.1:8081';
```
Original file line number Diff line number Diff line change
@@ -0,0 +1,179 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.druid.testing.embedded.schema;

import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.collect.ImmutableList;
import org.apache.druid.discovery.NodeRole;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.rpc.RequestBuilder;
import org.apache.druid.testing.embedded.EmbeddedBroker;
import org.apache.druid.testing.embedded.EmbeddedCoordinator;
import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
import org.apache.druid.testing.embedded.EmbeddedOverlord;
import org.apache.druid.testing.embedded.junit5.EmbeddedClusterTestBase;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

import java.util.Arrays;
import java.util.Map;

public class SystemServerPropertiesTableTest extends EmbeddedClusterTestBase
{
private static final String BROKER_PORT = "9082";
private static final String BROKER_SERVICE = "test/broker";
private static final String OVERLORD_PORT = "9090";
private static final String OVERLORD_SERVICE = "test/overlord";
private static final String COORDINATOR_PORT = "9081";
private static final String COORDINATOR_SERVICE = "test/coordinator";

private final EmbeddedBroker broker = new EmbeddedBroker()
.addProperty("druid.service", BROKER_SERVICE)
.addProperty("druid.plaintextPort", BROKER_PORT)
Comment thread
GabrielCWT marked this conversation as resolved.
.addProperty("test.onlyBroker", "brokerValue")
.addProperty("test.nonUniqueProperty", "brokerNonUniqueValue")
.addProperty("password", "brokerPassword");

private final EmbeddedOverlord overlord = new EmbeddedOverlord()
.addProperty("druid.service", OVERLORD_SERVICE)
.addProperty("druid.plaintextPort", OVERLORD_PORT)
.addProperty("test.onlyOverlord", "overlordValue")
.addProperty("test.nonUniqueProperty", "overlordNonUniqueValue");

private final EmbeddedCoordinator coordinator = new EmbeddedCoordinator()
.addProperty("druid.service", COORDINATOR_SERVICE)
.addProperty("druid.plaintextPort", COORDINATOR_PORT)
.addProperty("test.onlyCoordinator", "coordinatorValue")
.addProperty("test.nonUniqueProperty", "coordinatorNonUniqueValue");

@Override
protected EmbeddedDruidCluster createCluster()
{
return EmbeddedDruidCluster
.withZookeeper()
.addServer(coordinator)
.addServer(overlord)
.addServer(broker)
.addCommonProperty("commonProperty", "commonValue");
}

@Test
public void test_serverPropertiesTable_brokerServer()
{
final Map<String, String> brokerProps = cluster.callApi().serviceClient().onAnyBroker(
mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"),
new TypeReference<>(){}
);
verifyPropertiesForServer(brokerProps, BROKER_SERVICE, StringUtils.format("localhost:%s", BROKER_PORT), NodeRole.BROKER_JSON_NAME);
}

@Test
public void test_serverPropertiesTable_overlordServer()
{
final Map<String, String> overlordProps = cluster.callApi().serviceClient().onLeaderOverlord(
mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"),
new TypeReference<>(){}
);
verifyPropertiesForServer(overlordProps, OVERLORD_SERVICE, StringUtils.format("localhost:%s", OVERLORD_PORT), NodeRole.OVERLORD_JSON_NAME);
}

@Test
public void test_serverPropertiesTable_coordinatorServer()
{
final Map<String, String> coordinatorProps = cluster.callApi().serviceClient().onLeaderCoordinator(
mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"),
new TypeReference<>(){}
);
verifyPropertiesForServer(coordinatorProps, COORDINATOR_SERVICE, StringUtils.format("localhost:%s", COORDINATOR_PORT), NodeRole.COORDINATOR_JSON_NAME);
}

@Test
public void test_serverPropertiesTable_specificProperty()
{
Assertions.assertEquals(
"brokerValue",
cluster.runSql("SELECT \"value\" FROM sys.server_properties WHERE server = 'localhost:%s' AND property = 'test.onlyBroker'", BROKER_PORT)
);

Assertions.assertEquals(
"brokerValue",
cluster.runSql("SELECT \"value\" FROM sys.server_properties WHERE service_name = '%s' AND property = 'test.onlyBroker'", BROKER_SERVICE)
);

Assertions.assertEquals(
StringUtils.format("localhost:%s,%s,[%s],test.onlyBroker,brokerValue", BROKER_PORT, BROKER_SERVICE, NodeRole.BROKER_JSON_NAME),
cluster.runSql("SELECT * FROM sys.server_properties WHERE server = 'localhost:%s' AND property = 'test.onlyBroker'", BROKER_PORT)
);

String[] expectedRows = new String[] {
StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,brokerNonUniqueValue", BROKER_PORT, BROKER_SERVICE, NodeRole.BROKER_JSON_NAME),
StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,overlordNonUniqueValue", OVERLORD_PORT, OVERLORD_SERVICE, NodeRole.OVERLORD_JSON_NAME),
StringUtils.format("localhost:%s,%s,[%s],test.nonUniqueProperty,coordinatorNonUniqueValue", COORDINATOR_PORT, COORDINATOR_SERVICE, NodeRole.COORDINATOR_JSON_NAME),
};
Arrays.sort(expectedRows, String::compareTo);
final String result = cluster.runSql("SELECT * FROM sys.server_properties WHERE property='test.nonUniqueProperty'");
String[] actualRows = result.split("\n");
Arrays.sort(actualRows, String::compareTo);
Assertions.assertArrayEquals(expectedRows, actualRows);

}

@Test
public void test_serverPropertiesTable_hiddenProperties()
{
final Map<String, String> brokerProps = cluster.callApi().serviceClient().onAnyBroker(
mapper -> new RequestBuilder(HttpMethod.GET, "/status/properties"),
new TypeReference<>(){}
);
Assertions.assertFalse(brokerProps.containsKey("password"));
}

private void verifyPropertiesForServer(Map<String, String> properties, String serivceName, String hostAndPort, String nodeRole)
{
String[] expectedRows = properties.entrySet().stream().map(entry -> String.join(
",",
escapeCsvField(hostAndPort),
escapeCsvField(serivceName),
escapeCsvField(ImmutableList.of(nodeRole).toString()),
escapeCsvField(entry.getKey()),
escapeCsvField(entry.getValue())
)).toArray(String[]::new);
Arrays.sort(expectedRows, String::compareTo);
final String result = cluster.runSql("SELECT * FROM sys.server_properties WHERE server='%s'", hostAndPort);
String[] actualRows = result.split("\n");
Arrays.sort(actualRows, String::compareTo);
Assertions.assertArrayEquals(expectedRows, actualRows);
}

/**
* Escapes a field value for CSV format.
*/
private String escapeCsvField(String field)
{
if (field == null) {
return "";
}
if (field.contains(",") || field.contains("\"") || field.contains("\n") || field.contains("\r")) {
return "\"" + StringUtils.replace(field, "\"", "\"\"") + "\"";
}
return field;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.servlet.GuiceFilter;
import org.apache.druid.guice.annotations.Json;
import org.apache.druid.server.http.OverlordProxyServlet;
import org.apache.druid.server.http.RedirectFilter;
Expand Down Expand Up @@ -110,7 +109,7 @@ public void initialize(Server server, Injector injector)

// add some paths not to be redirected to leader.
final FilterHolder guiceFilterHolder = JettyServerInitUtils.getGuiceFilterHolder(injector);
root.addFilter(GuiceFilter.class, "/status/*", null);
root.addFilter(guiceFilterHolder, "/status/*", null);
root.addFilter(guiceFilterHolder, "/druid-internal/*", null);

// redirect anything other than status to the current lead
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,12 +53,14 @@
import org.apache.druid.discovery.DiscoveryDruidNode;
import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
import org.apache.druid.discovery.NodeRole;
import org.apache.druid.guice.annotations.EscalatedClient;
import org.apache.druid.indexer.TaskStatusPlus;
import org.apache.druid.indexing.overlord.supervisor.SupervisorStatus;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.jackson.JacksonUtils;
import org.apache.druid.java.util.common.parsers.CloseableIterator;
import org.apache.druid.java.util.http.client.HttpClient;
import org.apache.druid.rpc.indexing.OverlordClient;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;
Expand Down Expand Up @@ -234,7 +236,8 @@ public SystemSchema(
final CoordinatorClient coordinatorClient,
final OverlordClient overlordClient,
final DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
final ObjectMapper jsonMapper
final ObjectMapper jsonMapper,
@EscalatedClient final HttpClient httpClient
)
{
Preconditions.checkNotNull(serverView, "serverView");
Expand All @@ -255,7 +258,9 @@ public SystemSchema(
TASKS_TABLE,
new TasksTable(overlordClient, authorizerMapper),
SUPERVISOR_TABLE,
new SupervisorsTable(overlordClient, authorizerMapper)
new SupervisorsTable(overlordClient, authorizerMapper),
SystemServerPropertiesTable.TABLE_NAME,
new SystemServerPropertiesTable(druidNodeDiscoveryProvider, authorizerMapper, httpClient, jsonMapper)
);
}

Expand Down Expand Up @@ -741,13 +746,6 @@ private static DruidServer toDruidServer(DiscoveryDruidNode discoveryDruidNode)
}
}

private static Iterator<DiscoveryDruidNode> getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider)
{
return Arrays.stream(NodeRole.values())
.flatMap(nodeRole -> druidNodeDiscoveryProvider.getForNodeRole(nodeRole).getAllNodes().stream())
.collect(Collectors.toList())
.iterator();
}
}

/**
Expand Down Expand Up @@ -1105,7 +1103,7 @@ private static String toStringOrNull(@Nullable final Object object)
/**
* Checks if an authenticated user has the STATE READ permissions needed to view server information.
*/
private static void checkStateReadAccessForServers(
public static void checkStateReadAccessForServers(
AuthenticationResult authenticationResult,
AuthorizerMapper authorizerMapper
)
Expand All @@ -1121,6 +1119,17 @@ private static void checkStateReadAccessForServers(
}
}

/**
* Returns an iterator over all discoverable Druid nodes in the cluster.
*/
public static Iterator<DiscoveryDruidNode> getDruidServers(DruidNodeDiscoveryProvider druidNodeDiscoveryProvider)
Comment thread
GabrielCWT marked this conversation as resolved.
{
return Arrays.stream(NodeRole.values())
.flatMap(nodeRole -> druidNodeDiscoveryProvider.getForNodeRole(nodeRole).getAllNodes().stream())
.collect(Collectors.toList())
.iterator();
}

/**
* Project a row using "projects" from {@link SegmentsTable#scan(DataContext, List, int[])}.
* <p>
Expand Down
Loading
Loading