diff --git a/docs/querying/sql-metadata-tables.md b/docs/querying/sql-metadata-tables.md index fbf2eb05941f..3d6093659560 100644 --- a/docs/querying/sql-metadata-tables.md +++ b/docs/querying/sql-metadata-tables.md @@ -208,7 +208,8 @@ Servers table lists all discovered servers in the cluster. |current_size|BIGINT|Current size of segments in bytes on this server. Only valid for HISTORICAL type, for other types it's 0| |max_size|BIGINT|Max size in bytes this server recommends to assign to segments see [druid.server.maxSize](../configuration/index.md#historical-general-configuration). Only valid for HISTORICAL type, for other types it's 0| |is_leader|BIGINT|1 if the server is currently the 'leader' (for services which have the concept of leadership), otherwise 0 if the server is not the leader, or the default long value (0 or null depending on `druid.generic.useDefaultValueForNull`) if the server type does not have the concept of leadership| +|start_time|STRING|Timestamp in ISO8601 format when the server was announced in the cluster| To retrieve information about all servers, use the query: ```sql diff --git a/integration-tests/src/test/java/org/apache/druid/tests/security/AbstractAuthConfigurationTest.java b/integration-tests/src/test/java/org/apache/druid/tests/security/AbstractAuthConfigurationTest.java index 5c6e02745960..1c5867916137 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/security/AbstractAuthConfigurationTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/security/AbstractAuthConfigurationTest.java @@ -276,7 +276,7 @@ public void test_systemSchemaAccess_admin() throws Exception verifySystemSchemaServerQuery( adminClient, SYS_SCHEMA_SERVERS_QUERY, - getServersWithoutCurrentSize(adminServers) + getServersWithoutCurrentSizeAndStartTime(adminServers) ); LOG.info("Checking sys.server_segments query as admin..."); @@ -767,7 +767,7 @@ protected void verifySystemSchemaQueryBase( String content =
responseHolder.getContent(); List> responseMap = jsonMapper.readValue(content, SYS_SCHEMA_RESULTS_TYPE_REFERENCE); if (isServerQuery) { - responseMap = getServersWithoutCurrentSize(responseMap); + responseMap = getServersWithoutCurrentSizeAndStartTime(responseMap); } Assert.assertEquals(responseMap, expectedResults); } @@ -914,7 +914,7 @@ protected void setExpectedSystemSchemaObjects() throws IOException SYS_SCHEMA_RESULTS_TYPE_REFERENCE ); - adminServers = getServersWithoutCurrentSize( + adminServers = getServersWithoutCurrentSizeAndStartTime( jsonMapper.readValue( fillServersTemplate( config, @@ -937,13 +937,14 @@ protected void setExpectedSystemSchemaObjects() throws IOException * curr_size on historicals changes because cluster state is not isolated across different * integration tests, zero it out for consistent test results */ - protected static List> getServersWithoutCurrentSize(List> servers) + protected static List> getServersWithoutCurrentSizeAndStartTime(List> servers) { return Lists.transform( servers, (server) -> { Map newServer = new HashMap<>(server); newServer.put("curr_size", 0); + newServer.put("start_time", "0"); return newServer; } ); diff --git a/integration-tests/src/test/resources/results/auth_test_sys_schema_servers.json b/integration-tests/src/test/resources/results/auth_test_sys_schema_servers.json index d27614e2ecef..776c2ba2f5ed 100644 --- a/integration-tests/src/test/resources/results/auth_test_sys_schema_servers.json +++ b/integration-tests/src/test/resources/results/auth_test_sys_schema_servers.json @@ -8,7 +8,8 @@ "tier": "_default_tier", "curr_size": 2208932412, "max_size": 5000000000, - "is_leader": %%NON_LEADER%% + "is_leader": %%NON_LEADER%%, + "start_time": "0" }, { "server": "%%BROKER%%:8282", @@ -19,6 +20,7 @@ "tier": "_default_tier", "curr_size": 0, "max_size": 1000000000, - "is_leader": %%NON_LEADER%% + "is_leader": %%NON_LEADER%%, + "start_time": "0" } ] diff --git 
a/server/src/main/java/org/apache/druid/discovery/DiscoveryDruidNode.java b/server/src/main/java/org/apache/druid/discovery/DiscoveryDruidNode.java index 5ecdf5e04359..461e5fea0dfe 100644 --- a/server/src/main/java/org/apache/druid/discovery/DiscoveryDruidNode.java +++ b/server/src/main/java/org/apache/druid/discovery/DiscoveryDruidNode.java @@ -26,10 +26,12 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Maps; import org.apache.druid.jackson.StringObjectPairList; +import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.NonnullPair; import org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.server.DruidNode; +import org.joda.time.DateTime; import javax.annotation.Nullable; import java.util.HashMap; @@ -50,6 +52,7 @@ public class DiscoveryDruidNode private final DruidNode druidNode; private final NodeRole nodeRole; + private final DateTime startTime; /** * Map of service name -> DruidServices. 
@@ -66,6 +69,16 @@ public DiscoveryDruidNode( NodeRole nodeRole, Map services ) + { + this(druidNode, nodeRole, services, DateTimes.nowUtc()); + } + + public DiscoveryDruidNode( + DruidNode druidNode, + NodeRole nodeRole, + Map services, + DateTime startTime + ) { this.druidNode = druidNode; this.nodeRole = nodeRole; @@ -73,6 +86,7 @@ public DiscoveryDruidNode( if (services != null && !services.isEmpty()) { this.services.putAll(services); } + this.startTime = startTime; } @JsonCreator @@ -80,6 +94,7 @@ private static DiscoveryDruidNode fromJson( @JsonProperty("druidNode") DruidNode druidNode, @JsonProperty("nodeType") NodeRole nodeRole, @JsonProperty("services") Map rawServices, + @JsonProperty("startTime") DateTime startTime, @JacksonInject ObjectMapper jsonMapper ) { @@ -95,7 +110,7 @@ private static DiscoveryDruidNode fromJson( } } } - return new DiscoveryDruidNode(druidNode, nodeRole, services); + return new DiscoveryDruidNode(druidNode, nodeRole, services, startTime); } /** @@ -106,10 +121,10 @@ private static DiscoveryDruidNode fromJson( * This is definitely a bug of DataNodeService, but, since renaming one of those duplicate keys will * break compatibility, DataNodeService still has the deprecated "type" property. * See the Javadoc of DataNodeService for more details. - * + *

* This function catches such duplicate keys and rewrites the deprecated "type" to "serverType", * so that we don't lose any properties. - * + *

* This method can be removed together when we entirely remove the deprecated "type" property from DataNodeService. */ @Deprecated @@ -166,6 +181,12 @@ public DruidNode getDruidNode() return druidNode; } + @JsonProperty + public DateTime getStartTime() + { + return startTime; + } + @Nullable @JsonIgnore public T getService(String key, Class clazz) @@ -205,7 +226,8 @@ public String toString() return "DiscoveryDruidNode{" + "druidNode=" + druidNode + ", nodeRole='" + nodeRole + '\'' + ", services=" + services + + ", startTime=" + startTime + '}'; } } diff --git a/server/src/test/java/org/apache/druid/discovery/DiscoveryDruidNodeTest.java b/server/src/test/java/org/apache/druid/discovery/DiscoveryDruidNodeTest.java index 64976bd82235..0e55e5b68eb2 100644 --- a/server/src/test/java/org/apache/druid/discovery/DiscoveryDruidNodeTest.java +++ b/server/src/test/java/org/apache/druid/discovery/DiscoveryDruidNodeTest.java @@ -59,6 +59,7 @@ public void testEquals() { EqualsVerifier.forClass(DiscoveryDruidNode.class) .withNonnullFields("druidNode", "nodeRole", "services") + .withIgnoredFields("startTime") .usingGetClass() .verify(); } diff --git a/services/src/main/java/org/apache/druid/cli/CliOverlord.java b/services/src/main/java/org/apache/druid/cli/CliOverlord.java index e4383c673c5d..8267329e2d8a 100644 --- a/services/src/main/java/org/apache/druid/cli/CliOverlord.java +++ b/services/src/main/java/org/apache/druid/cli/CliOverlord.java @@ -267,13 +267,13 @@ public void configure(Binder binder) if (standalone) { LifecycleModule.register(binder, Server.class); - } - bindAnnouncer( - binder, - IndexingService.class, - DiscoverySideEffectsProvider.create() - ); + bindAnnouncer( + binder, + IndexingService.class, + DiscoverySideEffectsProvider.create() + ); + } Jerseys.addResource(binder, SelfDiscoveryResource.class); LifecycleModule.registerKey(binder, Key.get(SelfDiscoveryResource.class)); diff --git
a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index 10090e72581f..3e6cb3e58f20 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -163,6 +163,7 @@ public class SystemSchema extends AbstractSchema .add("curr_size", ColumnType.LONG) .add("max_size", ColumnType.LONG) .add("is_leader", ColumnType.LONG) + .add("start_time", ColumnType.STRING) .build(); static final RowSignature SERVER_SEGMENTS_SIGNATURE = RowSignature @@ -595,7 +596,8 @@ private static Object[] buildRowForNonDataServer(DiscoveryDruidNode discoveryDru null, UNKNOWN_SIZE, UNKNOWN_SIZE, - NullHandling.defaultLongValue() + NullHandling.defaultLongValue(), + toStringOrNull(discoveryDruidNode.getStartTime()) }; } @@ -614,7 +616,8 @@ private static Object[] buildRowForNonDataServerWithLeadership(DiscoveryDruidNod null, UNKNOWN_SIZE, UNKNOWN_SIZE, - isLeader ? 1L : 0L + isLeader ? 
1L : 0L, + toStringOrNull(discoveryDruidNode.getStartTime()) }; } @@ -648,7 +651,8 @@ private static Object[] buildRowForDiscoverableDataServer( druidServerToUse.getTier(), currentSize, druidServerToUse.getMaxSize(), - NullHandling.defaultLongValue() + NullHandling.defaultLongValue(), + toStringOrNull(discoveryDruidNode.getStartTime()) }; } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index 1acc5aef1c4a..50d0f09a85e5 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -49,6 +49,7 @@ import org.apache.druid.discovery.DruidNodeDiscoveryProvider; import org.apache.druid.discovery.NodeRole; import org.apache.druid.indexer.partitions.DynamicPartitionsSpec; +import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.IAE; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Intervals; @@ -106,6 +107,7 @@ import org.jboss.netty.handler.codec.http.HttpResponse; import org.jboss.netty.handler.codec.http.HttpResponseStatus; import org.jboss.netty.handler.codec.http.HttpVersion; +import org.joda.time.DateTime; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; @@ -380,40 +382,48 @@ public void setUp() throws Exception final List realtimeSegments = ImmutableList.of(segment2, segment4, segment5); + private final DateTime startTime = DateTimes.nowUtc(); + private final DiscoveryDruidNode coordinator = new DiscoveryDruidNode( new DruidNode("s1", "localhost", false, 8081, null, true, false), NodeRole.COORDINATOR, - ImmutableMap.of() + ImmutableMap.of(), + startTime ); private final DiscoveryDruidNode coordinator2 = new DiscoveryDruidNode( new DruidNode("s1", "localhost", false, 8181, null, true, false), NodeRole.COORDINATOR, - ImmutableMap.of() + 
ImmutableMap.of(), + startTime ); private final DiscoveryDruidNode overlord = new DiscoveryDruidNode( new DruidNode("s2", "localhost", false, 8090, null, true, false), NodeRole.OVERLORD, - ImmutableMap.of() + ImmutableMap.of(), + startTime ); private final DiscoveryDruidNode overlord2 = new DiscoveryDruidNode( new DruidNode("s2", "localhost", false, 8190, null, true, false), NodeRole.OVERLORD, - ImmutableMap.of() + ImmutableMap.of(), + startTime ); private final DiscoveryDruidNode broker1 = new DiscoveryDruidNode( new DruidNode("s3", "localhost", false, 8082, null, true, false), NodeRole.BROKER, - ImmutableMap.of() + ImmutableMap.of(), + startTime ); private final DiscoveryDruidNode broker2 = new DiscoveryDruidNode( new DruidNode("s3", "brokerHost", false, 8082, null, true, false), NodeRole.BROKER, - ImmutableMap.of() + ImmutableMap.of(), + startTime ); private final DiscoveryDruidNode brokerWithBroadcastSegments = new DiscoveryDruidNode( @@ -421,13 +431,15 @@ public void setUp() throws Exception NodeRole.BROKER, ImmutableMap.of( DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.BROKER, 0) - ) + ), + startTime ); private final DiscoveryDruidNode router = new DiscoveryDruidNode( new DruidNode("s4", "localhost", false, 8888, null, true, false), NodeRole.ROUTER, - ImmutableMap.of() + ImmutableMap.of(), + startTime ); private final DiscoveryDruidNode historical1 = new DiscoveryDruidNode( @@ -435,7 +447,8 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT NodeRole.HISTORICAL, ImmutableMap.of( DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.HISTORICAL, 0) - ) + ), + startTime ); private final DiscoveryDruidNode historical2 = new DiscoveryDruidNode( @@ -443,7 +456,8 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT NodeRole.HISTORICAL, ImmutableMap.of( DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, 
ServerType.HISTORICAL, 0) - ) + ), + startTime ); private final DiscoveryDruidNode lameHistorical = new DiscoveryDruidNode( @@ -451,13 +465,15 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT NodeRole.HISTORICAL, ImmutableMap.of( DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.HISTORICAL, 0) - ) + ), + startTime ); private final DiscoveryDruidNode middleManager = new DiscoveryDruidNode( new DruidNode("s6", "mmHost", false, 8091, null, true, false), NodeRole.MIDDLE_MANAGER, - ImmutableMap.of() + ImmutableMap.of(), + startTime ); private final DiscoveryDruidNode peon1 = new DiscoveryDruidNode( @@ -465,7 +481,8 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT NodeRole.PEON, ImmutableMap.of( DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0) - ) + ), + startTime ); private final DiscoveryDruidNode peon2 = new DiscoveryDruidNode( @@ -473,7 +490,8 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT NodeRole.PEON, ImmutableMap.of( DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0) - ) + ), + startTime ); private final DiscoveryDruidNode indexer = new DiscoveryDruidNode( @@ -481,7 +499,8 @@ DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerT NodeRole.INDEXER, ImmutableMap.of( DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0) - ) + ), + startTime ); private final ImmutableDruidServer druidServer1 = new ImmutableDruidServer( @@ -536,7 +555,7 @@ public void testGetTableMap() final SystemSchema.ServersTable serversTable = (SystemSchema.ServersTable) schema.getTableMap().get("servers"); final RelDataType serverRowType = serversTable.getRowType(new JavaTypeFactoryImpl()); final List serverFields = serverRowType.getFieldList(); - Assert.assertEquals(9, 
serverFields.size()); + Assert.assertEquals(10, serverFields.size()); Assert.assertEquals("server", serverFields.get(0).getName()); Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName()); } @@ -814,6 +833,7 @@ public void testServersTable() final List expectedRows = new ArrayList<>(); final Long nonLeader = NullHandling.defaultLongValue(); + final String startTimeStr = startTime.toString(); expectedRows.add( createExpectedRow( "brokerHost:8082", @@ -824,7 +844,8 @@ public void testServersTable() null, 0L, 0L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add( @@ -837,7 +858,8 @@ public void testServersTable() "tier", 0L, 1000L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add( @@ -850,7 +872,8 @@ public void testServersTable() "tier", 400L, 1000L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add( @@ -863,7 +886,8 @@ public void testServersTable() "tier", 0L, 1000L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add( @@ -876,7 +900,8 @@ public void testServersTable() "tier", 0L, 1000L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add(createExpectedRow( @@ -888,7 +913,8 @@ public void testServersTable() "tier", 0L, 1000L, - nonLeader + nonLeader, + startTimeStr )); expectedRows.add( createExpectedRow( @@ -900,7 +926,8 @@ public void testServersTable() null, 0L, 0L, - 1L + 1L, + startTimeStr ) ); expectedRows.add( @@ -913,7 +940,8 @@ public void testServersTable() null, 0L, 0L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add( @@ -926,7 +954,8 @@ public void testServersTable() "tier", 200L, 1000L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add( @@ -939,7 +968,8 @@ public void testServersTable() null, 0L, 0L, - 1L + 1L, + startTimeStr ) ); expectedRows.add( @@ -952,7 +982,8 @@ public void testServersTable() null, 0L, 0L, - 0L + 0L, + startTimeStr ) ); expectedRows.add( @@ -965,7 +996,8 @@ public void testServersTable() null, 0L, 0L, - 0L + 0L, + 
startTimeStr ) ); expectedRows.add( @@ -978,7 +1010,8 @@ public void testServersTable() null, 0L, 0L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add( @@ -991,7 +1024,8 @@ public void testServersTable() null, 0L, 0L, - nonLeader + nonLeader, + startTimeStr ) ); expectedRows.add(createExpectedRow( @@ -1003,7 +1037,8 @@ public void testServersTable() "tier", 0L, 1000L, - nonLeader + nonLeader, + startTimeStr )); Assert.assertEquals(expectedRows.size(), rows.size()); for (int i = 0; i < rows.size(); i++) { @@ -1035,7 +1070,8 @@ private Object[] createExpectedRow( @Nullable String tier, @Nullable Long currSize, @Nullable Long maxSize, - @Nullable Long isLeader + @Nullable Long isLeader, + String startTime ) { return new Object[]{ @@ -1047,7 +1083,8 @@ private Object[] createExpectedRow( tier, currSize, maxSize, - isLeader + isLeader, + startTime }; } diff --git a/web-console/src/views/services-view/__snapshots__/services-view.spec.tsx.snap b/web-console/src/views/services-view/__snapshots__/services-view.spec.tsx.snap index bfec768131e1..7679fe55a98f 100644 --- a/web-console/src/views/services-view/__snapshots__/services-view.spec.tsx.snap +++ b/web-console/src/views/services-view/__snapshots__/services-view.spec.tsx.snap @@ -58,6 +58,7 @@ exports[`ServicesView renders data 1`] = ` "Current size", "Max size", "Usage", + "Start time", "Detail", "Actions", ] @@ -196,6 +197,14 @@ exports[`ServicesView renders data 1`] = ` "show": true, "width": 140, }, + Object { + "Aggregated": [Function], + "Cell": [Function], + "Header": "Start time", + "accessor": "start_time", + "show": true, + "width": 200, + }, Object { "Aggregated": [Function], "Cell": [Function], @@ -230,6 +239,7 @@ exports[`ServicesView renders data 1`] = ` "plaintext_port": 8082, "service": "localhost:8082", "service_type": "broker", + "start_time": 0, "tier": null, "tls_port": -1, }, @@ -245,6 +255,7 @@ exports[`ServicesView renders data 1`] = ` "segmentsToLoadSize": 0, "service": 
"localhost:8083", "service_type": "historical", + "start_time": 0, "tier": "_default_tier", "tls_port": -1, }, diff --git a/web-console/src/views/services-view/services-view.spec.tsx b/web-console/src/views/services-view/services-view.spec.tsx index 212ed5b12a07..681ba7695df2 100644 --- a/web-console/src/views/services-view/services-view.spec.tsx +++ b/web-console/src/views/services-view/services-view.spec.tsx @@ -49,6 +49,7 @@ jest.mock('../../utils', () => { curr_size: 0, max_size: 0, is_leader: 0, + start_time: 0, }, { service: 'localhost:8083', @@ -64,6 +65,7 @@ jest.mock('../../utils', () => { segmentsToDrop: 0, segmentsToLoadSize: 0, segmentsToDropSize: 0, + start_time: 0, }, ], ], diff --git a/web-console/src/views/services-view/services-view.tsx b/web-console/src/views/services-view/services-view.tsx index 636232cf7d7b..05c563c6b66d 100644 --- a/web-console/src/views/services-view/services-view.tsx +++ b/web-console/src/views/services-view/services-view.tsx @@ -59,23 +59,43 @@ import type { BasicAction } from '../../utils/basic-action'; import './services-view.scss'; -const allColumns: string[] = [ - 'Service', - 'Type', - 'Tier', - 'Host', - 'Port', - 'Current size', - 'Max size', - 'Usage', - 'Detail', - ACTION_COLUMN_LABEL, -]; - const tableColumns: Record = { - 'full': allColumns, - 'no-sql': allColumns, - 'no-proxy': ['Service', 'Type', 'Tier', 'Host', 'Port', 'Current size', 'Max size', 'Usage'], + 'full': [ + 'Service', + 'Type', + 'Tier', + 'Host', + 'Port', + 'Current size', + 'Max size', + 'Usage', + 'Start time', + 'Detail', + ACTION_COLUMN_LABEL, + ], + 'no-sql': [ + 'Service', + 'Type', + 'Tier', + 'Host', + 'Port', + 'Current size', + 'Max size', + 'Usage', + 'Detail', + ACTION_COLUMN_LABEL, + ], + 'no-proxy': [ + 'Service', + 'Type', + 'Tier', + 'Host', + 'Port', + 'Current size', + 'Max size', + 'Usage', + 'Start time', + ], }; function formatQueues( @@ -128,6 +148,7 @@ interface ServiceResultRow { readonly max_size: NumberLike; readonly 
plaintext_port: number; readonly tls_port: number; + readonly start_time: string; loadQueueInfo?: LoadQueueInfo; workerInfo?: WorkerInfo; } @@ -178,7 +199,8 @@ export class ServicesView extends React.PureComponent '', + }, { Header: 'Detail', show: visibleColumns.shown('Detail'), diff --git a/website/.spelling b/website/.spelling index bf846ba2078d..a437f559b248 100644 --- a/website/.spelling +++ b/website/.spelling @@ -475,6 +475,7 @@ smooshed splittable ssl sslmode +start_time stdout storages stringDictionaryEncoding