Merged
@@ -35,7 +35,8 @@
@JsonSubTypes.Type(value = UnionDataSource.class, name = "union"),
@JsonSubTypes.Type(value = JoinDataSource.class, name = "join"),
@JsonSubTypes.Type(value = LookupDataSource.class, name = "lookup"),
@JsonSubTypes.Type(value = InlineDataSource.class, name = "inline")
@JsonSubTypes.Type(value = InlineDataSource.class, name = "inline"),
@JsonSubTypes.Type(value = GlobalTableDataSource.class, name = "globalTable")
})
public interface DataSource
{
@@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.druid.query;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeName;

/**
* {@link TableDataSource} variant for globally available 'broadcast' segments. If bound to a
* {@link org.apache.druid.segment.join.JoinableFactory} that can create an
* {@link org.apache.druid.segment.join.table.IndexedTable} using DruidBinders.joinableFactoryBinder, segments of this
* DataSource type can be used optimally in join operations (because they are global) and pushed down to historicals
* as a {@link JoinDataSource}, instead of requiring a subquery join that uses {@link InlineDataSource} to construct
* an {@link org.apache.druid.segment.join.table.IndexedTable} on the fly on the broker. Because it is also a
* {@link TableDataSource}, when queried directly or used on the left-hand side of a join, it is treated like any
* normal table datasource.
*/
@JsonTypeName("globalTable")
public class GlobalTableDataSource extends TableDataSource

Contributor: What do you think about adding a javadoc explaining why this datasource type exists?

Member Author: Added javadoc 👍

{
@JsonCreator
public GlobalTableDataSource(@JsonProperty("name") String name)
{
super(name);
}

@Override
public boolean isGlobal()
{
return true;
}

@Override
public String toString()
{
return "GlobalTableDataSource{" +
"name='" + getName() + '\'' +
'}';
}
}
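
The javadoc above describes the join-pushdown motivation. Purely as illustration, here is a minimal sketch of how this type round-trips through Jackson, assuming nothing beyond the annotations shown in this diff is needed; the "wikipedia" name, the bare ObjectMapper, and the standalone main method are assumptions of the sketch (the PR's own test below uses TestHelper.makeJsonMapper()):

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.query.DataSource;
import org.apache.druid.query.GlobalTableDataSource;

public class GlobalTableSerdeSketch
{
  public static void main(String[] args) throws Exception
  {
    // The @JsonSubTypes entry on DataSource maps the "globalTable" type discriminator
    // to GlobalTableDataSource, so polymorphic deserialization picks the new class.
    final ObjectMapper mapper = new ObjectMapper();
    final GlobalTableDataSource ds = (GlobalTableDataSource) mapper.readValue(
        "{\"type\":\"globalTable\",\"name\":\"wikipedia\"}",
        DataSource.class
    );

    System.out.println(ds);            // GlobalTableDataSource{name='wikipedia'}
    System.out.println(ds.isGlobal()); // true
  }
}
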
@@ -27,6 +27,7 @@

import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Set;

@JsonTypeName("table")
@@ -99,27 +100,21 @@ public String toString()
}

@Override
public final boolean equals(Object o)
public boolean equals(Object o)
{
if (this == o) {
return true;
}
if (!(o instanceof TableDataSource)) {
if (o == null || getClass() != o.getClass()) {
return false;
}

TableDataSource that = (TableDataSource) o;

if (!name.equals(that.name)) {
return false;
}

return true;
return name.equals(that.name);
}

@Override
public final int hashCode()
public int hashCode()
{
return name.hashCode();
return Objects.hash(name);
}
}
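
With getClass()-based equality, a GlobalTableDataSource and a plain TableDataSource that share a name no longer compare equal, while GlobalTableDataSource inherits a correct equals()/hashCode() without overriding them. A minimal sketch of that consequence (the "wikipedia" name and the standalone main method are illustrative only; the new GlobalTableDataSourceTest below asserts the same behavior):

import org.apache.druid.query.GlobalTableDataSource;
import org.apache.druid.query.TableDataSource;

public class DataSourceEqualitySketch
{
  public static void main(String[] args)
  {
    final TableDataSource plain = new TableDataSource("wikipedia");
    final GlobalTableDataSource global = new GlobalTableDataSource("wikipedia");

    // Same name but different concrete classes: not equal in either direction.
    System.out.println(plain.equals(global));                            // false
    System.out.println(global.equals(plain));                            // false

    // Two instances of the same concrete class with the same name remain equal.
    System.out.println(plain.equals(new TableDataSource("wikipedia")));  // true
  }
}
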
@@ -99,5 +99,4 @@ public void testUnionDataSource() throws Exception
final DataSource serde = JSON_MAPPER.readValue(JSON_MAPPER.writeValueAsString(dataSource), DataSource.class);
Assert.assertEquals(dataSource, serde);
}

}
@@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.druid.query;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import nl.jqno.equalsverifier.EqualsVerifier;
import org.apache.druid.segment.TestHelper;
import org.junit.Assert;
import org.junit.Test;

public class GlobalTableDataSourceTest
{
private static final GlobalTableDataSource GLOBAL_TABLE_DATA_SOURCE = new GlobalTableDataSource("foo");

@Test
public void testEquals()

Contributor: Please add a test for nonequality with a TableDataSource of the same name.

Member Author: added

{
EqualsVerifier.forClass(GlobalTableDataSource.class)
.usingGetClass()
.withNonnullFields("name")
.verify();
}

@Test
public void testGlobalTableIsNotEqualsTable()
{
TableDataSource tbl = new TableDataSource(GLOBAL_TABLE_DATA_SOURCE.getName());
Assert.assertNotEquals(GLOBAL_TABLE_DATA_SOURCE, tbl);
Assert.assertNotEquals(tbl, GLOBAL_TABLE_DATA_SOURCE);
}

@Test
public void testIsGlobal()
{
Assert.assertTrue(GLOBAL_TABLE_DATA_SOURCE.isGlobal());
}

@Test
public void testSerde() throws JsonProcessingException
{
final ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
final GlobalTableDataSource deserialized = (GlobalTableDataSource) jsonMapper.readValue(
jsonMapper.writeValueAsString(GLOBAL_TABLE_DATA_SOURCE),
DataSource.class
);

Assert.assertEquals(GLOBAL_TABLE_DATA_SOURCE, deserialized);
}
}
@@ -86,7 +86,7 @@ public void test_withChildren_nonEmpty()
@Test
public void test_equals()
{
EqualsVerifier.forClass(TableDataSource.class).withNonnullFields("name").verify();
EqualsVerifier.forClass(TableDataSource.class).usingGetClass().withNonnullFields("name").verify();
}

@Test
server/src/main/java/org/apache/druid/client/BrokerServerView.java (30 additions, 27 deletions)
@@ -218,51 +218,54 @@ private QueryableDruidServer removeServer(DruidServer server)

private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment)
{
if (server.getType().equals(ServerType.BROKER)) {
// in theory we could just filter this to ensure we don't put ourselves in here, to make dope broker tree
// query topologies, but for now just skip all brokers, so we don't create some sort of wild infinite query
// loop...
return;
}
SegmentId segmentId = segment.getId();
synchronized (lock) {
log.debug("Adding segment[%s] for server[%s]", segment, server);

ServerSelector selector = selectors.get(segmentId);
if (selector == null) {
selector = new ServerSelector(segment, tierSelectorStrategy);
// in theory we could probably just filter this to ensure we don't put ourselves in here, to make broker tree
// query topologies, but for now just skip all brokers, so we don't create some sort of wild infinite query
// loop...
if (!server.getType().equals(ServerType.BROKER)) {
log.debug("Adding segment[%s] for server[%s]", segment, server);
ServerSelector selector = selectors.get(segmentId);
if (selector == null) {
selector = new ServerSelector(segment, tierSelectorStrategy);

VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get(segment.getDataSource());
if (timeline == null) {
timeline = new VersionedIntervalTimeline<>(Ordering.natural());
timelines.put(segment.getDataSource(), timeline);
}

VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get(segment.getDataSource());
if (timeline == null) {
timeline = new VersionedIntervalTimeline<>(Ordering.natural());
timelines.put(segment.getDataSource(), timeline);
timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(selector));
selectors.put(segmentId, selector);
}

timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(selector));
selectors.put(segmentId, selector);
}

QueryableDruidServer queryableDruidServer = clients.get(server.getName());
if (queryableDruidServer == null) {
queryableDruidServer = addServer(baseView.getInventoryValue(server.getName()));
QueryableDruidServer queryableDruidServer = clients.get(server.getName());
if (queryableDruidServer == null) {
queryableDruidServer = addServer(baseView.getInventoryValue(server.getName()));
}
selector.addServerAndUpdateSegment(queryableDruidServer, segment);
}
selector.addServerAndUpdateSegment(queryableDruidServer, segment);
// run the callbacks even if the segment came from a broker, letting downstream watchers decide what to do with it
runTimelineCallbacks(callback -> callback.segmentAdded(server, segment));
}
}

private void serverRemovedSegment(DruidServerMetadata server, DataSegment segment)
{
if (server.getType().equals(ServerType.BROKER)) {
// might as well save the trouble of grabbing a lock for something that isn't there..
return;
}

SegmentId segmentId = segment.getId();
final ServerSelector selector;

synchronized (lock) {
log.debug("Removing segment[%s] from server[%s].", segmentId, server);

// we don't store broker segments here, but still run the callbacks for the segment being removed from the server
// since the broker segments are not stored on the timeline, do not fire segmentRemoved event
if (server.getType().equals(ServerType.BROKER)) {
runTimelineCallbacks(callback -> callback.serverSegmentRemoved(server, segment));
return;
}

selector = selectors.get(segmentId);
if (selector == null) {
log.warn("Told to remove non-existant segment[%s]", segmentId);
@@ -129,6 +129,7 @@ public <T> QueryRunner<T> getQueryRunnerForIntervals(final Query<T> query, final
.applyPostMergeDecoration()
.emitCPUTimeMetric(emitter, cpuAccumulator);
}

@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs)
{
@@ -40,6 +40,7 @@

import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/**
@@ -121,6 +122,11 @@ public Map<String, Long> getDataSourceSizes()
return CollectionUtils.mapValues(dataSources, SegmentManager.DataSourceState::getTotalSegmentSize);
}

public Set<String> getDataSourceNames()
{
return dataSources.keySet();
}

/**
* Returns a map of dataSource to the number of segments managed by this segmentManager. This method should be used
* carefully because the returned map might be different from the actual data source states.
@@ -51,6 +51,7 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -421,17 +422,19 @@ public void testLoadAndDropNonRootGenerationSegment() throws SegmentLoadingExcep
@SuppressWarnings("RedundantThrows") // TODO remove when the bug in intelliJ is fixed.
private void assertResult(List<DataSegment> expectedExistingSegments) throws SegmentLoadingException
{
final Map<String, Long> expectedDataSourceSizes = expectedExistingSegments
.stream()
.collect(Collectors.toMap(DataSegment::getDataSource, DataSegment::getSize, Long::sum));
final Map<String, Long> expectedDataSourceCounts = expectedExistingSegments
.stream()
.collect(Collectors.toMap(DataSegment::getDataSource, segment -> 1L, Long::sum));
final Map<String, VersionedIntervalTimeline<String, ReferenceCountingSegment>> expectedDataSources
= new HashMap<>();
final Map<String, Long> expectedDataSourceSizes =
expectedExistingSegments.stream()
.collect(Collectors.toMap(DataSegment::getDataSource, DataSegment::getSize, Long::sum));
final Map<String, Long> expectedDataSourceCounts =
expectedExistingSegments.stream()
.collect(Collectors.toMap(DataSegment::getDataSource, segment -> 1L, Long::sum));
final Set<String> expectedDataSourceNames = expectedExistingSegments.stream()
.map(DataSegment::getDataSource)
.collect(Collectors.toSet());
final Map<String, VersionedIntervalTimeline<String, ReferenceCountingSegment>> expectedTimelines = new HashMap<>();
for (DataSegment segment : expectedExistingSegments) {
final VersionedIntervalTimeline<String, ReferenceCountingSegment> expectedTimeline =
expectedDataSources.computeIfAbsent(
expectedTimelines.computeIfAbsent(
segment.getDataSource(),
k -> new VersionedIntervalTimeline<>(Ordering.natural())
);
@@ -444,11 +447,12 @@ private void assertResult(List<DataSegment> expectedExistingSegments) throws Seg
);
}

Assert.assertEquals(expectedDataSourceNames, segmentManager.getDataSourceNames());
Assert.assertEquals(expectedDataSourceCounts, segmentManager.getDataSourceCounts());
Assert.assertEquals(expectedDataSourceSizes, segmentManager.getDataSourceSizes());

final Map<String, DataSourceState> dataSources = segmentManager.getDataSources();
Assert.assertEquals(expectedDataSources.size(), dataSources.size());
Assert.assertEquals(expectedTimelines.size(), dataSources.size());

dataSources.forEach(
(sourceName, dataSourceState) -> {
Expand All @@ -458,7 +462,7 @@ private void assertResult(List<DataSegment> expectedExistingSegments) throws Seg
dataSourceState.getTotalSegmentSize()
);
Assert.assertEquals(
expectedDataSources.get(sourceName).getAllTimelineEntries(),
expectedTimelines.get(sourceName).getAllTimelineEntries(),
dataSourceState.getTimeline().getAllTimelineEntries()
);
}