Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import com.google.common.base.Preconditions;
import org.apache.druid.timeline.DataSegment;

import javax.annotation.Nullable;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
Expand Down Expand Up @@ -63,6 +64,12 @@ public Collection<DataSegment> getSegments()
return Collections.unmodifiableCollection(idToSegmentMap.values());
}

/**
 * Looks up a segment belonging to this data source by its identifier.
 *
 * @param segmentId identifier of the segment to look up
 * @return the matching {@link DataSegment}, or {@code null} if no segment with the given
 *         identifier is present in this data source
 */
@Nullable
public DataSegment getSegment(String segmentId)
{
  final DataSegment segment = idToSegmentMap.get(segmentId);
  return segment;
}

public DruidDataSource addSegment(DataSegment dataSegment)
{
idToSegmentMap.put(dataSegment.getIdentifier(), dataSegment);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,6 @@
import com.google.common.base.Throwables;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Interner;
import com.google.common.collect.Interners;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
Expand Down Expand Up @@ -82,7 +80,6 @@
@ManageLifecycle
public class SQLMetadataSegmentManager implements MetadataSegmentManager
{
private static final Interner<DataSegment> DATA_SEGMENT_INTERNER = Interners.newWeakInterner();
private static final EmittingLogger log = new EmittingLogger(SQLMetadataSegmentManager.class);

/**
Expand Down Expand Up @@ -232,7 +229,7 @@ public boolean enableDatasource(final String ds)
.iterator(),
payload -> {
try {
return DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(payload, DataSegment.class));
return jsonMapper.readValue(payload, DataSegment.class);
}
catch (IOException e) {
throw new RuntimeException(e);
Expand Down Expand Up @@ -466,10 +463,9 @@ public DataSegment map(int index, ResultSet r, StatementContext ctx)
throws SQLException
{
try {
return DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(
r.getBytes("payload"),
DataSegment.class
));
return replaceWithExistingSegmentIfPresent(
jsonMapper.readValue(r.getBytes("payload"), DataSegment.class)
);
}
catch (IOException e) {
log.makeAlert(e, "Failed to read segment from db.").emit();
Expand Down Expand Up @@ -535,6 +531,25 @@ public DataSegment map(int index, ResultSet r, StatementContext ctx)
}
}

/**
 * Returns an already-polled {@link DataSegment} with the same identifier as the given segment, if
 * one exists in {@link #dataSourcesRef}; otherwise returns the given segment unchanged.
 *
 * <p>Rationale: {@link #poll()} fetches and deserializes every existing segment on each run and
 * then swaps them into {@link #dataSourcesRef}. Reusing the previously stored (old-generation)
 * objects — effectively interning them a la {@link String#intern} or {@link
 * com.google.common.collect.Interner} — lets the freshly deserialized duplicates become garbage
 * while still in the young generation. This avoids old-generation fragmentation and full GCs,
 * because for the JVM garbage collector it is best for objects to either die young or, once
 * promoted to the old generation, stay alive.
 */
private DataSegment replaceWithExistingSegmentIfPresent(DataSegment segment)
{
  final DruidDataSource existingDataSource = dataSourcesRef.get().get(segment.getDataSource());
  if (existingDataSource == null) {
    return segment;
  }
  final DataSegment existingSegment = existingDataSource.getSegment(segment.getIdentifier());
  if (existingSegment != null) {
    return existingSegment;
  }
  return segment;
}

private String getSegmentsTable()
{
return dbTables.get().getSegmentsTable();
Expand Down