diff --git a/api/pom.xml b/api/pom.xml index d4bcddd43704..5bc9bbcbb50f 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT
diff --git a/api/src/main/java/io/druid/guice/JsonConfigurator.java b/api/src/main/java/io/druid/guice/JsonConfigurator.java index f6d766dd9551..09721a932d59 100644 --- a/api/src/main/java/io/druid/guice/JsonConfigurator.java +++ b/api/src/main/java/io/druid/guice/JsonConfigurator.java @@ -93,7 +93,6 @@ public <T> T configurate(Properties props, String propertyPrefix, Class<T> clazz log.info(e, "Unable to parse [%s]=[%s] as a json object, using as is.", prop, propValue); value = propValue; } - hieraricalPutValue(propertyPrefix, prop, prop.substring(propertyBase.length()), value, jsonMap); } } @@ -175,8 +174,11 @@ private static void hieraricalPutValue( ) { int dotIndex = property.indexOf('.'); + // Always put the property under its full name, even if the name is of the form a.b. This makes sure the property is + // available for classes whose JsonProperty names are of the form a.b. + // Note: this will cause more properties than strictly required to be present in the jsonMap. + targetMap.put(property, value); if (dotIndex < 0) { - targetMap.put(property, value); return; } if (dotIndex == 0) {
diff --git a/api/src/main/java/io/druid/indexer/TaskStatusPlus.java b/api/src/main/java/io/druid/indexer/TaskStatusPlus.java index a45a9a7865a9..0560206f213a 100644 --- a/api/src/main/java/io/druid/indexer/TaskStatusPlus.java +++ b/api/src/main/java/io/druid/indexer/TaskStatusPlus.java @@ -25,6 +25,7 @@ import org.joda.time.DateTime; import javax.annotation.Nullable; +import java.util.Objects; public class TaskStatusPlus { @@ -40,7 +41,7 @@ public TaskStatusPlus( @JsonProperty("id") String id, @JsonProperty("createdTime") DateTime createdTime, @JsonProperty("queueInsertionTime") DateTime queueInsertionTime, - @JsonProperty("state") @Nullable TaskState state, + @JsonProperty("statusCode") @Nullable TaskState state, @JsonProperty("duration") @Nullable Long duration, @JsonProperty("location") TaskLocation location ) @@ -74,7 +75,8 @@ public DateTime getQueueInsertionTime() return queueInsertionTime; } - @JsonProperty + @Nullable + @JsonProperty("statusCode") public TaskState getState() { return state; } @@ -91,4 +93,40 @@ public TaskLocation getLocation() { return location; } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + final TaskStatusPlus that = (TaskStatusPlus) o; + if (!id.equals(that.id)) { + return false; + } + if (!createdTime.equals(that.createdTime)) { + return false; + } + if (!queueInsertionTime.equals(that.queueInsertionTime)) { + return false; + } + if (!Objects.equals(state, that.state)) { + return false; + } + if (!Objects.equals(duration, that.duration)) { + return false; + } + return location.equals(that.location); + } + + @Override + public int hashCode() + { + return Objects.hash(id, createdTime, queueInsertionTime, state, duration, location); + } }
diff --git a/api/src/main/java/io/druid/segment/loading/DataSegmentFinder.java b/api/src/main/java/io/druid/segment/loading/DataSegmentFinder.java index 937a42e72c23..66af8353dafb 100644 --- a/api/src/main/java/io/druid/segment/loading/DataSegmentFinder.java +++ b/api/src/main/java/io/druid/segment/loading/DataSegmentFinder.java @@ -20,8 +20,11 @@ package io.druid.segment.loading; import io.druid.guice.annotations.ExtensionPoint; +import io.druid.java.util.common.Pair; +import
io.druid.java.util.common.logger.Logger; import io.druid.timeline.DataSegment; +import java.util.Map; import java.util.Set; /** @@ -31,6 +34,8 @@ @ExtensionPoint public interface DataSegmentFinder { + Logger log = new Logger(DataSegmentFinder.class); + /** * This method should first recursively look for descriptor.json (partitionNum_descriptor.json for HDFS data storage) underneath * workingDirPath and then verify that index.zip (partitionNum_index.zip for HDFS data storage) exists in the same folder. @@ -46,4 +51,26 @@ public interface DataSegmentFinder * @return a set of segments that were found underneath workingDirPath */ Set<DataSegment> findSegments(String workingDirPath, boolean updateDescriptor) throws SegmentLoadingException; + + /** + * Adds dataSegment if it does not exist in timestampedSegments. If it exists, replaces entry if segmentModifiedAt is + * newer than stored timestamp. + * + * @param timestampedSegments map of segment id -> Pair of (data segment, modified time), containing segments with modified time + * @param dataSegment segment to add + * @param segmentModifiedAt segment modified timestamp + */ + static void putInMapRetainingNewest( + Map<String, Pair<DataSegment, Long>> timestampedSegments, DataSegment dataSegment, long segmentModifiedAt + ) + { + timestampedSegments.merge( + dataSegment.getIdentifier(), + Pair.of(dataSegment, segmentModifiedAt), + (previous, current) -> { + log.warn("Multiple copies of segmentId [%s] found, using newest version", current.lhs.getIdentifier()); + return previous.rhs > current.rhs ? previous : current; + } + ); + } }
diff --git a/api/src/main/java/io/druid/segment/loading/DataSegmentKiller.java b/api/src/main/java/io/druid/segment/loading/DataSegmentKiller.java index c26a73daeb10..96bbeb9362d0 100644 --- a/api/src/main/java/io/druid/segment/loading/DataSegmentKiller.java +++ b/api/src/main/java/io/druid/segment/loading/DataSegmentKiller.java @@ -20,16 +20,41 @@ package io.druid.segment.loading; import io.druid.guice.annotations.ExtensionPoint; +import io.druid.java.util.common.logger.Logger; import io.druid.timeline.DataSegment; import java.io.IOException; -/** - */ @ExtensionPoint public interface DataSegmentKiller { - void kill(DataSegment segments) throws SegmentLoadingException; - void killAll() throws IOException; + Logger log = new Logger(DataSegmentKiller.class); + + /** + * Removes segment files (index and metadata) from deep storage. + * @param segment the segment to kill + * @throws SegmentLoadingException if the segment could not be completely removed + */ + void kill(DataSegment segment) throws SegmentLoadingException; + /** + * A more stoic killer who doesn't throw a tantrum if things get messy. Use when killing segments for best-effort + * cleanup. + * @param segment the segment to kill + */ + default void killQuietly(DataSegment segment) + { + try { + kill(segment); + } + catch (Exception e) { + log.debug(e, "Failed to kill segment %s", segment); + } + } + + /** + * Like a nuke. Use wisely. Used by the 'reset-cluster' command, and of the built-in deep storage implementations, it + * is only implemented by local and HDFS.
+ */ + void killAll() throws IOException; }
diff --git a/api/src/main/java/io/druid/segment/loading/DataSegmentPusher.java b/api/src/main/java/io/druid/segment/loading/DataSegmentPusher.java index b9bf810f72ce..7c4cead40cb2 100644 --- a/api/src/main/java/io/druid/segment/loading/DataSegmentPusher.java +++ b/api/src/main/java/io/druid/segment/loading/DataSegmentPusher.java @@ -30,6 +30,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.UUID; @ExtensionPoint public interface DataSegmentPusher { @@ -39,18 +40,53 @@ public interface DataSegmentPusher @Deprecated String getPathForHadoop(String dataSource); String getPathForHadoop(); - DataSegment push(File file, DataSegment segment) throws IOException; + + /** + * Pushes index files and segment descriptor to deep storage. + * @param file directory containing index files + * @param segment segment descriptor + * @param useUniquePath if true, pushes to a unique file path. This prevents situations where task failures or replica + * tasks can either overwrite or fail to overwrite existing segments leading to the possibility + * of different versions of the same segment ID containing different data. As an example, a Kafka + * indexing task starting at offset A and ending at offset B may push a segment to deep storage + * and then fail before writing the loadSpec to the metadata table, resulting in a replacement + * task being spawned. This replacement will also start at offset A but will read to offset C and + * will then push a segment to deep storage and write the loadSpec metadata. Without unique file + * paths, this can only work correctly if new segments overwrite existing segments. Suppose that + * at this point the task then fails so that the supervisor retries again from offset A. This 3rd + * attempt will overwrite the segments in deep storage before failing to write the loadSpec + * metadata, resulting in inconsistencies in the segment data now in deep storage and copies of + * the segment already loaded by historicals. + * + * If unique paths are used, the caller is responsible for cleaning up segments that were pushed but + * were not written to the metadata table (for example when using replica tasks). + * @return segment descriptor + * @throws IOException + */ + DataSegment push(File file, DataSegment segment, boolean useUniquePath) throws IOException; + //use map instead of LoadSpec class to avoid dependency pollution. Map<String, Object> makeLoadSpec(URI finalIndexZipFilePath); + /** + * @deprecated backward-compatibility shim that should be removed on next major release; + * use {@link #getStorageDir(DataSegment, boolean)} instead. + */ + @Deprecated default String getStorageDir(DataSegment dataSegment) { - return getDefaultStorageDir(dataSegment); + return getStorageDir(dataSegment, false); + } + + default String getStorageDir(DataSegment dataSegment, boolean useUniquePath) + { + return getDefaultStorageDir(dataSegment, useUniquePath); } default String makeIndexPathName(DataSegment dataSegment, String indexName) { - return StringUtils.format("./%s/%s", getStorageDir(dataSegment), indexName); + // This is only called from Hadoop batch which doesn't require unique segment paths so set useUniquePath=false + return StringUtils.format("./%s/%s", getStorageDir(dataSegment, false), indexName); } /** @@ -66,13 +102,19 @@ default List<String> getAllowedPropertyPrefixesForHadoop() // If above format is ever changed, make sure to change it appropriately in other places // e.g.
HDFSDataSegmentKiller uses this information to clean the version, interval and dataSource directories // on segment deletion if segment being deleted was the only segment - static String getDefaultStorageDir(DataSegment segment) + static String getDefaultStorageDir(DataSegment segment, boolean useUniquePath) { return JOINER.join( segment.getDataSource(), StringUtils.format("%s_%s", segment.getInterval().getStart(), segment.getInterval().getEnd()), segment.getVersion(), - segment.getShardSpec().getPartitionNum() + segment.getShardSpec().getPartitionNum(), + useUniquePath ? generateUniquePath() : null ); } + + static String generateUniquePath() + { + return UUID.randomUUID().toString(); + } } diff --git a/api/src/main/java/io/druid/utils/CompressionUtils.java b/api/src/main/java/io/druid/utils/CompressionUtils.java index 6a551e319e0d..2b25186b25fe 100644 --- a/api/src/main/java/io/druid/utils/CompressionUtils.java +++ b/api/src/main/java/io/druid/utils/CompressionUtils.java @@ -36,26 +36,26 @@ public class CompressionUtils private static final Logger log = new Logger(CompressionUtils.class); - @Deprecated // Use com.metamx.common.CompressionUtils.zip + @Deprecated // Use io.druid.java.util.common.CompressionUtils.zip public static long zip(File directory, File outputZipFile) throws IOException { return io.druid.java.util.common.CompressionUtils.zip(directory, outputZipFile); } - @Deprecated // Use com.metamx.common.CompressionUtils.zip + @Deprecated // Use io.druid.java.util.common.CompressionUtils.zip public static long zip(File directory, OutputStream out) throws IOException { return io.druid.java.util.common.CompressionUtils.zip(directory, out); } - @Deprecated // Use com.metamx.common.CompressionUtils.unzip + @Deprecated // Use io.druid.java.util.common.CompressionUtils.unzip public static void unzip(File pulledFile, File outDir) throws IOException { io.druid.java.util.common.CompressionUtils.unzip(pulledFile, outDir); } - @Deprecated // Use com.metamx.common.CompressionUtils.unzip + @Deprecated // Use io.druid.java.util.common.CompressionUtils.unzip public static void unzip(InputStream in, File outDir) throws IOException { io.druid.java.util.common.CompressionUtils.unzip(in, outDir); @@ -63,8 +63,8 @@ public static void unzip(InputStream in, File outDir) throws IOException /** * Uncompress using a gzip uncompress algorithm from the `pulledFile` to the `outDir`. - * Unlike `com.metamx.common.CompressionUtils.gunzip`, this function takes an output *DIRECTORY* and tries to guess the file name. - * It is recommended that the caller use `com.metamx.common.CompressionUtils.gunzip` and specify the output file themselves to ensure names are as expected + * Unlike `io.druid.java.util.common.CompressionUtils.gunzip`, this function takes an output *DIRECTORY* and tries to guess the file name. 
+ * It is recommended that the caller use `io.druid.java.util.common.CompressionUtils.gunzip` and specify the output file themselves to ensure names are as expected * * @param pulledFile The source file * @param outDir The destination directory to put the resulting file diff --git a/api/src/test/java/io/druid/guice/JsonConfiguratorTest.java b/api/src/test/java/io/druid/guice/JsonConfiguratorTest.java index 0ce4f77a79aa..acfadf57131f 100644 --- a/api/src/test/java/io/druid/guice/JsonConfiguratorTest.java +++ b/api/src/test/java/io/druid/guice/JsonConfiguratorTest.java @@ -94,10 +94,13 @@ public ExecutableValidator forExecutables() public void testTest() { Assert.assertEquals( - new MappableObject("p1", ImmutableList.of("p2")), - new MappableObject("p1", ImmutableList.of("p2")) + new MappableObject("p1", ImmutableList.of("p2"), "p2"), + new MappableObject("p1", ImmutableList.of("p2"), "p2") + ); + Assert.assertEquals( + new MappableObject("p1", null, null), + new MappableObject("p1", ImmutableList.of(), null) ); - Assert.assertEquals(new MappableObject("p1", null), new MappableObject("p1", ImmutableList.of())); } @Test @@ -140,6 +143,19 @@ public void testQuotedConfig() Assert.assertEquals("testing \"prop1\"", obj.prop1); Assert.assertEquals(ImmutableList.of(), obj.prop1List); } + + @Test + public void testPropertyWithDot() + { + final JsonConfigurator configurator = new JsonConfigurator(mapper, validator); + properties.setProperty(PROP_PREFIX + "prop2.prop.2", "testing"); + properties.setProperty(PROP_PREFIX + "prop1", "prop1"); + final MappableObject obj = configurator.configurate(properties, PROP_PREFIX, MappableObject.class); + Assert.assertEquals("testing", obj.prop2); + Assert.assertEquals(ImmutableList.of(), obj.prop1List); + Assert.assertEquals("prop1", obj.prop1); + + } } class MappableObject @@ -148,15 +164,19 @@ class MappableObject final String prop1; @JsonProperty("prop1List") final List prop1List; + @JsonProperty("prop2.prop.2") + final String prop2; @JsonCreator protected MappableObject( @JsonProperty("prop1") final String prop1, - @JsonProperty("prop1List") final List prop1List + @JsonProperty("prop1List") final List prop1List, + @JsonProperty("prop2.prop.2") final String prop2 ) { this.prop1 = prop1; this.prop1List = prop1List == null ? ImmutableList.of() : prop1List; + this.prop2 = prop2; } @@ -172,6 +192,12 @@ public String getProp1() return prop1; } + @JsonProperty + public String getProp2() + { + return prop2; + } + @Override public boolean equals(Object o) { diff --git a/api/src/test/java/io/druid/indexer/TaskStatusPlusTest.java b/api/src/test/java/io/druid/indexer/TaskStatusPlusTest.java new file mode 100644 index 000000000000..a4f3460e65a2 --- /dev/null +++ b/api/src/test/java/io/druid/indexer/TaskStatusPlusTest.java @@ -0,0 +1,87 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.indexer; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.deser.std.StdDeserializer; +import com.fasterxml.jackson.databind.module.SimpleModule; +import com.fasterxml.jackson.databind.ser.std.ToStringSerializer; +import io.druid.java.util.common.DateTimes; +import org.joda.time.DateTime; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; + +public class TaskStatusPlusTest +{ + @Test + public void testSerde() throws IOException + { + final ObjectMapper mapper = new ObjectMapper(); + mapper.registerModule( + new SimpleModule() + .addDeserializer(DateTime.class, new DateTimeDeserializer()) + .addSerializer(DateTime.class, ToStringSerializer.instance) + ); + final TaskStatusPlus status = new TaskStatusPlus( + "testId", + DateTimes.nowUtc(), + DateTimes.nowUtc(), + TaskState.RUNNING, + 1000L, + TaskLocation.create("testHost", 1010, -1) + ); + final String json = mapper.writeValueAsString(status); + Assert.assertEquals(status, mapper.readValue(json, TaskStatusPlus.class)); + } + + // Copied from io.druid.jackson.JodaStuff + private static class DateTimeDeserializer extends StdDeserializer + { + public DateTimeDeserializer() + { + super(DateTime.class); + } + + @Override + public DateTime deserialize(JsonParser jp, DeserializationContext ctxt) + throws IOException, JsonProcessingException + { + JsonToken t = jp.getCurrentToken(); + if (t == JsonToken.VALUE_NUMBER_INT) { + return DateTimes.utc(jp.getLongValue()); + } + if (t == JsonToken.VALUE_STRING) { + String str = jp.getText().trim(); + if (str.length() == 0) { // [JACKSON-360] + return null; + } + // make sure to preserve time zone information when parsing timestamps + return DateTimes.ISO_DATE_OR_TIME_WITH_OFFSET.parse(str); + } + throw ctxt.mappingException(getValueClass()); + } + } +} diff --git a/aws-common/pom.xml b/aws-common/pom.xml index 608552f98f54..0335148957d4 100644 --- a/aws-common/pom.xml +++ b/aws-common/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml index 596931c14391..c13de92f5d5f 100644 --- a/benchmarks/pom.xml +++ b/benchmarks/pom.xml @@ -27,7 +27,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT diff --git a/benchmarks/src/main/java/io/druid/benchmark/LoadStatusBenchmark.java b/benchmarks/src/main/java/io/druid/benchmark/LoadStatusBenchmark.java new file mode 100644 index 000000000000..e1b877d481d8 --- /dev/null +++ b/benchmarks/src/main/java/io/druid/benchmark/LoadStatusBenchmark.java @@ -0,0 +1,121 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.benchmark; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import io.druid.java.util.common.Intervals; +import io.druid.java.util.common.StringUtils; +import io.druid.timeline.DataSegment; +import io.druid.timeline.partition.NoneShardSpec; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +@State(Scope.Benchmark) +@Fork(value = 1) +@Warmup(iterations = 15) +@Measurement(iterations = 30) +public class LoadStatusBenchmark +{ + // Number of total data segments + @Param({"10000"}) + int totalSegmentsCount; + + @Param({"true", "false"}) + private boolean serverHasAllSegments; + + private Set<DataSegment> datasourceSegments; + private Collection<DataSegment> serverSegments; + + @Setup(Level.Invocation) + public void setup() + { + Map<String, DataSegment> immutableDatasourceSegmentsMap; + ConcurrentHashMap<String, DataSegment> serverSegmentsMap; + + HashMap<String, DataSegment> datasourceSegmentsMap = Maps.newHashMap(); + serverSegmentsMap = new ConcurrentHashMap<>(); + + for (int i = 0; i < totalSegmentsCount; i++) { + DataSegment segment = new DataSegment( + "benchmarkDatasource", + Intervals.of(StringUtils.format("%s-01-01/%s-12-31", i + 1970, i + 1970)), + "1", + null, + null, + null, + NoneShardSpec.instance(), + 1, + 1 + ); + + datasourceSegmentsMap.put(segment.getIdentifier(), segment); + + if (serverHasAllSegments || i % 2 == 0) { + serverSegmentsMap.put(segment.getIdentifier(), segment); + } + } + + immutableDatasourceSegmentsMap = ImmutableMap.copyOf(datasourceSegmentsMap); + + datasourceSegments = Sets.newHashSet(immutableDatasourceSegmentsMap.values()); + serverSegments = Collections.unmodifiableCollection(serverSegmentsMap.values()); + } + + @Benchmark + @BenchmarkMode(Mode.AverageTime) + @OutputTimeUnit(TimeUnit.MICROSECONDS) + public void oldVersion(Blackhole blackhole) + { + datasourceSegments.removeAll(serverSegments); + blackhole.consume(datasourceSegments); + } + + @Benchmark + @BenchmarkMode(Mode.AverageTime) + @OutputTimeUnit(TimeUnit.MICROSECONDS) + public void newVersion(Blackhole blackhole) + { + for (DataSegment segment : serverSegments) { + datasourceSegments.remove(segment); + } + blackhole.consume(datasourceSegments); + } +}
diff --git a/bytebuffer-collections/pom.xml b/bytebuffer-collections/pom.xml deleted file mode 100755 index 4b8b231d5ea0..000000000000 ---
a/bytebuffer-collections/pom.xml +++ /dev/null @@ -1,138 +0,0 @@ - - - - - 4.0.0 - - - io.druid - druid - 0.12.0-SNAPSHOT - - - bytebuffer-collections - bytebuffer-collections - ByteBuffer Collections - - - - io.druid - extendedset - ${project.parent.version} - - - com.google.guava - guava - 16.0.1 - - - com.fasterxml.jackson.core - jackson-annotations - 2.4.6 - - - com.fasterxml.jackson.core - jackson-core - 2.4.6 - - - com.fasterxml.jackson.core - jackson-databind - 2.4.6 - - - org.roaringbitmap - RoaringBitmap - - - - - junit - junit - test - - - org.easymock - easymock - 3.0 - test - - - com.carrotsearch - junit-benchmarks - 0.7.2 - test - - - com.h2database - h2 - 1.4.182 - test - - - com.google.guava - guava-testlib - test - - - - - - - org.apache.maven.plugins - maven-jar-plugin - 2.4 - - - - test-jar - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - io.druid.test.annotation.Benchmark - - - - - - - - benchmark - - - - maven-surefire-plugin - - -server -Xms3G -Xmx3G -Djub.consumers=CONSOLE,H2 -Djub.db.file=benchmarks/benchmarks - io.druid.test.annotation.Benchmark - io.druid.test.annotation.Dummy - - - - - - -
diff --git a/common/pom.xml b/common/pom.xml index 58379d338640..050f714e163a 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT
diff --git a/common/src/main/java/io/druid/concurrent/LifecycleLock.java b/common/src/main/java/io/druid/concurrent/LifecycleLock.java index a94e3cd31a3e..7cc8e64a113f 100644 --- a/common/src/main/java/io/druid/concurrent/LifecycleLock.java +++ b/common/src/main/java/io/druid/concurrent/LifecycleLock.java @@ -175,9 +175,9 @@ void exitStop() } } - void reset() + void exitStopAndReset() { - if (!compareAndSetState(STOPPED, NOT_STARTED)) { + if (!compareAndSetState(STOPPING, NOT_STARTED)) { throw new IllegalMonitorStateException("Not called exitStop() before reset()"); } } @@ -187,7 +187,7 @@ void reset() /** * Start latch, only one canStart() call in any thread on this LifecycleLock object could return true, if {@link - * #reset()} is not called in between. + * #exitStopAndReset()} is not called in between. */ public boolean canStart() { @@ -257,8 +257,8 @@ public boolean canStop() } /** - * If this LifecycleLock is used in a restartable object, which uses {@link #reset()}, exitStop() must be called - * before exit from stop() on this object, usually in a finally block. + * Finalizes stopping the LifecycleLock. This method must be called before exit from stop() on this object, + * usually in a finally block. If you're using a restartable object, use {@link #exitStopAndReset()} instead. * * @throws IllegalMonitorStateException if {@link #canStop()} is not yet called on this LifecycleLock */ @@ -268,12 +268,14 @@ public void exitStop() } /** - * Resets the LifecycleLock after {@link #exitStop()}, so that {@link #canStart()} could be called again. + * Finalizes stopping the LifecycleLock and resets it, so that {@link #canStart()} can be called again. If this + * LifecycleLock is used in a restartable object, this method must be called before exit from stop() on this object, + * usually in a finally block.
* - * @throws IllegalMonitorStateException if {@link #exitStop()} is not yet called on this LifecycleLock + * @throws IllegalMonitorStateException if {@link #canStop()} is not yet called on this LifecycleLock */ - public void reset() + public void exitStopAndReset() { - sync.reset(); + sync.exitStopAndReset(); } } diff --git a/common/src/test/java/io/druid/concurrent/LifecycleLockTest.java b/common/src/test/java/io/druid/concurrent/LifecycleLockTest.java index e1d9f8adbbf7..afb55bf2c2c2 100644 --- a/common/src/test/java/io/druid/concurrent/LifecycleLockTest.java +++ b/common/src/test/java/io/druid/concurrent/LifecycleLockTest.java @@ -138,8 +138,7 @@ public void testRestart() lifecycleLock.started(); lifecycleLock.exitStart(); Assert.assertTrue(lifecycleLock.canStop()); - lifecycleLock.exitStop(); - lifecycleLock.reset(); + lifecycleLock.exitStopAndReset(); Assert.assertTrue(lifecycleLock.canStart()); } diff --git a/distribution/pom.xml b/distribution/pom.xml index 365ee356c46c..217e46ad2809 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -16,8 +16,7 @@ ~ limitations under the License. --> - + 4.0.0 pom @@ -29,7 +28,7 @@ druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT @@ -60,7 +59,7 @@ java -classpath - + -Ddruid.extensions.loadList=[] -Ddruid.extensions.directory=${project.build.directory}/extensions @@ -190,7 +189,7 @@ java -classpath - + -Ddruid.extensions.loadList=[] -Ddruid.extensions.directory=${project.build.directory}/extensions diff --git a/docs/content/configuration/auth.md b/docs/content/configuration/auth.md index 358a54431b80..2fecfc1a86c3 100644 --- a/docs/content/configuration/auth.md +++ b/docs/content/configuration/auth.md @@ -9,6 +9,7 @@ layout: doc_page |`druid.auth.authenticationChain`|JSON List of Strings|List of Authenticator type names|["allowAll"]|no| |`druid.escalator.type`|String|Type of the Escalator that should be used for internal Druid communications. This Escalator must use an authentication scheme that is supported by an Authenticator in `druid.auth.authenticationChain`.|"noop"|no| |`druid.auth.authorizers`|JSON List of Strings|List of Authorizer type names |["allowAll"]|no| +|`druid.auth.allowUnauthenticatedHttpOptions`|Boolean|If true, skip authentication checks for HTTP OPTIONS requests. This is needed for certain use cases, such as supporting CORS pre-flight requests. 
Note that disabling authentication checks for OPTIONS requests will allow unauthenticated users to determine what Druid endpoints are valid (by checking if the OPTIONS request returns a 200 instead of 404), so enabling this option may reveal information about server configuration, including information about what extensions are loaded (if those extensions add endpoints).|false|no|
## Enabling Authentication/Authorization
diff --git a/docs/content/configuration/coordinator.md b/docs/content/configuration/coordinator.md index 2dfd97098df6..867a3043ac2d 100644 --- a/docs/content/configuration/coordinator.md +++ b/docs/content/configuration/coordinator.md @@ -29,6 +29,7 @@ The coordinator node uses several of the global configs in [Configuration](../co |`druid.coordinator.merge.on`|Boolean flag for whether or not the coordinator should try and merge small segments into a more optimal segment size.|false| |`druid.coordinator.conversion.on`|Boolean flag for converting old segment indexing versions to the latest segment indexing version.|false| |`druid.coordinator.load.timeout`|The timeout duration for when the coordinator assigns a segment to a historical node.|PT15M| +|`druid.coordinator.kill.pendingSegments.on`|Boolean flag for whether or not the coordinator cleans up old entries in the `pendingSegments` table of the metadata store. If set to true, the coordinator will check the created time of the most recently completed task. If it doesn't exist, it finds the created time of the earliest running/pending/waiting task. Once the created time is found, then for all dataSources not in the `killPendingSegmentsSkipList` (see [Dynamic configuration](#dynamic-configuration)), the coordinator will ask the overlord to clean up the entries in the `pendingSegments` table that are 1 day or more older than the found created time. This will be done periodically based on the specified `druid.coordinator.period`.|false| |`druid.coordinator.kill.on`|Boolean flag for whether or not the coordinator should submit kill tasks for unused segments, that is, hard delete them from the metadata store and deep storage. If set to true, then for all whitelisted dataSources (or optionally all), the coordinator will submit tasks periodically based on the specified `period`. These kill tasks will delete all segments except for the last `durationToRetain` period. Whitelist or All can be set via dynamic configuration `killAllDataSources` and `killDataSourceWhitelist` described later.|false| |`druid.coordinator.kill.period`|How often to send kill tasks to the indexing service. Value must be greater than `druid.coordinator.period.indexingPeriod`. Only applies if kill is turned on.|P1D (1 Day)| |`druid.coordinator.kill.durationToRetain`| Do not kill segments in last `durationToRetain`, must be greater or equal to 0. Only applies and MUST be specified if kill is turned on. Note that default value is invalid.|PT-1S (-1 seconds)| @@ -103,8 +104,9 @@ Issuing a GET request at the same URL will return the spec that is currently in |`replicantLifetime`|The maximum number of coordinator runs for a segment to be replicated before we start alerting.|15| |`replicationThrottleLimit`|The maximum number of segments that can be replicated at one time.|10| |`emitBalancingStats`|Boolean flag for whether or not we should emit balancing stats.
This is an expensive operation.|false| -|`killDataSourceWhitelist`|List of dataSources for which kill tasks are sent if property `druid.coordinator.kill.on` is true.|none| +|`killDataSourceWhitelist`|List of dataSources for which kill tasks are sent if property `druid.coordinator.kill.on` is true. This can be a list of comma-separated dataSources or a JSON array.|none| |`killAllDataSources`|Send kill tasks for ALL dataSources if property `druid.coordinator.kill.on` is true. If this is set to true then `killDataSourceWhitelist` must not be specified or must be an empty list.|false| +|`killPendingSegmentsSkipList`|List of dataSources for which pendingSegments are _NOT_ cleaned up if property `druid.coordinator.kill.pendingSegments.on` is true. This can be a list of comma-separated dataSources or a JSON array.|none| |`maxSegmentsInNodeLoadingQueue`|The maximum number of segments that could be queued for loading to any given server. This parameter could be used to speed up the segment loading process, especially if there are "slow" nodes in the cluster (with low loading speed) or if too many segments are scheduled to be replicated to some particular node (faster loading may be preferred over better segment distribution). The desired value depends on segment loading speed, acceptable replication time and number of nodes. A value of 1000 could be a starting point for a rather big cluster. Default value is 0 (loading queue is unbounded) |0|
To view the audit history of coordinator dynamic config issue a GET request to the URL -
diff --git a/docs/content/configuration/index.md b/docs/content/configuration/index.md index 66b8ca41f76f..02a0d20e9de2 100644 --- a/docs/content/configuration/index.md +++ b/docs/content/configuration/index.md @@ -180,19 +180,19 @@ Druid nodes periodically emit metrics and different metrics monitors can be included |Property|Description|Default| |--------|-----------|-------| |`druid.monitoring.emissionPeriod`|How often metrics are emitted.|PT1m| -|`druid.monitoring.monitors`|Sets list of Druid monitors used by a node. See below for names and more information. For example, you can specify monitors for a Broker with `druid.monitoring.monitors=["com.metamx.metrics.SysMonitor","com.metamx.metrics.JvmMonitor"]`.|none (no monitors)| +|`druid.monitoring.monitors`|Sets list of Druid monitors used by a node. See below for names and more information. For example, you can specify monitors for a Broker with `druid.monitoring.monitors=["io.druid.java.util.metrics.SysMonitor","io.druid.java.util.metrics.JvmMonitor"]`.|none (no monitors)| The following monitors are available: |Name|Description| |----|-----------| |`io.druid.client.cache.CacheMonitor`|Emits metrics (to logs) about the segment results cache for Historical and Broker nodes.
Typical cache statistics reported include hits, misses, rates, and size (bytes and number of entries), as well as timeouts and errors.| -|`com.metamx.metrics.SysMonitor`|This uses the [SIGAR library](http://www.hyperic.com/products/sigar) to report on various system activities and statuses.| +|`io.druid.java.util.metrics.SysMonitor`|This uses the [SIGAR library](http://www.hyperic.com/products/sigar) to report on various system activities and statuses.| |`io.druid.server.metrics.HistoricalMetricsMonitor`|Reports statistics on Historical nodes.| -|`com.metamx.metrics.JvmMonitor`|Reports various JVM-related statistics.| -|`com.metamx.metrics.JvmCpuMonitor`|Reports statistics of CPU consumption by the JVM.| -|`com.metamx.metrics.CpuAcctDeltaMonitor`|Reports consumed CPU as per the cpuacct cgroup.| -|`com.metamx.metrics.JvmThreadsMonitor`|Reports Thread statistics in the JVM, like numbers of total, daemon, started, died threads.| +|`io.druid.java.util.metrics.JvmMonitor`|Reports various JVM-related statistics.| +|`io.druid.java.util.metrics.JvmCpuMonitor`|Reports statistics of CPU consumption by the JVM.| +|`io.druid.java.util.metrics.CpuAcctDeltaMonitor`|Reports consumed CPU as per the cpuacct cgroup.| +|`io.druid.java.util.metrics.JvmThreadsMonitor`|Reports Thread statistics in the JVM, like numbers of total, daemon, started, died threads.| |`io.druid.segment.realtime.RealtimeMetricsMonitor`|Reports statistics on Realtime nodes.| |`io.druid.server.metrics.EventReceiverFirehoseMonitor`|Reports how many events have been queued in the EventReceiverFirehose.| |`io.druid.server.metrics.QueryCountStatsMonitor`|Reports how many queries have been successful/failed/interrupted.| @@ -222,8 +222,8 @@ The Druid servers [emit various metrics](../operations/metrics.html) and alerts |`druid.emitter.http.basicAuthentication`|Login and password for authentication in "login:password" form, e.g. `druid.emitter.http.basicAuthentication=admin:adminpassword`|not specified = no authentication| |`druid.emitter.http.flushTimeOut`|The timeout after which an event should be sent to the endpoint, even if internal buffers are not filled, in milliseconds.|not specified = no timeout| |`druid.emitter.http.batchingStrategy`|The strategy of how the batch is formatted. "ARRAY" means `[event1,event2]`, "NEWLINES" means `event1\nevent2`, ONLY_EVENTS means `event1event2`.|ARRAY| -|`druid.emitter.http.maxBatchSize`|The maximum batch size, in bytes.|5191680 (i.e. 5 MB)| -|`druid.emitter.http.batchQueueSizeLimit`|The maximum number of batches in emitter queue, if there are problems with emitting.|50| +|`druid.emitter.http.maxBatchSize`|The maximum batch size, in bytes.|the lesser of 10% of the JVM heap size divided by 2, or 5191680 (i.e. 5 MB)| +|`druid.emitter.http.batchQueueSizeLimit`|The maximum number of batches in emitter queue, if there are problems with emitting.|the greater of 2, or 10% of the JVM heap size divided by 5 MB| |`druid.emitter.http.minHttpTimeoutMillis`|If the timeout implied by the rate at which batches fill up is smaller than this value, the batch is not sent to the endpoint at all, because the send would likely fail, being unable to deliver the data that fast. Configure this based on the emitter/successfulSending/minTimeMs metric. Reasonable values are 10ms..100ms.|0| |`druid.emitter.http.recipientBaseUrl`|The base URL to emit messages to. Druid will POST JSON to be consumed at the HTTP endpoint specified by this property.|none, required config|
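Taken together, a hypothetical HTTP-emitter block in `runtime.properties` might look like the following sketch; the recipient URL is a placeholder, and only properties from the table above are used:

```
druid.emitter=http
druid.emitter.http.recipientBaseUrl=http://metrics-collector.example.com/druid
druid.emitter.http.batchingStrategy=ARRAY
druid.emitter.http.basicAuthentication=admin:adminpassword
```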
diff --git a/docs/content/development/extensions-core/druid-kerberos.md b/docs/content/development/extensions-core/druid-kerberos.md index 1ded0e0f5a54..dc0d38a7b5af 100644 --- a/docs/content/development/extensions-core/druid-kerberos.md +++ b/docs/content/development/extensions-core/druid-kerberos.md @@ -31,8 +31,6 @@ The configuration examples in the rest of this document will use "kerberos" as t ### Properties |Property|Possible Values|Description|Default|required| |--------|---------------|-----------|-------|--------| -|`druid.auth.authenticator.kerberos.internalClientPrincipal`|`druid@EXAMPLE.COM`| Principal user name, used for internal node communication|empty|Yes| -|`druid.auth.authenticator.kerberos.internalClientKeytab`|`/etc/security/keytabs/druid.keytab`|Path to keytab file used for internal node communication|empty|Yes| |`druid.auth.authenticator.kerberos.serverPrincipal`|`HTTP/_HOST@EXAMPLE.COM`| SPNego service principal used by druid nodes|empty|Yes| |`druid.auth.authenticator.kerberos.serverKeytab`|`/etc/security/keytabs/spnego.service.keytab`|SPNego service keytab used by druid nodes|empty|Yes| |`druid.auth.authenticator.kerberos.authToLocal`|`RULE:[1:$1@$0](druid@EXAMPLE.COM)s/.*/druid DEFAULT`|It allows you to set a general rule for mapping principal names to local user names. It will be used if there is not an explicit mapping for the principal name that is being translated.|DEFAULT|No| @@ -54,6 +52,17 @@ In an Active Directory environment, the SPNEGO token in the Authorization header includes information about all security groups for the user. In some cases, when the user belongs to many security groups, the header can grow beyond what druid can handle by default. In such cases, the max request header size that druid can handle can be increased by setting `druid.server.http.maxRequestHeaderSize` (default 8Kb) and `druid.router.http.maxRequestBufferSize` (default 8Kb). +## Configuring Kerberos Escalated Client + +Druid internal nodes communicate with each other using an escalated HTTP client. A Kerberos-enabled escalated HTTP client can be configured with the following properties, as sketched in the example after the table: + + +|Property|Example Values|Description|Default|required| |--------|---------------|-----------|-------|--------| |`druid.escalator.type`|`kerberos`| Type of Escalator client used for internal node communication.|n/a|Yes| |`druid.escalator.internalClientPrincipal`|`druid@EXAMPLE.COM`| Principal user name, used for internal node communication|n/a|Yes| |`druid.escalator.internalClientKeytab`|`/etc/security/keytabs/druid.keytab`|Path to keytab file used for internal node communication|n/a|Yes| |`druid.escalator.authorizerName`|`MyBasicAuthorizer`|Authorizer that requests should be directed to.|n/a|Yes|
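Putting those together, a minimal escalated-client section of `runtime.properties` might look like the following sketch; the principal, keytab path, and authorizer name are the example placeholders from the table above:

```
druid.escalator.type=kerberos
druid.escalator.internalClientPrincipal=druid@EXAMPLE.COM
druid.escalator.internalClientKeytab=/etc/security/keytabs/druid.keytab
druid.escalator.authorizerName=MyBasicAuthorizer
```

## Accessing Druid HTTP end points when kerberos security is enabled
1.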
To access druid HTTP endpoints via curl user will need to first login using `kinit` command as follows - diff --git a/docs/content/development/extensions-core/kafka-ingestion.md b/docs/content/development/extensions-core/kafka-ingestion.md index b32c479fe7ba..c254aad114e2 100644 --- a/docs/content/development/extensions-core/kafka-ingestion.md +++ b/docs/content/development/extensions-core/kafka-ingestion.md @@ -311,40 +311,17 @@ In this way, configuration changes can be applied without requiring any pause in ### On the Subject of Segments -The Kafka indexing service may generate a significantly large number of segments which over time will cause query -performance issues if not properly managed. One important characteristic to understand is that the Kafka indexing task -will generate a Druid partition in each segment granularity interval for each partition in the Kafka topic. As an -example, if you are ingesting realtime data and your segment granularity is 15 minutes with 10 partitions in the Kafka -topic, you would generate a minimum of 40 segments an hour. This is a limitation imposed by the Kafka architecture which -guarantees delivery order within a partition but not across partitions. Therefore as a consumer of Kafka, in order to -generate segments deterministically (and be able to provide exactly-once ingestion semantics) partitions need to be -handled separately. - -Compounding this, if your taskDuration was also set to 15 minutes, you would actually generate 80 segments an hour since -any given 15 minute interval would be handled by two tasks. For an example of this behavior, let's say we started the -supervisor at 9:05 with a 15 minute segment granularity. The first task would create a segment for 9:00-9:15 and a -segment for 9:15-9:30 before stopping at 9:20. A second task would be created at 9:20 which would create another segment -for 9:15-9:30 and a segment for 9:30-9:45 before stopping at 9:35. Hence, if taskDuration and segmentGranularity are the -same duration, you will get two tasks generating a segment for each segment granularity interval. - -Understanding this behavior is the first step to managing the number of segments produced. Some recommendations for -keeping the number of segments low are: - - * Keep the number of Kafka partitions to the minimum required to sustain the required throughput for your event streams. - * Increase segment granularity and task duration so that more events are written into the same segment. One - consideration here is that segments are only handed off to historical nodes after the task duration has elapsed. - Since workers tend to be configured with less query-serving resources than historical nodes, query performance may - suffer if tasks run excessively long without handing off segments. - -In many production installations which have been ingesting events for a long period of time, these suggestions alone -will not be sufficient to keep the number of segments at an optimal level. It is recommended that scheduled re-indexing -tasks be run to merge segments together into new segments of an ideal size (in the range of ~500-700 MB per segment). -Currently, the recommended way of doing this is by running periodic Hadoop batch ingestion jobs and using a `dataSource` -inputSpec to read from the segments generated by the Kafka indexing tasks. Details on how to do this can be found under -['Updating Existing Data'](../../ingestion/update-existing-data.html). 
Note that the Merge Task and Append Task described -[here](../../ingestion/tasks.html) will not work as they require unsharded segments while Kafka indexing tasks always -generated sharded segments. -There is ongoing work to support automatic segment compaction of sharded segments as well as compaction not requiring -Hadoop (see [here](https://github.com/druid-io/druid/pull/1998) and [here](https://github.com/druid-io/druid/pull/3611) -for related PRs).
+Each Kafka Indexing Task puts events consumed from the Kafka partitions assigned to it into a single segment for each segment +granularity interval, until the maxRowsPerSegment limit is reached; at that point, a new partition for that segment granularity is +created for further events. The Kafka Indexing Task also does incremental hand-offs, which means that the segments created by a +task are not all held until the task duration is over. As soon as the maxRowsPerSegment limit is hit, all the segments held +by the task at that point in time will be handed off and a new set of segments will be created for further events. +This means that the task can run for longer durations of time without accumulating old segments locally on Middle Manager +nodes, and it is encouraged to do so. + +The Kafka Indexing Service may still produce some small segments. Let's say the task duration is 4 hours, segment granularity +is set to an HOUR, and the supervisor was started at 9:10. Then, after 4 hours at 13:10, a new set of tasks will be started, and +events for the interval 13:00 - 14:00 may be split across the previous and the new set of tasks. If this becomes a problem, +you can schedule re-indexing tasks to merge segments together into new segments of an ideal size (in the range of ~500-700 MB per segment). +There is also ongoing work to support automatic segment compaction of sharded segments as well as compaction not requiring +Hadoop (see [here](https://github.com/druid-io/druid/pull/5102)).
diff --git a/docs/content/development/extensions-core/lookups-cached-global.md b/docs/content/development/extensions-core/lookups-cached-global.md index 7e4b11af8437..a5e7d5eb04ff 100644 --- a/docs/content/development/extensions-core/lookups-cached-global.md +++ b/docs/content/development/extensions-core/lookups-cached-global.md @@ -71,7 +71,7 @@ The parameters are as follows |--------|-----------|--------|-------| |`extractionNamespace`|Specifies how to populate the local cache. See below|Yes|-| |`firstCacheTimeout`|How long to wait (in ms) for the first run of the cache to populate. 0 indicates to not wait|No|`0` (do not wait)| -|`injective`|If the underlying map is injective (keys and values are unique) then optimizations can occur internally by setting this to `true`|No|`false`| +|`injective`|If the underlying map is [injective](../../querying/lookups.html#query-execution) (keys and values are unique) then optimizations can occur internally by setting this to `true`|No|`false`| If `firstCacheTimeout` is set to a non-zero value, it should be less than `druid.manager.lookups.hostUpdateTimeout`. If `firstCacheTimeout` is NOT set, then management is essentially asynchronous and does not know if a lookup succeeded or failed in starting. In such a case logs from the lookup nodes should be monitored for repeated failures.
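To make these parameters concrete, here is a sketch of a `cachedNamespace` lookup definition that sets both `firstCacheTimeout` and `injective`; the URI-based `extractionNamespace` shown (file location, parse spec, poll period) is illustrative rather than prescriptive:

```
{
  "type": "cachedNamespace",
  "extractionNamespace": {
    "type": "uri",
    "uri": "file:/tmp/lookups/data.csv",
    "namespaceParseSpec": {
      "format": "csv",
      "columns": ["key", "value"]
    },
    "pollPeriod": "PT5M"
  },
  "firstCacheTimeout": 120000,
  "injective": true
}
```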
diff --git a/docs/content/development/extensions-core/mysql.md b/docs/content/development/extensions-core/mysql.md index eb03af6af867..5314c07eef59 100644 --- a/docs/content/development/extensions-core/mysql.md +++ b/docs/content/development/extensions-core/mysql.md @@ -53,3 +53,23 @@ Make sure to [include](../../operations/including-extensions.html) `mysql-metada packaged in a separate tarball that can be downloaded from [here](http://druid.io/downloads.html). You can also get it using [pull-deps](../../operations/pull-deps.html), or you can build it from source code; see [Build from Source](../build.html). + + +## Encrypting MySQL connections + This extension provides support for encrypting MySQL connections. To get more information about encrypting MySQL connections using TLS/SSL in general, please refer to this [guide](https://dev.mysql.com/doc/refman/5.7/en/using-encrypted-connections.html). + +## Configuration + +|Property|Description|Default|Required| +|--------|-----------|-------|--------| +|`druid.metadata.mysql.ssl.useSSL`|Enable SSL|`false`|no| +|`druid.metadata.mysql.ssl.clientCertificateKeyStoreUrl`|The file path URL to the client certificate key store.|none|no| +|`druid.metadata.mysql.ssl.clientCertificateKeyStoreType`|The type of the key store where the client certificate is stored.|none|no| +|`druid.metadata.mysql.ssl.clientCertificateKeyStorePassword`|The [Password Provider](../operations/password-provider.html) or String password for the client key store.|none|no| +|`druid.metadata.mysql.ssl.verifyServerCertificate`|Enables server certificate verification.|false|no| +|`druid.metadata.mysql.ssl.trustCertificateKeyStoreUrl`|The file path to the trusted root certificate key store.|Default trust store provided by MySQL|yes if `verifyServerCertificate` is set to true and a custom trust store is used| +|`druid.metadata.mysql.ssl.trustCertificateKeyStoreType`|The type of the key store where trusted root certificates are stored.|JKS|yes if `verifyServerCertificate` is set to true and keystore type is not JKS| +|`druid.metadata.mysql.ssl.trustCertificateKeyStorePassword`|The [Password Provider](../operations/password-provider.html) or String password for the trust store.|none|yes if `verifyServerCertificate` is set to true and password is not null| +|`druid.metadata.mysql.ssl.enabledSSLCipherSuites`|Overrides the existing cipher suites with these cipher suites.|none|no| +|`druid.metadata.mysql.ssl.enabledTLSProtocols`|Overrides the TLS protocols with these protocols.|none|no| + diff --git a/docs/content/development/extensions.md b/docs/content/development/extensions.md index bc7283060425..a9e1d1ee2f05 100644 --- a/docs/content/development/extensions.md +++ b/docs/content/development/extensions.md @@ -22,6 +22,7 @@ Core extensions are maintained by Druid committers. 
|Name|Description|Docs| |----|-----------|----| |druid-avro-extensions|Support for data in Apache Avro data format.|[link](../development/extensions-core/avro.html)| +|druid-basic-security|Support for Basic HTTP authentication and role-based access control.|[link](../development/extensions-core/druid-basic-security.html)| |druid-caffeine-cache|A local cache implementation backed by Caffeine.|[link](../development/extensions-core/caffeine-cache.html)| |druid-datasketches|Support for approximate counts and set operations with [DataSketches](http://datasketches.github.io/).|[link](../development/extensions-core/datasketches-aggregators.html)| |druid-hdfs-storage|HDFS deep storage.|[link](../development/extensions-core/hdfs.html)|
diff --git a/docs/content/development/modules.md b/docs/content/development/modules.md index be3773bfaf15..db529f3029a3 100644 --- a/docs/content/development/modules.md +++ b/docs/content/development/modules.md @@ -110,7 +110,7 @@ The following example was retrieved from a historical node configured to use Azure 00Z_2015-04-14T02:41:09.484Z 2015-04-14T02:42:33,463 INFO [ZkCoordinator-0] io.druid.guice.JsonConfigurator - Loaded class[class io.druid.storage.azure.AzureAccountConfig] from props[drui d.azure.] as [io.druid.storage.azure.AzureAccountConfig@759c9ad9] -2015-04-14T02:49:08,275 INFO [ZkCoordinator-0] com.metamx.common.CompressionUtils - Unzipping file[/opt/druid/tmp/compressionUtilZipCache1263964429587449785.z +2015-04-14T02:49:08,275 INFO [ZkCoordinator-0] io.druid.java.util.common.CompressionUtils - Unzipping file[/opt/druid/tmp/compressionUtilZipCache1263964429587449785.z ip] to [/opt/druid/zk_druid/dde/2015-01-02T00:00:00.000Z_2015-01-03T00:00:00.000Z/2015-04-14T02:41:09.484Z/0] 2015-04-14T02:49:08,276 INFO [ZkCoordinator-0] io.druid.storage.azure.AzureDataSegmentPuller - Loaded 1196 bytes from [dde/2015-01-02T00:00:00.000Z_2015-01-03 T00:00:00.000Z/2015-04-14T02:41:09.484Z/0/index.zip] to [/opt/druid/zk_druid/dde/2015-01-02T00:00:00.000Z_2015-01-03T00:00:00.000Z/2015-04-14T02:41:09.484Z/0]
diff --git a/docs/content/ingestion/tasks.md b/docs/content/ingestion/tasks.md index b5b1bc1c543b..4d424efcadb4 100644 --- a/docs/content/ingestion/tasks.md +++ b/docs/content/ingestion/tasks.md @@ -276,8 +276,10 @@ An example of compaction task is } ``` -This compaction task merges _all segments_ of the interval `2017-01-01/2018-01-01` into a _single segment_. -To merge each day's worth of data into a separate segment, you can submit multiple `compact` tasks, one for each day. They will run in parallel. +This compaction task reads _all segments_ of the interval `2017-01-01/2018-01-01` and results in new segments. +Note that intervals of the input segments are merged into a single interval of `2017-01-01/2018-01-01` no matter what the segmentGranularity was. +To control the number of result segments, you can set `targetPartitionSize` or `numShards`. See [indexTuningConfig](#tuningconfig) for more details. +To merge each day's worth of data into separate segments, you can submit multiple `compact` tasks, one for each day. They will run in parallel. A compaction task internally generates an `index` task spec for performing compaction work with some fixed parameters.
For example, its `firehose` is always the [ingestSegmentSpec](./firehose.html), and `dimensionsSpec` and `metricsSpec` diff --git a/docs/content/misc/math-expr.md b/docs/content/misc/math-expr.md index 07dcc5018082..abcebdd3b5e3 100644 --- a/docs/content/misc/math-expr.md +++ b/docs/content/misc/math-expr.md @@ -62,7 +62,7 @@ The following built-in functions are available. |timestamp_ceil|timestamp_ceil(expr, period, \[origin, \[timezone\]\]) rounds up a timestamp, returning it as a new timestamp. Period can be any ISO8601 period, like P3M (quarters) or PT12H (half-days). The time zone, if provided, should be a time zone name like "America/Los_Angeles" or offset like "-08:00".| |timestamp_floor|timestamp_floor(expr, period, \[origin, [timezone\]\]) rounds down a timestamp, returning it as a new timestamp. Period can be any ISO8601 period, like P3M (quarters) or PT12H (half-days). The time zone, if provided, should be a time zone name like "America/Los_Angeles" or offset like "-08:00".| |timestamp_shift|timestamp_shift(expr, period, step, \[timezone\]) shifts a timestamp by a period (step times), returning it as a new timestamp. Period can be any ISO8601 period. Step may be negative. The time zone, if provided, should be a time zone name like "America/Los_Angeles" or offset like "-08:00".| -|timestamp_extract|timestamp_extract(expr, unit, \[timezone\]) extracts a time part from expr, returning it as a number. Unit can be EPOCH, SECOND, MINUTE, HOUR, DAY (day of month), DOW (day of week), DOY (day of year), WEEK (week of [week year](https://en.wikipedia.org/wiki/ISO_week_date)), MONTH (1 through 12), QUARTER (1 through 4), or YEAR. The time zone, if provided, should be a time zone name like "America/Los_Angeles" or offset like "-08:00"| +|timestamp_extract|timestamp_extract(expr, unit, \[timezone\]) extracts a time part from expr, returning it as a number. Unit can be EPOCH (number of seconds since 1970-01-01 00:00:00 UTC), SECOND, MINUTE, HOUR, DAY (day of month), DOW (day of week), DOY (day of year), WEEK (week of [week year](https://en.wikipedia.org/wiki/ISO_week_date)), MONTH (1 through 12), QUARTER (1 through 4), or YEAR. The time zone, if provided, should be a time zone name like "America/Los_Angeles" or offset like "-08:00"| |timestamp_parse|timestamp_parse(string expr, \[pattern, [timezone\]\]) parses a string into a timestamp using a given [Joda DateTimeFormat pattern](http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html). If the pattern is not provided, this parses time strings in either ISO8601 or SQL format. The time zone, if provided, should be a time zone name like "America/Los_Angeles" or offset like "-08:00", and will be used as the time zone for strings that do not include a time zone offset. Pattern and time zone must be literals. Strings that cannot be parsed as timestamps will be returned as nulls.| |timestamp_format|timestamp_format(expr, \[pattern, \[timezone\]\]) formats a timestamp as a string with a given [Joda DateTimeFormat pattern](http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html), or ISO8601 if the pattern is not provided. The time zone, if provided, should be a time zone name like "America/Los_Angeles" or offset like "-08:00". 
Pattern and time zone must be literals.| diff --git a/docs/content/operations/insert-segment-to-db.md b/docs/content/operations/insert-segment-to-db.md index 60549e69d6c2..cd5ba2264aeb 100644 --- a/docs/content/operations/insert-segment-to-db.md +++ b/docs/content/operations/insert-segment-to-db.md @@ -5,24 +5,43 @@ layout: doc_page `insert-segment-to-db` is a tool that can insert segments into Druid metadata storage. It is intended to be used to update the segment table in metadata storage after people manually migrate segments from one place to another. -It can also be used to insert missing segment into Druid, or even recover metadata storage by telling it where the +It can also be used to insert missing segments into Druid, or even recover metadata storage by telling it where the segments are stored. -Note: This tool expects users to have Druid cluster running in a "safe" mode, where there are no active tasks to interfere -the segments being inserted. Users can optionally bring down the cluster to make 100% sure nothing is interfering. +**Note:** This tool simply scans the deep storage directory to reconstruct the metadata entries used to locate and +identify each segment. It does not have any understanding about whether those segments _should actually_ be written to +the metadata storage. In certain cases, this can lead to undesired or inconsistent results. Some examples of things to +watch out for: + - Dropped datasources will be re-enabled. + - The latest version of each segment set will be loaded by Druid, which in some cases may not be the version you + actually want. An example of this is a bad compaction job that generates segments which need to be manually rolled + back by removing that version from the metadata table. If these segments are not also removed from deep storage, + they will be imported back into the metadata table and overshadow the correct version. + - Some indexers such as the Kafka indexing service have the potential to generate more than one set of segments that + have the same segment ID but different contents. When the metadata is first written, the correct set of segments is + referenced and the other set is normally deleted from deep storage. It is possible, however, that an unhandled + exception could result in multiple sets of segments with the same segment ID remaining in deep storage. Since this + tool does not know which one is the 'correct' one to use, it will simply select the newest segment set and ignore + the other versions. If the wrong segment set is picked, the exactly-once semantics of the Kafka indexing service + will no longer hold true and you may get duplicated or dropped events. + +With these considerations in mind, it is recommended that data migrations be done by exporting the original metadata +storage directly, since that is the definitive cluster state. This tool should be used as a last resort when a direct +export is not possible. + +**Note:** This tool expects users to have a Druid cluster running in a "safe" mode, where there are no active tasks to interfere +with the segments being inserted. Users can optionally bring down the cluster to make 100% sure nothing is interfering. In order to make it work, user will have to provide metadata storage credentials and deep storage type through Java JVM argument -or runtime.properties file.
Specifically, this tool needs to know: -`druid.metadata.storage.type` - -`druid.metadata.storage.connector.connectURI` - -`druid.metadata.storage.connector.user` - -`druid.metadata.storage.connector.password` - -`druid.storage.type` +``` +druid.metadata.storage.type +druid.metadata.storage.connector.connectURI +druid.metadata.storage.connector.user +druid.metadata.storage.connector.password +druid.storage.type +``` Besides the properties above, you also need to specify the location where the segments are stored and whether you want to update descriptor.json (`partitionNum_descriptor.json` for HDFS data storage). These two can be provided through command line arguments. diff --git a/docs/content/operations/performance-faq.md b/docs/content/operations/performance-faq.md index edeaf77d8aee..1c27c0d3eb94 100644 --- a/docs/content/operations/performance-faq.md +++ b/docs/content/operations/performance-faq.md @@ -64,7 +64,7 @@ Yes, using a `log4j2.xml` similar to the following causes some of the more chatt - + diff --git a/docs/content/querying/dimensionspecs.md b/docs/content/querying/dimensionspecs.md index 236e13b5a7a0..19d080493fe1 100644 --- a/docs/content/querying/dimensionspecs.md +++ b/docs/content/querying/dimensionspecs.md @@ -95,8 +95,6 @@ The default values are `replaceMissingValueWith = null` and `retainMissingValue It is illegal to set `retainMissingValue = true` and also specify a `replaceMissingValueWith`. -A property of `injective` specifies if optimizations can be used which assume there is no combining of multiple names into one. For example: If ABC123 is the only key that maps to SomeCompany, that can be optimized since it is a unique lookup. But if both ABC123 and DEF456 BOTH map to SomeCompany, then that is NOT a unique lookup. Setting this value to true and setting `retainMissingValue` to FALSE (the default) may cause undesired behavior. - A property `optimize` can be supplied to allow optimization of lookup based extraction filter (by default `optimize = true`). The second kind where it is not possible to pass at query time due to their size, will be based on an external lookup table or resource that is already registered via configuration file or/and coordinator. @@ -316,67 +314,38 @@ Example for the `__time` dimension: JavaScript-based functionality is disabled by default. Please refer to the Druid JavaScript programming guide for guidelines about using Druid's JavaScript functionality, including instructions on how to enable it. -### Lookup extraction function +### Registered lookup extraction function -Lookups are a concept in Druid where dimension values are (optionally) replaced with new values. -For more documentation on using lookups, please see [here](../querying/lookups.html). -Explicit lookups allow you to specify a set of keys and values to use when performing the extraction. +Lookups are a concept in Druid where dimension values are (optionally) replaced with new values. +For more documentation on using lookups, please see [Lookups](../querying/lookups.html). +The "registeredLookup" extraction function lets you refer to a lookup that has been registered in the cluster-wide +configuration. 
-```json -{ - "type":"lookup", - "lookup":{ - "type":"map", - "map":{"foo":"bar", "baz":"bat"} - }, - "retainMissingValue":true, - "injective":true -} -``` - -```json -{ - "type":"lookup", - "lookup":{ - "type":"map", - "map":{"foo":"bar", "baz":"bat"} - }, - "retainMissingValue":false, - "injective":false, - "replaceMissingValueWith":"MISSING" -} -``` - -```json -{ - "type":"lookup", - "lookup":{"type":"namespace","namespace":"some_lookup"}, - "replaceMissingValueWith":"Unknown", - "injective":false -} -``` +An example: ```json { - "type":"lookup", - "lookup":{"type":"namespace","namespace":"some_lookup"}, - "retainMissingValue":true, - "injective":false + "type":"registeredLookup", + "lookup":"some_lookup_name", + "retainMissingValue":true } ``` -A lookup can be of type `namespace` or `map`. A `map` lookup is passed as part of the query. -A `namespace` lookup is populated on all the nodes which handle queries as per [lookups](../querying/lookups.html) +A property of `retainMissingValue` and `replaceMissingValueWith` can be specified at query time to hint how to handle +missing values. Setting `replaceMissingValueWith` to `""` has the same effect as setting it to `null` or omitting the +property. Setting `retainMissingValue` to true will use the dimension's original value if it is not found in the lookup. +The default values are `replaceMissingValueWith = null` and `retainMissingValue = false` which causes missing values to +be treated as missing. -A property of `retainMissingValue` and `replaceMissingValueWith` can be specified at query time to hint how to handle missing values. Setting `replaceMissingValueWith` to `""` has the same effect as setting it to `null` or omitting the property. Setting `retainMissingValue` to true will use the dimension's original value if it is not found in the lookup. The default values are `replaceMissingValueWith = null` and `retainMissingValue = false` which causes missing values to be treated as missing. - It is illegal to set `retainMissingValue = true` and also specify a `replaceMissingValueWith`. -A property of `injective` specifies if optimizations can be used which assume there is no combining of multiple names into one. For example: If ABC123 is the only key that maps to SomeCompany, that can be optimized since it is a unique lookup. But if both ABC123 and DEF456 BOTH map to SomeCompany, then that is NOT a unique lookup. Setting this value to true and setting `retainMissingValue` to FALSE (the default) may cause undesired behavior. +A property of `injective` can override the lookup's own sense of whether or not it is +[injective](lookups.html#query-execution). If left unspecified, Druid will use the registered cluster-wide lookup +configuration. -A property `optimize` can be supplied to allow optimization of lookup based extraction filter (by default `optimize = true`). +A property `optimize` can be supplied to allow optimization of lookup based extraction filter (by default `optimize = true`). The optimization layer will run on the broker and it will rewrite the extraction filter as a clause of selector filters.
-For instance the following filter +For instance the following filter ```json { @@ -385,21 +354,16 @@ For instance the following filter "dimension": "product", "value": "bar_1", "extractionFn": { - "type": "lookup", + "type": "registeredLookup", "optimize": true, - "lookup": { - "type": "map", - "map": { - "product_1": "bar_1", - "product_3": "bar_1" - } - } + "lookup": "some_lookup_name" } } } ``` -will be rewritten as +will be rewritten as the following simpler query, assuming a lookup that maps "product_1" and "product_3" to the value +"bar_1": ```json { @@ -425,30 +389,49 @@ will be rewritten as } ``` -A null dimension value can be mapped to a specific value by specifying the empty string as the key. +A null dimension value can be mapped to a specific value by specifying the empty string as the key in your lookup file. This allows distinguishing between a null dimension and a lookup resulting in a null. For example, specifying `{"":"bar","bat":"baz"}` with dimension values `[null, "foo", "bat"]` and replacing missing values with `"oof"` will yield results of `["bar", "oof", "baz"]`. Omitting the empty string key will cause the missing value to take over. For example, specifying `{"bat":"baz"}` with dimension values `[null, "foo", "bat"]` and replacing missing values with `"oof"` will yield results of `["oof", "oof", "baz"]`. -### Registered Lookup Extraction Function +### Inline lookup extraction function -While it is recommended that the [lookup dimension spec](#lookup-dimensionspecs) be used whenever possible, any lookup that is registered for use as a lookup dimension spec can be used as a dimension extraction. +Lookups are a concept in Druid where dimension values are (optionally) replaced with new values. +For more documentation on using lookups, please see [Lookups](../querying/lookups.html). +The "lookup" extraction function lets you specify an inline lookup map without registering one in the cluster-wide +configuration. -The specification for dimension extraction using dimension specification named lookups is formatted as per the following example: +Examples: ```json { - "type":"registeredLookup", - "lookup":"some_lookup_name", + "type":"lookup", + "lookup":{ + "type":"map", + "map":{"foo":"bar", "baz":"bat"} + }, "retainMissingValue":true, - "injective":false + "injective":true +} +``` + +```json +{ + "type":"lookup", + "lookup":{ + "type":"map", + "map":{"foo":"bar", "baz":"bat"} + }, + "retainMissingValue":false, + "injective":false, + "replaceMissingValueWith":"MISSING" } ``` -All the flags for [lookup extraction function](#lookup-extraction-function) apply here as well. +The inline lookup should be of type `map`. -In general, the dimension specification should be used. This dimension **extraction** implementation is made available for testing, validation, and transitioning from dimension extraction to the dimension specification style lookups. -There is also a chance that a feature uses dimension extraction in such a way that it is not applied to dimension specification lookups. Such a scenario should be brought to the attention of the development mailing list. +The properties `retainMissingValue`, `replaceMissingValueWith`, `injective`, and `optimize` behave similarly to the +[registered lookup extraction function](#registered-lookup-extraction-function). 
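+Either extraction function is attached to a query through a dimension spec. As a minimal usage sketch (assuming a
+dimension named `product` and a hypothetical output name `productRenamed`, reusing the inline map from the examples
+above):
+
+```json
+{
+  "type": "extraction",
+  "dimension": "product",
+  "outputName": "productRenamed",
+  "extractionFn": {
+    "type": "lookup",
+    "lookup": {
+      "type": "map",
+      "map": {"foo": "bar", "baz": "bat"}
+    },
+    "retainMissingValue": true
+  }
+}
+```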
### Cascade Extraction Function diff --git a/docs/content/querying/lookups.md b/docs/content/querying/lookups.md index 616b602eb3ba..771c9adfb743 100644 --- a/docs/content/querying/lookups.md +++ b/docs/content/querying/lookups.md @@ -8,15 +8,17 @@ layout: doc_page Lookups are an experimental feature. -Lookups are a concept in Druid where dimension values are (optionally) replaced with new values. -See [dimension specs](../querying/dimensionspecs.html) for more information. For the purpose of these documents, -a "key" refers to a dimension value to match, and a "value" refers to its replacement. -So if you wanted to rename `appid-12345` to `Super Mega Awesome App` then the key would be `appid-12345` and the value -would be `Super Mega Awesome App`. - -It is worth noting that lookups support use cases where keys map to unique values (injective) such as a country code and -a country name, and also supports use cases where multiple IDs map to the same value, e.g. multiple app-ids belonging to -a single account manager. +Lookups are a concept in Druid where dimension values are (optionally) replaced with new values, allowing join-like +functionality. Applying lookups in Druid is similar to joining a dimension table in a data warehouse. See +[dimension specs](../querying/dimensionspecs.html) for more information. For the purpose of these documents, a "key" +refers to a dimension value to match, and a "value" refers to its replacement. So if you wanted to map +`appid-12345` to `Super Mega Awesome App` then the key would be `appid-12345` and the value would be +`Super Mega Awesome App`. + +It is worth noting that lookups support not just use cases where keys map one-to-one to unique values, such as country +code and country name, but also use cases where multiple IDs map to the same value, e.g. multiple app-ids +mapping to a single account manager. When lookups are one-to-one, Druid is able to apply additional optimizations at +query time; see [Query execution](#query-execution) below for more details. Lookups do not have history. They always use the current data. This means that if the chief account manager for a particular app-id changes, and you issue a query with a lookup to store the app-id to account manager relationship, @@ -33,6 +35,38 @@ Other lookup types are available as extensions, including: - Globally cached lookups from local files, remote URIs, or JDBC through [lookups-cached-global](../development/extensions-core/lookups-cached-global.html). - Globally cached lookups from a Kafka topic through [kafka-extraction-namespace](../development/extensions-core/kafka-extraction-namespace.html). +Query Execution +--------------- +When executing an aggregation query involving lookups, Druid can decide to apply lookups either while scanning and +aggregating rows, or to apply them after aggregation is complete. It is more efficient to apply lookups after +aggregation is complete, so Druid will do this if it can. Druid decides this by checking if the lookup is marked +as "injective" or not. In general, you should set this property for any lookup that is naturally one-to-one, to allow +Druid to run your queries as fast as possible. + +Injective lookups should include _all_ possible keys that may show up in your dataset, and should also map all keys to +_unique values_.
This matters because non-injective lookups may map different keys to the same value, which must be +accounted for during aggregation, lest query results contain two result values that should have been aggregated into +one. + +This lookup is injective (assuming it contains all possible keys from your data): + +``` +1 -> Foo +2 -> Bar +3 -> Billy +``` + +But this one is not, since both "2" and "3" map to the same value: + +``` +1 -> Foo +2 -> Bar +3 -> Bar +``` + +To tell Druid that your lookup is injective, you must specify `"injective" : true` in the lookup configuration. Druid +will not detect this automatically. +
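+Injectivity can also be asserted for a single query instead of cluster-wide. A minimal sketch, assuming a registered
+lookup named `some_lookup_name`: the `injective` property on the
+[registered lookup extraction function](../querying/dimensionspecs.html#registered-lookup-extraction-function)
+overrides whatever the cluster-wide configuration says for that one query:
+
+```json
+{
+  "type": "registeredLookup",
+  "lookup": "some_lookup_name",
+  "injective": true
+}
+```
+
Dynamic Configuration ---------------------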
diff --git a/docs/content/toc.md b/docs/content/toc.md index d18de0e7c734..e51f95c05d46 100644 --- a/docs/content/toc.md +++ b/docs/content/toc.md @@ -90,6 +90,7 @@ layout: toc * [Broker](/docs/VERSION/configuration/broker.html) * [Realtime](/docs/VERSION/configuration/realtime.html) * [Configuring Logging](/docs/VERSION/configuration/logging.html) + * [Configuring Authentication and Authorization](/docs/VERSION/configuration/auth.html) ## Development * [Overview](/docs/VERSION/development/overview.html) diff --git a/examples/conf-quickstart/druid/_common/common.runtime.properties b/examples/conf-quickstart/druid/_common/common.runtime.properties index fd131b878cce..ee5b284b527f 100644 --- a/examples/conf-quickstart/druid/_common/common.runtime.properties +++ b/examples/conf-quickstart/druid/_common/common.runtime.properties @@ -113,7 +113,7 @@ druid.selectors.coordinator.serviceName=druid/coordinator # Monitoring # -druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"] +druid.monitoring.monitors=["io.druid.java.util.metrics.JvmMonitor"] druid.emitter=logging druid.emitter.logging.logLevel=info diff --git a/examples/conf/druid/_common/common.runtime.properties b/examples/conf/druid/_common/common.runtime.properties index a018fa01780c..1c967fcd60cf 100644 --- a/examples/conf/druid/_common/common.runtime.properties +++ b/examples/conf/druid/_common/common.runtime.properties @@ -112,7 +112,7 @@ druid.selectors.coordinator.serviceName=druid/coordinator # Monitoring # -druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"] +druid.monitoring.monitors=["io.druid.java.util.metrics.JvmMonitor"] druid.emitter=logging druid.emitter.logging.logLevel=info diff --git a/examples/pom.xml b/examples/pom.xml index 231469f4b06e..0f1f5edefc4c 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -27,7 +27,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT diff --git a/extendedset/pom.xml b/extendedset/pom.xml index 91f19144c8ec..f9afc99259bd 100755 --- a/extendedset/pom.xml +++ b/extendedset/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 extendedset @@ -32,7 +31,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT diff --git a/extensions-contrib/ambari-metrics-emitter/pom.xml b/extensions-contrib/ambari-metrics-emitter/pom.xml index 14fe7cd44bb9..bed987c60915 100644 --- a/extensions-contrib/ambari-metrics-emitter/pom.xml +++ b/extensions-contrib/ambari-metrics-emitter/pom.xml @@ -18,14 +18,13 @@ ~ under the License. 
--> - + 4.0.0 io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml @@ -48,8 +47,9 @@ provided - com.metamx + io.druid java-util + ${project.parent.version} provided diff --git a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java index 1838e8952c11..f8f2e76fa1fc 100644 --- a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java +++ b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/AmbariMetricsEmitter.java @@ -20,11 +20,11 @@ package io.druid.emitter.ambari.metrics; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.metamx.common.ISE; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.Event; -import com.metamx.emitter.service.AlertEvent; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.common.ISE; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.AlertEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.logger.Logger; import org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink; diff --git a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/AmbariMetricsEmitterModule.java b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/AmbariMetricsEmitterModule.java index ad029a087872..6dc5e49edc49 100644 --- a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/AmbariMetricsEmitterModule.java +++ b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/AmbariMetricsEmitterModule.java @@ -28,7 +28,7 @@ import com.google.inject.Provides; import com.google.inject.name.Named; import com.google.inject.name.Names; -import com.metamx.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Emitter; import io.druid.guice.JsonConfigProvider; import io.druid.guice.ManageLifecycle; import io.druid.initialization.DruidModule; diff --git a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/DruidToTimelineMetricConverter.java b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/DruidToTimelineMetricConverter.java index bcc09f689054..9912ced68436 100644 --- a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/DruidToTimelineMetricConverter.java +++ b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/DruidToTimelineMetricConverter.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric; diff --git a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/SendAllTimelineEventConverter.java b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/SendAllTimelineEventConverter.java index f5be89a23853..2189fac55f0d 100644 --- 
a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/SendAllTimelineEventConverter.java +++ b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/SendAllTimelineEventConverter.java @@ -26,7 +26,7 @@ import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSortedSet; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric; /** diff --git a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/WhiteListBasedDruidToTimelineEventConverter.java b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/WhiteListBasedDruidToTimelineEventConverter.java index 5d2a46290962..cf4bd86821e6 100644 --- a/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/WhiteListBasedDruidToTimelineEventConverter.java +++ b/extensions-contrib/ambari-metrics-emitter/src/main/java/io/druid/emitter/ambari/metrics/WhiteListBasedDruidToTimelineEventConverter.java @@ -32,8 +32,8 @@ import com.google.common.collect.ImmutableSortedMap; import com.google.common.io.CharStreams; import com.google.common.io.Files; -import com.metamx.common.ISE; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.common.ISE; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.logger.Logger; import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric; diff --git a/extensions-contrib/ambari-metrics-emitter/src/test/java/io/druid/emitter/ambari/metrics/WhiteListBasedDruidToTimelineEventConverterTest.java b/extensions-contrib/ambari-metrics-emitter/src/test/java/io/druid/emitter/ambari/metrics/WhiteListBasedDruidToTimelineEventConverterTest.java index eeb4048a9b47..be58f3c1934e 100644 --- a/extensions-contrib/ambari-metrics-emitter/src/test/java/io/druid/emitter/ambari/metrics/WhiteListBasedDruidToTimelineEventConverterTest.java +++ b/extensions-contrib/ambari-metrics-emitter/src/test/java/io/druid/emitter/ambari/metrics/WhiteListBasedDruidToTimelineEventConverterTest.java @@ -20,7 +20,7 @@ package io.druid.emitter.ambari.metrics; import com.google.common.collect.Maps; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.DateTimes; import junitparams.JUnitParamsRunner; diff --git a/extensions-contrib/azure-extensions/pom.xml b/extensions-contrib/azure-extensions/pom.xml index 1c647b0e91d4..6f9edc549a65 100644 --- a/extensions-contrib/azure-extensions/pom.xml +++ b/extensions-contrib/azure-extensions/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 io.druid.extensions.contrib @@ -30,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/azure-extensions/src/main/java/io/druid/storage/azure/AzureDataSegmentPusher.java b/extensions-contrib/azure-extensions/src/main/java/io/druid/storage/azure/AzureDataSegmentPusher.java index 97bc1a0167a7..592b956c260d 100644 --- a/extensions-contrib/azure-extensions/src/main/java/io/druid/storage/azure/AzureDataSegmentPusher.java +++ b/extensions-contrib/azure-extensions/src/main/java/io/druid/storage/azure/AzureDataSegmentPusher.java @@ -25,7 +25,6 @@ import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; import com.microsoft.azure.storage.StorageException; - import io.druid.java.util.common.CompressionUtils; import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.logger.Logger; @@ -92,9 +91,9 @@ public File createSegmentDescriptorFile(final ObjectMapper jsonMapper, final Dat return descriptorFile; } - public Map getAzurePaths(final DataSegment segment) + public Map getAzurePaths(final DataSegment segment, final boolean useUniquePath) { - final String storageDir = this.getStorageDir(segment); + final String storageDir = this.getStorageDir(segment, useUniquePath); return ImmutableMap.of( "index", StringUtils.format("%s/%s", storageDir, AzureStorageDruidModule.INDEX_ZIP_FILE_NAME), @@ -131,9 +130,9 @@ public DataSegment uploadDataSegment( } @Override - public DataSegment push(final File indexFilesDir, final DataSegment segment) throws IOException + public DataSegment push(final File indexFilesDir, final DataSegment segment, final boolean useUniquePath) + throws IOException { - log.info("Uploading [%s] to Azure.", indexFilesDir); final int version = SegmentUtils.getVersionFromDir(indexFilesDir); @@ -145,7 +144,7 @@ public DataSegment push(final File indexFilesDir, final DataSegment segment) thr final long size = CompressionUtils.zip(indexFilesDir, zipOutFile); final File descFile = descriptorFile = createSegmentDescriptorFile(jsonMapper, segment); - final Map azurePaths = getAzurePaths(segment); + final Map azurePaths = getAzurePaths(segment, useUniquePath); return AzureUtils.retryAzureOperation( new Callable() diff --git a/extensions-contrib/azure-extensions/src/main/java/io/druid/storage/azure/AzureStorage.java b/extensions-contrib/azure-extensions/src/main/java/io/druid/storage/azure/AzureStorage.java index 8585bf043797..25a4764ad845 100644 --- a/extensions-contrib/azure-extensions/src/main/java/io/druid/storage/azure/AzureStorage.java +++ b/extensions-contrib/azure-extensions/src/main/java/io/druid/storage/azure/AzureStorage.java @@ -23,9 +23,7 @@ import com.microsoft.azure.storage.blob.CloudBlob; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlockBlob; import com.microsoft.azure.storage.blob.ListBlobItem; - import io.druid.java.util.common.logger.Logger; import java.io.File; @@ -78,17 +76,14 @@ public List emptyCloudBlobDirectory(final String containerName, final St } return deletedFiles; - } public void uploadBlob(final File file, final String containerName, final String blobPath) throws IOException, StorageException, URISyntaxException - { CloudBlobContainer container = getCloudBlobContainer(containerName); try (FileInputStream stream = new FileInputStream(file)) { - CloudBlockBlob blob = container.getBlockBlobReference(blobPath); - blob.upload(stream, file.length()); 
+ container.getBlockBlobReference(blobPath).upload(stream, file.length()); } } diff --git a/extensions-contrib/azure-extensions/src/test/java/io/druid/storage/azure/AzureDataSegmentPusherTest.java b/extensions-contrib/azure-extensions/src/test/java/io/druid/storage/azure/AzureDataSegmentPusherTest.java index af76f3571424..3201c4afeca3 100644 --- a/extensions-contrib/azure-extensions/src/test/java/io/druid/storage/azure/AzureDataSegmentPusherTest.java +++ b/extensions-contrib/azure-extensions/src/test/java/io/druid/storage/azure/AzureDataSegmentPusherTest.java @@ -82,6 +82,17 @@ public void before() @Test public void testPush() throws Exception + { + testPushInternal(false, "foo/2015-01-01T00:00:00\\.000Z_2016-01-01T00:00:00\\.000Z/0/0/index\\.zip"); + } + + @Test + public void testPushUseUniquePath() throws Exception + { + testPushInternal(true, "foo/2015-01-01T00:00:00\\.000Z_2016-01-01T00:00:00\\.000Z/0/0/[A-Za-z0-9-]{36}/index\\.zip"); + } + + private void testPushInternal(boolean useUniquePath, String matcher) throws Exception { AzureDataSegmentPusher pusher = new AzureDataSegmentPusher(azureStorage, azureAccountConfig, jsonMapper); @@ -104,7 +115,12 @@ public void testPush() throws Exception size ); - DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush); + DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush, useUniquePath); + + Assert.assertTrue( + segment.getLoadSpec().get("blobPath").toString(), + segment.getLoadSpec().get("blobPath").toString().matches(matcher) + ); Assert.assertEquals(segmentToPush.getSize(), segment.getSize()); } @@ -114,10 +130,13 @@ public void getAzurePathsTest() { AzureDataSegmentPusher pusher = new AzureDataSegmentPusher(azureStorage, azureAccountConfig, jsonMapper); - final String storageDir = pusher.getStorageDir(dataSegment); - Map paths = pusher.getAzurePaths(dataSegment); + final String storageDir = pusher.getStorageDir(dataSegment, false); + Map paths = pusher.getAzurePaths(dataSegment, false); - assertEquals(StringUtils.format("%s/%s", storageDir, AzureStorageDruidModule.INDEX_ZIP_FILE_NAME), paths.get("index")); + assertEquals( + StringUtils.format("%s/%s", storageDir, AzureStorageDruidModule.INDEX_ZIP_FILE_NAME), + paths.get("index") + ); assertEquals( StringUtils.format("%s/%s", storageDir, AzureStorageDruidModule.DESCRIPTOR_FILE_NAME), paths.get("descriptor") @@ -131,7 +150,7 @@ public void uploadDataSegmentTest() throws StorageException, IOException, URISyn final int version = 9; final File compressedSegmentData = new File("index.zip"); final File descriptorFile = new File("descriptor.json"); - final Map azurePaths = pusher.getAzurePaths(dataSegment); + final Map azurePaths = pusher.getAzurePaths(dataSegment, false); azureStorage.uploadBlob(compressedSegmentData, containerName, azurePaths.get("index")); expectLastCall(); diff --git a/extensions-contrib/cassandra-storage/pom.xml b/extensions-contrib/cassandra-storage/pom.xml index ea32b084b02f..1068cf8530dd 100644 --- a/extensions-contrib/cassandra-storage/pom.xml +++ b/extensions-contrib/cassandra-storage/pom.xml @@ -29,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/cassandra-storage/src/main/java/io/druid/storage/cassandra/CassandraDataSegmentPusher.java b/extensions-contrib/cassandra-storage/src/main/java/io/druid/storage/cassandra/CassandraDataSegmentPusher.java index 3595147ace85..16ff585327db 100644 --- 
a/extensions-contrib/cassandra-storage/src/main/java/io/druid/storage/cassandra/CassandraDataSegmentPusher.java +++ b/extensions-contrib/cassandra-storage/src/main/java/io/druid/storage/cassandra/CassandraDataSegmentPusher.java @@ -24,8 +24,9 @@ import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; import com.netflix.astyanax.MutationBatch; +import com.netflix.astyanax.connectionpool.exceptions.NotFoundException; import com.netflix.astyanax.recipes.storage.ChunkedStorage; - +import com.netflix.astyanax.recipes.storage.ChunkedStorageProvider; import io.druid.java.util.common.CompressionUtils; import io.druid.java.util.common.logger.Logger; import io.druid.segment.SegmentUtils; @@ -53,7 +54,8 @@ public class CassandraDataSegmentPusher extends CassandraStorage implements Data @Inject public CassandraDataSegmentPusher( CassandraDataSegmentConfig config, - ObjectMapper jsonMapper) + ObjectMapper jsonMapper + ) { super(config); this.jsonMapper = jsonMapper; @@ -73,13 +75,13 @@ public String getPathForHadoop(String dataSource) } @Override - public DataSegment push(final File indexFilesDir, DataSegment segment) throws IOException + public DataSegment push(final File indexFilesDir, DataSegment segment, final boolean useUniquePath) throws IOException { log.info("Writing [%s] to C*", indexFilesDir); String key = JOINER.join( config.getKeyspace().isEmpty() ? null : config.getKeyspace(), - this.getStorageDir(segment) - ); + this.getStorageDir(segment, useUniquePath) + ); // Create index final File compressedIndexFile = File.createTempFile("druid", "index.zip"); @@ -91,12 +93,12 @@ public DataSegment push(final File indexFilesDir, DataSegment segment) throws IO try { long start = System.currentTimeMillis(); ChunkedStorage.newWriter(indexStorage, key, new FileInputStream(compressedIndexFile)) - .withConcurrencyLevel(CONCURRENCY).call(); + .withConcurrencyLevel(CONCURRENCY).call(); byte[] json = jsonMapper.writeValueAsBytes(segment); MutationBatch mutation = this.keyspace.prepareMutationBatch(); mutation.withRow(descriptorStorage, key) - .putColumn("lastmodified", System.currentTimeMillis(), null) - .putColumn("descriptor", json, null); + .putColumn("lastmodified", System.currentTimeMillis(), null) + .putColumn("descriptor", json, null); mutation.execute(); log.info("Wrote index to C* in [%s] ms", System.currentTimeMillis() - start); } @@ -105,10 +107,8 @@ public DataSegment push(final File indexFilesDir, DataSegment segment) throws IO } segment = segment.withSize(indexSize) - .withLoadSpec( - ImmutableMap. of("type", "c*", "key", key) - ) - .withBinaryVersion(version); + .withLoadSpec(ImmutableMap.of("type", "c*", "key", key)) + .withBinaryVersion(version); log.info("Deleting zipped index File[%s]", compressedIndexFile); compressedIndexFile.delete(); @@ -120,4 +120,14 @@ public Map makeLoadSpec(URI uri) { throw new UnsupportedOperationException("not supported"); } + + private boolean doesObjectExist(ChunkedStorageProvider provider, String objectName) throws Exception + { + try { + return ChunkedStorage.newInfoReader(provider, objectName).call().isValidForRead(); + } + catch (NotFoundException e) { + return false; + } + } } diff --git a/extensions-contrib/cloudfiles-extensions/pom.xml b/extensions-contrib/cloudfiles-extensions/pom.xml index da06a9e9e51a..7ed7874b534b 100644 --- a/extensions-contrib/cloudfiles-extensions/pom.xml +++ b/extensions-contrib/cloudfiles-extensions/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 io.druid.extensions.contrib @@ -30,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/cloudfiles-extensions/src/main/java/io/druid/storage/cloudfiles/CloudFilesDataSegmentPusher.java b/extensions-contrib/cloudfiles-extensions/src/main/java/io/druid/storage/cloudfiles/CloudFilesDataSegmentPusher.java index a08105b0575e..be3e25472354 100644 --- a/extensions-contrib/cloudfiles-extensions/src/main/java/io/druid/storage/cloudfiles/CloudFilesDataSegmentPusher.java +++ b/extensions-contrib/cloudfiles-extensions/src/main/java/io/druid/storage/cloudfiles/CloudFilesDataSegmentPusher.java @@ -31,7 +31,6 @@ import org.jclouds.rackspace.cloudfiles.v1.CloudFilesApi; import java.io.File; -import java.io.IOException; import java.net.URI; import java.nio.file.Files; import java.util.Map; @@ -74,9 +73,12 @@ public String getPathForHadoop(final String dataSource) } @Override - public DataSegment push(final File indexFilesDir, final DataSegment inSegment) throws IOException + public DataSegment push(final File indexFilesDir, final DataSegment inSegment, final boolean useUniquePath) { - final String segmentPath = CloudFilesUtils.buildCloudFilesPath(this.config.getBasePath(), getStorageDir(inSegment)); + final String segmentPath = CloudFilesUtils.buildCloudFilesPath( + this.config.getBasePath(), + getStorageDir(inSegment, useUniquePath) + ); File descriptorFile = null; File zipOutFile = null; @@ -98,6 +100,7 @@ public DataSegment call() throws Exception segmentPath, outFile, objectApi.getRegion(), objectApi.getContainer() ); + log.info("Pushing %s.", segmentData.getPath()); objectApi.put(segmentData); @@ -111,6 +114,7 @@ public DataSegment call() throws Exception log.info("Pushing %s.", descriptorData.getPath()); objectApi.put(descriptorData); + final DataSegment outSegment = inSegment .withSize(indexSize) .withLoadSpec(makeLoadSpec(new URI(segmentData.getPath()))) diff --git a/extensions-contrib/cloudfiles-extensions/src/main/java/io/druid/storage/cloudfiles/CloudFilesObjectApiProxy.java b/extensions-contrib/cloudfiles-extensions/src/main/java/io/druid/storage/cloudfiles/CloudFilesObjectApiProxy.java index 7f41497b37c9..d495fcc1c77f 100644 --- a/extensions-contrib/cloudfiles-extensions/src/main/java/io/druid/storage/cloudfiles/CloudFilesObjectApiProxy.java +++ b/extensions-contrib/cloudfiles-extensions/src/main/java/io/druid/storage/cloudfiles/CloudFilesObjectApiProxy.java @@ -58,4 +58,9 @@ public CloudFilesObject get(String path) Payload payload = swiftObject.getPayload(); return new CloudFilesObject(payload, this.region, this.container, path); } + + public boolean exists(String path) + { + return objectApi.getWithoutBody(path) != null; + } } diff --git a/extensions-contrib/cloudfiles-extensions/src/test/java/io/druid/storage/cloudfiles/CloudFilesDataSegmentPusherTest.java b/extensions-contrib/cloudfiles-extensions/src/test/java/io/druid/storage/cloudfiles/CloudFilesDataSegmentPusherTest.java index b257efb1282a..2be6f584811b 100644 --- a/extensions-contrib/cloudfiles-extensions/src/test/java/io/druid/storage/cloudfiles/CloudFilesDataSegmentPusherTest.java +++ b/extensions-contrib/cloudfiles-extensions/src/test/java/io/druid/storage/cloudfiles/CloudFilesDataSegmentPusherTest.java @@ -84,7 +84,7 @@ public void testPush() throws Exception size ); - DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush); + DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush, false); Assert.assertEquals(segmentToPush.getSize(), 
segment.getSize()); diff --git a/extensions-contrib/distinctcount/pom.xml b/extensions-contrib/distinctcount/pom.xml index 86c3fcc09c9b..7dbb7fcc17c6 100644 --- a/extensions-contrib/distinctcount/pom.xml +++ b/extensions-contrib/distinctcount/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 io.druid.extensions.contrib @@ -30,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/druid-rocketmq/pom.xml b/extensions-contrib/druid-rocketmq/pom.xml index 924a07cdd424..3cba1a17663a 100644 --- a/extensions-contrib/druid-rocketmq/pom.xml +++ b/extensions-contrib/druid-rocketmq/pom.xml @@ -17,14 +17,12 @@ ~ specific language governing permissions and limitations ~ under the License. --> - + 4.0.0 druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/google-extensions/pom.xml b/extensions-contrib/google-extensions/pom.xml index 20a3dd7ba3d4..5b51c53a997f 100644 --- a/extensions-contrib/google-extensions/pom.xml +++ b/extensions-contrib/google-extensions/pom.xml @@ -29,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/google-extensions/src/main/java/io/druid/storage/google/GoogleDataSegmentPusher.java b/extensions-contrib/google-extensions/src/main/java/io/druid/storage/google/GoogleDataSegmentPusher.java index 527e7e14025e..7a3581462d36 100644 --- a/extensions-contrib/google-extensions/src/main/java/io/druid/storage/google/GoogleDataSegmentPusher.java +++ b/extensions-contrib/google-extensions/src/main/java/io/druid/storage/google/GoogleDataSegmentPusher.java @@ -93,7 +93,8 @@ public File createDescriptorFile(final ObjectMapper jsonMapper, final DataSegmen return descriptorFile; } - public void insert(final File file, final String contentType, final String path) throws IOException + public void insert(final File file, final String contentType, final String path) + throws IOException { LOG.info("Inserting [%s] to [%s]", file, path); @@ -106,7 +107,8 @@ public void insert(final File file, final String contentType, final String path) } @Override - public DataSegment push(final File indexFilesDir, final DataSegment segment) throws IOException + public DataSegment push(final File indexFilesDir, final DataSegment segment, final boolean useUniquePath) + throws IOException { LOG.info("Uploading [%s] to Google.", indexFilesDir); @@ -117,7 +119,7 @@ public DataSegment push(final File indexFilesDir, final DataSegment segment) thr try { indexFile = File.createTempFile("index", ".zip"); final long indexSize = CompressionUtils.zip(indexFilesDir, indexFile); - final String storageDir = this.getStorageDir(segment); + final String storageDir = this.getStorageDir(segment, useUniquePath); final String indexPath = buildPath(storageDir + "/" + "index.zip"); final String descriptorPath = buildPath(storageDir + "/" + "descriptor.json"); diff --git a/extensions-contrib/google-extensions/src/test/java/io/druid/storage/google/GoogleDataSegmentPusherTest.java b/extensions-contrib/google-extensions/src/test/java/io/druid/storage/google/GoogleDataSegmentPusherTest.java index 6c845d433c77..b90f0d5f560e 100644 --- a/extensions-contrib/google-extensions/src/test/java/io/druid/storage/google/GoogleDataSegmentPusherTest.java +++ b/extensions-contrib/google-extensions/src/test/java/io/druid/storage/google/GoogleDataSegmentPusherTest.java @@ -89,7 +89,7 @@ public void testPush() throws Exception "foo", Intervals.of("2015/2016"), "0", - Maps.newHashMap(), + 
Maps.newHashMap(), Lists.newArrayList(), Lists.newArrayList(), new NoneShardSpec(), @@ -105,18 +105,26 @@ public void testPush() throws Exception jsonMapper ).addMockedMethod("insert", File.class, String.class, String.class).createMock(); - final String storageDir = pusher.getStorageDir(segmentToPush); + final String storageDir = pusher.getStorageDir(segmentToPush, false); final String indexPath = prefix + "/" + storageDir + "/" + "index.zip"; final String descriptorPath = prefix + "/" + storageDir + "/" + "descriptor.json"; - pusher.insert(EasyMock.anyObject(File.class), EasyMock.eq("application/zip"), EasyMock.eq(indexPath)); + pusher.insert( + EasyMock.anyObject(File.class), + EasyMock.eq("application/zip"), + EasyMock.eq(indexPath) + ); expectLastCall(); - pusher.insert(EasyMock.anyObject(File.class), EasyMock.eq("application/json"), EasyMock.eq(descriptorPath)); + pusher.insert( + EasyMock.anyObject(File.class), + EasyMock.eq("application/json"), + EasyMock.eq(descriptorPath) + ); expectLastCall(); replayAll(); - DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush); + DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush, false); Assert.assertEquals(segmentToPush.getSize(), segment.getSize()); Assert.assertEquals(segmentToPush, segment); diff --git a/extensions-contrib/graphite-emitter/pom.xml b/extensions-contrib/graphite-emitter/pom.xml index 8569823ea94b..f585362a4b8b 100644 --- a/extensions-contrib/graphite-emitter/pom.xml +++ b/extensions-contrib/graphite-emitter/pom.xml @@ -18,14 +18,13 @@ ~ under the License. --> - + 4.0.0 io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml @@ -54,8 +53,9 @@ provided - com.metamx + io.druid java-util + ${project.parent.version} provided diff --git a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/DruidToGraphiteEventConverter.java b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/DruidToGraphiteEventConverter.java index 673fa2af2af7..243b6119799a 100644 --- a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/DruidToGraphiteEventConverter.java +++ b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/DruidToGraphiteEventConverter.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type", defaultImpl = WhiteListBasedConverter.class) diff --git a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/GraphiteEmitter.java b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/GraphiteEmitter.java index cb0fb30d05bf..bba2796862c5 100644 --- a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/GraphiteEmitter.java +++ b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/GraphiteEmitter.java @@ -23,10 +23,10 @@ import com.codahale.metrics.graphite.GraphiteSender; import com.codahale.metrics.graphite.PickledGraphite; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.Event; -import com.metamx.emitter.service.AlertEvent; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Event; +import 
io.druid.java.util.emitter.service.AlertEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.ISE; import io.druid.java.util.common.logger.Logger; diff --git a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/GraphiteEmitterModule.java b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/GraphiteEmitterModule.java index b7fd38b3681f..12b27213c791 100644 --- a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/GraphiteEmitterModule.java +++ b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/GraphiteEmitterModule.java @@ -29,7 +29,7 @@ import com.google.inject.Provides; import com.google.inject.name.Named; import com.google.inject.name.Names; -import com.metamx.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Emitter; import io.druid.guice.JsonConfigProvider; import io.druid.guice.ManageLifecycle; import io.druid.initialization.DruidModule; diff --git a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/SendAllGraphiteEventConverter.java b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/SendAllGraphiteEventConverter.java index dc26014e1b3f..e3b705684291 100644 --- a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/SendAllGraphiteEventConverter.java +++ b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/SendAllGraphiteEventConverter.java @@ -26,7 +26,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSortedSet; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import java.util.concurrent.TimeUnit; diff --git a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/WhiteListBasedConverter.java b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/WhiteListBasedConverter.java index ac53033ca6a3..25018c5f5048 100644 --- a/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/WhiteListBasedConverter.java +++ b/extensions-contrib/graphite-emitter/src/main/java/io/druid/emitter/graphite/WhiteListBasedConverter.java @@ -33,7 +33,7 @@ import com.google.common.collect.ImmutableSortedMap; import com.google.common.io.Files; import com.google.common.io.Resources; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.ISE; import io.druid.java.util.common.logger.Logger; diff --git a/extensions-contrib/graphite-emitter/src/test/java/io/druid/emitter/graphite/WhiteListBasedConverterTest.java b/extensions-contrib/graphite-emitter/src/test/java/io/druid/emitter/graphite/WhiteListBasedConverterTest.java index 11908fb1bb23..10022a4023b8 100644 --- a/extensions-contrib/graphite-emitter/src/test/java/io/druid/emitter/graphite/WhiteListBasedConverterTest.java +++ b/extensions-contrib/graphite-emitter/src/test/java/io/druid/emitter/graphite/WhiteListBasedConverterTest.java @@ -20,7 +20,7 @@ package io.druid.emitter.graphite; import com.google.common.collect.Maps; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.DateTimes; import junitparams.JUnitParamsRunner; diff --git 
a/extensions-contrib/kafka-eight-simpleConsumer/pom.xml b/extensions-contrib/kafka-eight-simpleConsumer/pom.xml index 1c9f5c17ffe2..9572a71dbcdb 100644 --- a/extensions-contrib/kafka-eight-simpleConsumer/pom.xml +++ b/extensions-contrib/kafka-eight-simpleConsumer/pom.xml @@ -16,8 +16,7 @@ ~ limitations under the License. --> - + 4.0.0 io.druid.extensions.contrib druid-kafka-eight-simple-consumer @@ -27,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml @@ -39,8 +38,9 @@ provided - com.metamx + io.druid java-util + ${project.parent.version} provided diff --git a/extensions-contrib/kafka-eight-simpleConsumer/src/main/java/io/druid/firehose/kafka/KafkaEightSimpleConsumerFirehoseFactory.java b/extensions-contrib/kafka-eight-simpleConsumer/src/main/java/io/druid/firehose/kafka/KafkaEightSimpleConsumerFirehoseFactory.java index 0c53007311d8..c65fa808d70c 100644 --- a/extensions-contrib/kafka-eight-simpleConsumer/src/main/java/io/druid/firehose/kafka/KafkaEightSimpleConsumerFirehoseFactory.java +++ b/extensions-contrib/kafka-eight-simpleConsumer/src/main/java/io/druid/firehose/kafka/KafkaEightSimpleConsumerFirehoseFactory.java @@ -25,8 +25,8 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Maps; import com.google.common.io.Closeables; -import com.metamx.common.parsers.ParseException; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.common.parsers.ParseException; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.data.input.ByteBufferInputRowParser; import io.druid.data.input.Committer; import io.druid.data.input.FirehoseFactoryV2; diff --git a/extensions-contrib/kafka-emitter/pom.xml b/extensions-contrib/kafka-emitter/pom.xml index 9130834d243f..3c087b1c0e03 100644 --- a/extensions-contrib/kafka-emitter/pom.xml +++ b/extensions-contrib/kafka-emitter/pom.xml @@ -18,14 +18,13 @@ ~ under the License. 
--> - + 4.0.0 io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml @@ -53,8 +52,9 @@ provided - com.metamx + io.druid java-util + ${project.parent.version} provided diff --git a/extensions-contrib/kafka-emitter/src/main/java/io/druid/emitter/kafka/KafkaEmitter.java b/extensions-contrib/kafka-emitter/src/main/java/io/druid/emitter/kafka/KafkaEmitter.java index 7a791be5c84f..7628521dd8dc 100644 --- a/extensions-contrib/kafka-emitter/src/main/java/io/druid/emitter/kafka/KafkaEmitter.java +++ b/extensions-contrib/kafka-emitter/src/main/java/io/druid/emitter/kafka/KafkaEmitter.java @@ -22,10 +22,10 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableMap; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.Event; -import com.metamx.emitter.service.AlertEvent; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.AlertEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.emitter.kafka.MemoryBoundLinkedBlockingQueue.ObjectContainer; import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.lifecycle.LifecycleStart; diff --git a/extensions-contrib/kafka-emitter/src/main/java/io/druid/emitter/kafka/KafkaEmitterModule.java b/extensions-contrib/kafka-emitter/src/main/java/io/druid/emitter/kafka/KafkaEmitterModule.java index 0608b1f0d898..7db5724787f0 100644 --- a/extensions-contrib/kafka-emitter/src/main/java/io/druid/emitter/kafka/KafkaEmitterModule.java +++ b/extensions-contrib/kafka-emitter/src/main/java/io/druid/emitter/kafka/KafkaEmitterModule.java @@ -24,7 +24,7 @@ import com.google.inject.Binder; import com.google.inject.Provides; import com.google.inject.name.Named; -import com.metamx.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Emitter; import io.druid.guice.JsonConfigProvider; import io.druid.guice.ManageLifecycle; import io.druid.initialization.DruidModule; diff --git a/extensions-contrib/orc-extensions/pom.xml b/extensions-contrib/orc-extensions/pom.xml index 4eb48e949ebf..e2e6beca8440 100644 --- a/extensions-contrib/orc-extensions/pom.xml +++ b/extensions-contrib/orc-extensions/pom.xml @@ -15,9 +15,7 @@ ~ See the License for the specific language governing permissions and ~ limitations under the License. 
--> - + io.druid.extensions.contrib druid-orc-extensions druid-orc-extensions @@ -26,7 +24,7 @@ druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/parquet-extensions/pom.xml b/extensions-contrib/parquet-extensions/pom.xml index b2a460fc2891..9242bb05879c 100644 --- a/extensions-contrib/parquet-extensions/pom.xml +++ b/extensions-contrib/parquet-extensions/pom.xml @@ -1,7 +1,5 @@ - + io.druid.extensions.contrib druid-parquet-extensions druid-parquet-extensions @@ -10,7 +8,7 @@ druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/rabbitmq/pom.xml b/extensions-contrib/rabbitmq/pom.xml index 8f1ebc47f199..bbda4243759b 100644 --- a/extensions-contrib/rabbitmq/pom.xml +++ b/extensions-contrib/rabbitmq/pom.xml @@ -29,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/redis-cache/pom.xml b/extensions-contrib/redis-cache/pom.xml index 9d0c578bcd0d..bfa439100c78 100644 --- a/extensions-contrib/redis-cache/pom.xml +++ b/extensions-contrib/redis-cache/pom.xml @@ -19,8 +19,7 @@ ~ under the License. --> - + 4.0.0 io.druid.extensions.contrib @@ -30,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/redis-cache/src/main/java/io/druid/client/cache/RedisCache.java b/extensions-contrib/redis-cache/src/main/java/io/druid/client/cache/RedisCache.java index 11b63a2e0f92..9b4744fd961c 100644 --- a/extensions-contrib/redis-cache/src/main/java/io/druid/client/cache/RedisCache.java +++ b/extensions-contrib/redis-cache/src/main/java/io/druid/client/cache/RedisCache.java @@ -21,8 +21,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.logger.Logger; import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisPool; diff --git a/extensions-contrib/sqlserver-metadata-storage/pom.xml b/extensions-contrib/sqlserver-metadata-storage/pom.xml index 0f3970cdf204..5451d8ebdeb6 100644 --- a/extensions-contrib/sqlserver-metadata-storage/pom.xml +++ b/extensions-contrib/sqlserver-metadata-storage/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/statsd-emitter/pom.xml b/extensions-contrib/statsd-emitter/pom.xml index 061b2a961a8d..e8226dc3e5c7 100644 --- a/extensions-contrib/statsd-emitter/pom.xml +++ b/extensions-contrib/statsd-emitter/pom.xml @@ -17,13 +17,11 @@ ~ specific language governing permissions and limitations ~ under the License. 
--> - + druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml 4.0.0 @@ -47,8 +45,9 @@ provided - com.metamx + io.druid java-util + ${project.parent.version} provided diff --git a/extensions-contrib/statsd-emitter/src/main/java/io/druid/emitter/statsd/StatsDEmitter.java b/extensions-contrib/statsd-emitter/src/main/java/io/druid/emitter/statsd/StatsDEmitter.java index c949f13f4778..d395146f87e2 100644 --- a/extensions-contrib/statsd-emitter/src/main/java/io/druid/emitter/statsd/StatsDEmitter.java +++ b/extensions-contrib/statsd-emitter/src/main/java/io/druid/emitter/statsd/StatsDEmitter.java @@ -22,9 +22,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.Event; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import com.timgroup.statsd.NonBlockingStatsDClient; import com.timgroup.statsd.StatsDClient; import com.timgroup.statsd.StatsDClientErrorHandler; diff --git a/extensions-contrib/statsd-emitter/src/main/java/io/druid/emitter/statsd/StatsDEmitterModule.java b/extensions-contrib/statsd-emitter/src/main/java/io/druid/emitter/statsd/StatsDEmitterModule.java index 3d00da34eff1..2d8bd2087c0c 100644 --- a/extensions-contrib/statsd-emitter/src/main/java/io/druid/emitter/statsd/StatsDEmitterModule.java +++ b/extensions-contrib/statsd-emitter/src/main/java/io/druid/emitter/statsd/StatsDEmitterModule.java @@ -24,7 +24,7 @@ import com.google.inject.Binder; import com.google.inject.Provides; import com.google.inject.name.Named; -import com.metamx.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Emitter; import io.druid.guice.JsonConfigProvider; import io.druid.guice.ManageLifecycle; import io.druid.initialization.DruidModule; diff --git a/extensions-contrib/statsd-emitter/src/test/java/DimensionConverterTest.java b/extensions-contrib/statsd-emitter/src/test/java/DimensionConverterTest.java index b4c8fa33243b..62132a266cf1 100644 --- a/extensions-contrib/statsd-emitter/src/test/java/DimensionConverterTest.java +++ b/extensions-contrib/statsd-emitter/src/test/java/DimensionConverterTest.java @@ -19,7 +19,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.emitter.statsd.DimensionConverter; import io.druid.emitter.statsd.StatsDMetric; import io.druid.java.util.common.DateTimes; diff --git a/extensions-contrib/statsd-emitter/src/test/java/StatsDEmitterTest.java b/extensions-contrib/statsd-emitter/src/test/java/StatsDEmitterTest.java index 531ca7a5dbf5..16175d917a4e 100644 --- a/extensions-contrib/statsd-emitter/src/test/java/StatsDEmitterTest.java +++ b/extensions-contrib/statsd-emitter/src/test/java/StatsDEmitterTest.java @@ -18,7 +18,7 @@ */ import com.fasterxml.jackson.databind.ObjectMapper; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import com.timgroup.statsd.StatsDClient; import io.druid.emitter.statsd.StatsDEmitter; import io.druid.emitter.statsd.StatsDEmitterConfig; diff --git a/extensions-contrib/thrift-extensions/pom.xml b/extensions-contrib/thrift-extensions/pom.xml index 
18c51677a615..8c118c6cdbaa 100644 --- a/extensions-contrib/thrift-extensions/pom.xml +++ b/extensions-contrib/thrift-extensions/pom.xml @@ -1,7 +1,5 @@ - + io.druid.extensions.contrib druid-thrift-extensions @@ -11,7 +9,7 @@ druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/thrift-extensions/src/main/java/io/druid/data/input/thrift/ThriftInputRowParser.java b/extensions-contrib/thrift-extensions/src/main/java/io/druid/data/input/thrift/ThriftInputRowParser.java index 87169252b8f8..aa523f34993f 100644 --- a/extensions-contrib/thrift-extensions/src/main/java/io/druid/data/input/thrift/ThriftInputRowParser.java +++ b/extensions-contrib/thrift-extensions/src/main/java/io/druid/data/input/thrift/ThriftInputRowParser.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; -import com.metamx.common.IAE; +import io.druid.java.util.common.IAE; import com.twitter.elephantbird.mapreduce.io.ThriftWritable; import io.druid.data.input.InputRow; import io.druid.data.input.MapBasedInputRow; diff --git a/extensions-contrib/time-min-max/pom.xml b/extensions-contrib/time-min-max/pom.xml index 59bc7ee6b1a7..028d42fbb573 100644 --- a/extensions-contrib/time-min-max/pom.xml +++ b/extensions-contrib/time-min-max/pom.xml @@ -17,13 +17,11 @@ ~ specific language governing permissions and limitations ~ under the License. --> - + druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/virtual-columns/pom.xml b/extensions-contrib/virtual-columns/pom.xml index 3102a75d1d98..48af929aa085 100644 --- a/extensions-contrib/virtual-columns/pom.xml +++ b/extensions-contrib/virtual-columns/pom.xml @@ -29,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/virtual-columns/src/main/java/io/druid/segment/DruidVirtualColumnsModule.java b/extensions-contrib/virtual-columns/src/main/java/io/druid/segment/DruidVirtualColumnsModule.java index 184225d02453..9db5be737a22 100644 --- a/extensions-contrib/virtual-columns/src/main/java/io/druid/segment/DruidVirtualColumnsModule.java +++ b/extensions-contrib/virtual-columns/src/main/java/io/druid/segment/DruidVirtualColumnsModule.java @@ -20,6 +20,7 @@ package io.druid.segment; import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.jsontype.NamedType; import com.fasterxml.jackson.databind.module.SimpleModule; import com.google.common.collect.ImmutableList; import com.google.inject.Binder; @@ -34,7 +35,12 @@ public class DruidVirtualColumnsModule implements DruidModule @Override public List getJacksonModules() { - return ImmutableList.of(new SimpleModule().registerSubtypes(MapVirtualColumn.class)); + return ImmutableList.of( + new SimpleModule(getClass().getSimpleName()) + .registerSubtypes( + new NamedType(MapVirtualColumn.class, "map") + ) + ); } @Override diff --git a/extensions-contrib/virtual-columns/src/main/java/io/druid/segment/MapVirtualColumn.java b/extensions-contrib/virtual-columns/src/main/java/io/druid/segment/MapVirtualColumn.java index c17b219a0389..79950efd0875 100644 --- a/extensions-contrib/virtual-columns/src/main/java/io/druid/segment/MapVirtualColumn.java +++ b/extensions-contrib/virtual-columns/src/main/java/io/druid/segment/MapVirtualColumn.java @@ -24,7 +24,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import 
com.google.common.collect.Maps; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.query.dimension.DefaultDimensionSpec; import io.druid.query.dimension.DimensionSpec; import io.druid.query.filter.DimFilterUtils; diff --git a/extensions-contrib/virtual-columns/src/test/java/io/druid/segment/MapVirtualColumnTest.java b/extensions-contrib/virtual-columns/src/test/java/io/druid/segment/MapVirtualColumnTest.java index 283aa8684a9f..cc9417f7a0b3 100644 --- a/extensions-contrib/virtual-columns/src/test/java/io/druid/segment/MapVirtualColumnTest.java +++ b/extensions-contrib/virtual-columns/src/test/java/io/druid/segment/MapVirtualColumnTest.java @@ -19,6 +19,7 @@ package io.druid.segment; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Supplier; import com.google.common.base.Suppliers; import com.google.common.collect.ImmutableMap; @@ -138,6 +139,18 @@ private Druids.SelectQueryBuilder testBuilder() .pagingSpec(new PagingSpec(null, 3)); } + @Test + public void testSerde() throws IOException + { + final ObjectMapper mapper = new DefaultObjectMapper(); + new DruidVirtualColumnsModule().getJacksonModules().forEach(mapper::registerModule); + + final MapVirtualColumn column = new MapVirtualColumn("keys", "values", "params"); + final String json = mapper.writeValueAsString(column); + final VirtualColumn fromJson = mapper.readValue(json, VirtualColumn.class); + Assert.assertEquals(column, fromJson); + } + @Test public void testBasic() throws Exception { diff --git a/extensions-core/avro-extensions/pom.xml b/extensions-core/avro-extensions/pom.xml index 9102202fac31..00695d4446f5 100644 --- a/extensions-core/avro-extensions/pom.xml +++ b/extensions-core/avro-extensions/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 io.druid.extensions @@ -30,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/datasketches/pom.xml b/extensions-core/datasketches/pom.xml index f7cf74e7458e..d9e528dd7a56 100644 --- a/extensions-core/datasketches/pom.xml +++ b/extensions-core/datasketches/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
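// A minimal sketch of what the DruidVirtualColumnsModule change and the new testSerde above
// establish: registering MapVirtualColumn under the explicit NamedType "map" gives it a
// stable JSON type id, so virtual-column specs round-trip through Jackson. The mapper setup
// matches the test (which lives in the io.druid.segment package, so MapVirtualColumn and
// VirtualColumn need no imports there); the wrapper class is illustrative only.
import com.fasterxml.jackson.databind.ObjectMapper;
import io.druid.jackson.DefaultObjectMapper;

class MapVirtualColumnSerdeSketch
{
  static void demo() throws java.io.IOException
  {
    ObjectMapper mapper = new DefaultObjectMapper();
    new DruidVirtualColumnsModule().getJacksonModules().forEach(mapper::registerModule);

    MapVirtualColumn column = new MapVirtualColumn("keys", "values", "params");
    String json = mapper.writeValueAsString(column);   // serialized with "type": "map"
    VirtualColumn fromJson = mapper.readValue(json, VirtualColumn.class);
    // fromJson.equals(column) should hold, exactly as the test asserts
  }
}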
--> - + 4.0.0 io.druid.extensions @@ -30,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/datasketches/src/main/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchAggregatorFactory.java b/extensions-core/datasketches/src/main/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchAggregatorFactory.java index 7ebb40b5d7a5..bfbaaac86f7d 100644 --- a/extensions-core/datasketches/src/main/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchAggregatorFactory.java +++ b/extensions-core/datasketches/src/main/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchAggregatorFactory.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.metamx.common.IAE; +import io.druid.java.util.common.IAE; import com.yahoo.sketches.Util; import com.yahoo.sketches.quantiles.DoublesSketch; import com.yahoo.sketches.quantiles.DoublesUnion; diff --git a/extensions-core/datasketches/src/main/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchModule.java b/extensions-core/datasketches/src/main/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchModule.java index 0e3081965b03..f26bf348a33a 100644 --- a/extensions-core/datasketches/src/main/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchModule.java +++ b/extensions-core/datasketches/src/main/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchModule.java @@ -35,6 +35,7 @@ public class DoublesSketchModule implements DruidModule { public static final String DOUBLES_SKETCH = "quantilesDoublesSketch"; + public static final String DOUBLES_SKETCH_MERGE = "quantilesDoublesSketchMerge"; public static final String DOUBLES_SKETCH_HISTOGRAM_POST_AGG = "quantilesDoublesSketchToHistogram"; public static final String DOUBLES_SKETCH_QUANTILE_POST_AGG = "quantilesDoublesSketchToQuantile"; @@ -55,6 +56,7 @@ public List getJacksonModules() return Arrays. 
asList( new SimpleModule("DoublesQuantilesSketchModule").registerSubtypes( new NamedType(DoublesSketchAggregatorFactory.class, DOUBLES_SKETCH), + new NamedType(DoublesSketchMergeAggregatorFactory.class, DOUBLES_SKETCH_MERGE), new NamedType(DoublesSketchToHistogramPostAggregator.class, DOUBLES_SKETCH_HISTOGRAM_POST_AGG), new NamedType(DoublesSketchToQuantilePostAggregator.class, DOUBLES_SKETCH_QUANTILE_POST_AGG), new NamedType(DoublesSketchToQuantilesPostAggregator.class, DOUBLES_SKETCH_QUANTILES_POST_AGG), diff --git a/extensions-core/datasketches/src/test/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchAggregatorTest.java b/extensions-core/datasketches/src/test/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchAggregatorTest.java index 899c8392e4fd..ea07be5567b0 100644 --- a/extensions-core/datasketches/src/test/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchAggregatorTest.java +++ b/extensions-core/datasketches/src/test/java/io/druid/query/aggregation/datasketches/quantiles/DoublesSketchAggregatorTest.java @@ -28,6 +28,7 @@ import io.druid.java.util.common.guava.Sequence; import io.druid.java.util.common.guava.Sequences; import io.druid.query.aggregation.AggregationTestHelper; +import io.druid.query.aggregation.AggregatorFactory; import io.druid.query.groupby.GroupByQueryConfig; import io.druid.query.groupby.GroupByQueryRunnerTest; import org.junit.Assert; @@ -60,7 +61,8 @@ public DoublesSketchAggregatorTest(final GroupByQueryConfig config) module.getJacksonModules(), config, tempFolder); timeSeriesHelper = AggregationTestHelper.createTimeseriesQueryAggregationTestHelper( module.getJacksonModules(), - tempFolder); + tempFolder + ); } @Parameterized.Parameters(name = "{0}") @@ -68,7 +70,7 @@ public static Collection constructorFeeder() { final List constructors = Lists.newArrayList(); for (GroupByQueryConfig config : GroupByQueryRunnerTest.testConfigs()) { - constructors.add(new Object[] {config}); + constructors.add(new Object[]{config}); } return constructors; } @@ -78,11 +80,29 @@ public static Collection constructorFeeder() public void serializeDeserializeFactoryWithFieldName() throws Exception { ObjectMapper objectMapper = new DefaultObjectMapper(); + new DoublesSketchModule().getJacksonModules().forEach(objectMapper::registerModule); DoublesSketchAggregatorFactory factory = new DoublesSketchAggregatorFactory("name", "filedName", 128); - DoublesSketchAggregatorFactory other = objectMapper.readValue( + AggregatorFactory other = objectMapper.readValue( objectMapper.writeValueAsString(factory), - DoublesSketchAggregatorFactory.class); + AggregatorFactory.class + ); + + Assert.assertEquals(factory, other); + } + + // this is to test Json properties and equals for the combining factory + @Test + public void serializeDeserializeCombiningFactoryWithFieldName() throws Exception + { + ObjectMapper objectMapper = new DefaultObjectMapper(); + new DoublesSketchModule().getJacksonModules().forEach(objectMapper::registerModule); + DoublesSketchAggregatorFactory factory = new DoublesSketchMergeAggregatorFactory("name", 128); + + AggregatorFactory other = objectMapper.readValue( + objectMapper.writeValueAsString(factory), + AggregatorFactory.class + ); Assert.assertEquals(factory, other); } @@ -92,7 +112,8 @@ public void ingestingSketches() throws Exception { Sequence seq = helper.createIndexAndRunQueryOnSegment( new File(this.getClass().getClassLoader().getResource("quantiles/doubles_sketch_data.tsv").getFile()), - String.join("\n", + 
String.join( + "\n", "{", " \"type\": \"string\",", " \"parseSpec\": {", @@ -105,16 +126,20 @@ public void ingestingSketches() throws Exception " },", " \"columns\": [\"timestamp\", \"product\", \"sketch\"]", " }", - "}"), - String.join("\n", + "}" + ), + String.join( + "\n", "[", " {\"type\": \"quantilesDoublesSketch\", \"name\": \"sketch\", \"fieldName\": \"sketch\", \"k\": 128},", " {\"type\": \"quantilesDoublesSketch\", \"name\": \"non_existent_sketch\", \"fieldName\": \"non_existent_sketch\", \"k\": 128}", - "]"), + "]" + ), 0, // minTimestamp Granularities.NONE, 10, // maxRowCount - String.join("\n", + String.join( + "\n", "{", " \"queryType\": \"groupBy\",", " \"dataSource\": \"test_datasource\",", @@ -129,8 +154,10 @@ public void ingestingSketches() throws Exception " {\"type\": \"quantilesDoublesSketchToHistogram\", \"name\": \"histogram\", \"splitPoints\": [0.25, 0.5, 0.75], \"field\": {\"type\": \"fieldAccess\", \"fieldName\": \"sketch\"}}", " ],", " \"intervals\": [\"2016-01-01T00:00:00.000Z/2016-01-31T00:00:00.000Z\"]", - "}")); - List results = Sequences.toList(seq, new ArrayList()); + "}" + ) + ); + List results = Sequences.toList(seq, new ArrayList<>()); Assert.assertEquals(1, results.size()); Row row = results.get(0); @@ -156,9 +183,9 @@ public void ingestingSketches() throws Exception Object histogramObject = row.getRaw("histogram"); Assert.assertTrue(histogramObject instanceof double[]); double[] histogram = (double[]) histogramObject; - for (final double bin: histogram) { - Assert.assertEquals(100, bin, 100 * 0.2); // 400 items uniformly - // distributed into 4 bins + for (final double bin : histogram) { + // 400 items uniformly distributed into 4 bins + Assert.assertEquals(100, bin, 100 * 0.2); } } @@ -167,7 +194,8 @@ public void buildingSketchesAtIngestionTime() throws Exception { Sequence seq = helper.createIndexAndRunQueryOnSegment( new File(this.getClass().getClassLoader().getResource("quantiles/doubles_build_data.tsv").getFile()), - String.join("\n", + String.join( + "\n", "{", " \"type\": \"string\",", " \"parseSpec\": {", @@ -180,12 +208,14 @@ public void buildingSketchesAtIngestionTime() throws Exception " },", " \"columns\": [\"timestamp\", \"sequenceNumber\", \"product\", \"value\"]", " }", - "}"), + "}" + ), "[{\"type\": \"quantilesDoublesSketch\", \"name\": \"sketch\", \"fieldName\": \"value\", \"k\": 128}]", 0, // minTimestamp Granularities.NONE, 10, // maxRowCount - String.join("\n", + String.join( + "\n", "{", " \"queryType\": \"groupBy\",", " \"dataSource\": \"test_datasource\",", @@ -200,8 +230,10 @@ public void buildingSketchesAtIngestionTime() throws Exception " {\"type\": \"quantilesDoublesSketchToHistogram\", \"name\": \"histogram\", \"splitPoints\": [0.25, 0.5, 0.75], \"field\": {\"type\": \"fieldAccess\", \"fieldName\": \"sketch\"}}", " ],", " \"intervals\": [\"2016-01-01T00:00:00.000Z/2016-01-31T00:00:00.000Z\"]", - "}")); - List results = Sequences.toList(seq, new ArrayList()); + "}" + ) + ); + List results = Sequences.toList(seq, new ArrayList<>()); Assert.assertEquals(1, results.size()); Row row = results.get(0); @@ -225,7 +257,7 @@ public void buildingSketchesAtIngestionTime() throws Exception Assert.assertEquals(4, histogram.length); for (final double bin: histogram) { Assert.assertEquals(100, bin, 100 * 0.2); // 400 items uniformly - // distributed into 4 bins + // distributed into 4 bins } } @@ -234,7 +266,8 @@ public void buildingSketchesAtQueryTime() throws Exception { Sequence seq = helper.createIndexAndRunQueryOnSegment( new 
File(this.getClass().getClassLoader().getResource("quantiles/doubles_build_data.tsv").getFile()), - String.join("\n", + String.join( + "\n", "{", " \"type\": \"string\",", " \"parseSpec\": {", @@ -247,12 +280,14 @@ public void buildingSketchesAtQueryTime() throws Exception " },", " \"columns\": [\"timestamp\", \"sequenceNumber\", \"product\", \"value\"]", " }", - "}"), + "}" + ), "[{\"type\": \"doubleSum\", \"name\": \"value\", \"fieldName\": \"value\"}]", 0, // minTimestamp Granularities.NONE, 10, // maxRowCount - String.join("\n", + String.join( + "\n", "{", " \"queryType\": \"groupBy\",", " \"dataSource\": \"test_datasource\",", @@ -267,8 +302,10 @@ public void buildingSketchesAtQueryTime() throws Exception " {\"type\": \"quantilesDoublesSketchToHistogram\", \"name\": \"histogram\", \"splitPoints\": [0.25, 0.5, 0.75], \"field\": {\"type\": \"fieldAccess\", \"fieldName\": \"sketch\"}}", " ],", " \"intervals\": [\"2016-01-01T00:00:00.000Z/2016-01-31T00:00:00.000Z\"]", - "}")); - List results = Sequences.toList(seq, new ArrayList()); + "}" + ) + ); + List results = Sequences.toList(seq, new ArrayList<>()); Assert.assertEquals(1, results.size()); Row row = results.get(0); @@ -296,7 +333,7 @@ public void buildingSketchesAtQueryTime() throws Exception double[] histogram = (double[]) histogramObject; for (final double bin: histogram) { Assert.assertEquals(100, bin, 100 * 0.2); // 400 items uniformly - // distributed into 4 bins + // distributed into 4 bins } } @@ -319,7 +356,8 @@ public void QueryingDataWithFieldNameValueAsFloatInsteadOfSketch() throws Except " },", " \"columns\": [\"timestamp\", \"sequenceNumber\", \"product\", \"value\"]", " }", - "}"), + "}" + ), "[{\"type\": \"doubleSum\", \"name\": \"value\", \"fieldName\": \"value\"}]", 0, // minTimestamp Granularities.NONE, @@ -340,8 +378,10 @@ public void QueryingDataWithFieldNameValueAsFloatInsteadOfSketch() throws Except " {\"type\": \"quantilesDoublesSketchToHistogram\", \"name\": \"histogram\", \"splitPoints\": [0.25, 0.5, 0.75], \"field\": {\"type\": \"fieldAccess\", \"fieldName\": \"sketch\"}}", " ],", " \"intervals\": [\"2016-01-01T00:00:00.000Z/2016-01-31T00:00:00.000Z\"]", - "}")); - List results = Sequences.toList(seq, new ArrayList()); + "}" + ) + ); + List results = Sequences.toList(seq, new ArrayList<>()); Assert.assertEquals(1, results.size()); Row row = results.get(0); @@ -369,7 +409,7 @@ public void QueryingDataWithFieldNameValueAsFloatInsteadOfSketch() throws Except double[] histogram = (double[]) histogramObject; for (final double bin: histogram) { Assert.assertEquals(100, bin, 100 * 0.2); // 400 items uniformly - // distributed into 4 bins + // distributed into 4 bins } } @@ -392,7 +432,8 @@ public void TimeSeriesQueryInputAsFloat() throws Exception " },", " \"columns\": [\"timestamp\", \"sequenceNumber\", \"product\", \"value\"]", " }", - "}"), + "}" + ), "[{\"type\": \"doubleSum\", \"name\": \"value\", \"fieldName\": \"value\"}]", 0, // minTimestamp Granularities.NONE, @@ -412,8 +453,10 @@ public void TimeSeriesQueryInputAsFloat() throws Exception " {\"type\": \"quantilesDoublesSketchToHistogram\", \"name\": \"histogram1\", \"splitPoints\": [0.25, 0.5, 0.75], \"field\": {\"type\": \"fieldAccess\", \"fieldName\": \"sketch\"}}", " ],", " \"intervals\": [\"2016-01-01T00:00:00.000Z/2016-01-31T00:00:00.000Z\"]", - "}")); - List results = Sequences.toList(seq, new ArrayList()); + "}" + ) + ); + List results = Sequences.toList(seq, new ArrayList<>()); Assert.assertEquals(1, results.size()); } } diff --git 
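// Sketch of the effect of the DOUBLES_SKETCH_MERGE registration earlier in this patch: a
// combining factory now round-trips through the generic AggregatorFactory type under the
// JSON type id "quantilesDoublesSketchMerge" (this mirrors the new
// serializeDeserializeCombiningFactoryWithFieldName test; the wrapper class is illustrative).
import com.fasterxml.jackson.databind.ObjectMapper;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.query.aggregation.AggregatorFactory;

class DoublesSketchMergeSerdeSketch
{
  static void demo() throws java.io.IOException
  {
    ObjectMapper mapper = new DefaultObjectMapper();
    new DoublesSketchModule().getJacksonModules().forEach(mapper::registerModule);

    AggregatorFactory factory = new DoublesSketchMergeAggregatorFactory("name", 128);
    AggregatorFactory other = mapper.readValue(mapper.writeValueAsString(factory), AggregatorFactory.class);
    // factory.equals(other) should hold; before this patch the merge factory had no registered type id
  }
}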
a/extensions-core/druid-basic-security/pom.xml b/extensions-core/druid-basic-security/pom.xml index 1de016381863..353975f2b698 100644 --- a/extensions-core/druid-basic-security/pom.xml +++ b/extensions-core/druid-basic-security/pom.xml @@ -17,9 +17,7 @@ ~ limitations under the License. --> - + 4.0.0 io.druid.extensions @@ -30,7 +28,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/CommonCacheNotifier.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/CommonCacheNotifier.java index 4b7a557197ca..9554928ba60b 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/CommonCacheNotifier.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/CommonCacheNotifier.java @@ -21,12 +21,12 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.ClientResponse; -import com.metamx.http.client.response.HttpResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.ClientResponse; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.discovery.DiscoveryDruidNode; import io.druid.discovery.DruidNodeDiscovery; import io.druid.discovery.DruidNodeDiscoveryProvider; diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BasicHTTPAuthenticator.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BasicHTTPAuthenticator.java index b127687999bd..eeb406ee681b 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BasicHTTPAuthenticator.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BasicHTTPAuthenticator.java @@ -105,7 +105,7 @@ public AuthenticationResult authenticateJDBCContext(Map context) } if (checkCredentials(user, password.toCharArray())) { - return new AuthenticationResult(user, name, null); + return new AuthenticationResult(user, authorizerName, name, null); } else { return null; } @@ -173,7 +173,7 @@ public void doFilter( char[] password = splits[1].toCharArray(); if (checkCredentials(user, password)) { - AuthenticationResult authenticationResult = new AuthenticationResult(user, authorizerName, null); + AuthenticationResult authenticationResult = new AuthenticationResult(user, authorizerName, name, null); servletRequest.setAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT, authenticationResult); } diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BasicHTTPEscalator.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BasicHTTPEscalator.java index 2eb73a23bf20..d2993f6509df 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BasicHTTPEscalator.java +++ 
b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BasicHTTPEscalator.java @@ -22,21 +22,11 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonTypeName; -import com.google.common.base.Throwables; -import com.metamx.http.client.CredentialedHttpClient; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.auth.BasicCredentials; -import io.druid.java.util.common.StringUtils; -import io.druid.security.basic.BasicAuthUtils; +import io.druid.java.util.http.client.CredentialedHttpClient; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.auth.BasicCredentials; import io.druid.server.security.AuthenticationResult; import io.druid.server.security.Escalator; -import org.eclipse.jetty.client.api.Authentication; -import org.eclipse.jetty.client.api.ContentResponse; -import org.eclipse.jetty.client.api.Request; -import org.eclipse.jetty.util.Attributes; -import org.jboss.netty.handler.codec.http.HttpHeaders; - -import java.net.URI; @JsonTypeName("basic") public class BasicHTTPEscalator implements Escalator @@ -66,51 +56,11 @@ public HttpClient createEscalatedClient(HttpClient baseClient) ); } - @Override - public org.eclipse.jetty.client.HttpClient createEscalatedJettyClient(org.eclipse.jetty.client.HttpClient baseClient) - { - baseClient.getAuthenticationStore().addAuthentication(new Authentication() - { - @Override - public boolean matches(String type, URI uri, String realm) - { - return true; - } - - @Override - public Result authenticate( - final Request request, ContentResponse response, Authentication.HeaderInfo headerInfo, Attributes context - ) - { - return new Result() - { - @Override - public URI getURI() - { - return request.getURI(); - } - - @Override - public void apply(Request request) - { - try { - final String unencodedCreds = StringUtils.format("%s:%s", internalClientUsername, internalClientPassword); - final String base64Creds = BasicAuthUtils.getEncodedCredentials(unencodedCreds); - request.getHeaders().add(HttpHeaders.Names.AUTHORIZATION, "Basic " + base64Creds); - } - catch (Throwable e) { - Throwables.propagate(e); - } - } - }; - } - }); - return baseClient; - } - @Override public AuthenticationResult createEscalatedAuthenticationResult() { - return new AuthenticationResult(internalClientUsername, authorizerName, null); + // if you find yourself asking why the authenticatedBy field is set to null, please read this: + // https://github.com/druid-io/druid/pull/5706#discussion_r185940889 + return new AuthenticationResult(internalClientUsername, authorizerName, null, null); } } diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BytesFullResponseHandler.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BytesFullResponseHandler.java index d8b65a5e8ff0..c548cfc0ac1a 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BytesFullResponseHandler.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BytesFullResponseHandler.java @@ -19,9 +19,9 @@ package io.druid.security.basic.authentication; -import com.metamx.http.client.response.ClientResponse; -import com.metamx.http.client.response.FullResponseHolder; -import com.metamx.http.client.response.HttpResponseHandler; +import
io.druid.java.util.http.client.response.ClientResponse; +import io.druid.java.util.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.response.HttpResponseHandler; import org.jboss.netty.handler.codec.http.HttpChunk; import org.jboss.netty.handler.codec.http.HttpResponse; diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BytesFullResponseHolder.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BytesFullResponseHolder.java index a273f5030982..b701468b461c 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BytesFullResponseHolder.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/BytesFullResponseHolder.java @@ -19,7 +19,7 @@ package io.druid.security.basic.authentication; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.response.FullResponseHolder; import org.jboss.netty.handler.codec.http.HttpResponse; import org.jboss.netty.handler.codec.http.HttpResponseStatus; diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/cache/CoordinatorBasicAuthenticatorCacheNotifier.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/cache/CoordinatorBasicAuthenticatorCacheNotifier.java index 2eff574cfd91..5ab501b5166f 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/cache/CoordinatorBasicAuthenticatorCacheNotifier.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/cache/CoordinatorBasicAuthenticatorCacheNotifier.java @@ -21,8 +21,8 @@ import com.google.common.base.Preconditions; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.HttpClient; import io.druid.concurrent.LifecycleLock; import io.druid.discovery.DruidNodeDiscoveryProvider; import io.druid.guice.ManageLifecycle; diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/cache/CoordinatorPollingBasicAuthenticatorCacheManager.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/cache/CoordinatorPollingBasicAuthenticatorCacheManager.java index 178232e13dfc..bc9a713b9c14 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/cache/CoordinatorPollingBasicAuthenticatorCacheManager.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/cache/CoordinatorPollingBasicAuthenticatorCacheManager.java @@ -21,16 +21,16 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; -import com.google.common.io.Files; import com.google.inject.Inject; import com.google.inject.Injector; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.Request; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.Request; import io.druid.client.coordinator.Coordinator; import io.druid.concurrent.LifecycleLock; import io.druid.discovery.DruidLeaderClient; import io.druid.guice.ManageLifecycle; import io.druid.guice.annotations.Smile; +import 
io.druid.java.util.common.FileUtils; import io.druid.java.util.common.ISE; import io.druid.java.util.common.RetryUtils; import io.druid.java.util.common.StringUtils; @@ -235,7 +235,7 @@ private void writeUserMapToDisk(String prefix, byte[] userMapBytes) throws IOExc File cacheDir = new File(commonCacheConfig.getCacheDirectory()); cacheDir.mkdirs(); File userMapFile = new File(commonCacheConfig.getCacheDirectory(), getUserMapFilename(prefix)); - Files.write(userMapBytes, userMapFile); + FileUtils.writeAtomically(userMapFile, out -> out.write(userMapBytes)); } private Map tryFetchUserMapFromCoordinator(String prefix) throws Exception diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/updater/CoordinatorBasicAuthenticatorMetadataStorageUpdater.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/updater/CoordinatorBasicAuthenticatorMetadataStorageUpdater.java index eacc02422373..872cdc103208 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/updater/CoordinatorBasicAuthenticatorMetadataStorageUpdater.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authentication/db/updater/CoordinatorBasicAuthenticatorMetadataStorageUpdater.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.common.config.ConfigManager; import io.druid.concurrent.LifecycleLock; import io.druid.guice.ManageLifecycle; diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/cache/CoordinatorBasicAuthorizerCacheNotifier.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/cache/CoordinatorBasicAuthorizerCacheNotifier.java index 924107cf92a2..9f7a1def62a3 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/cache/CoordinatorBasicAuthorizerCacheNotifier.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/cache/CoordinatorBasicAuthorizerCacheNotifier.java @@ -21,8 +21,8 @@ import com.google.common.base.Preconditions; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.HttpClient; import io.druid.concurrent.LifecycleLock; import io.druid.discovery.DruidNodeDiscoveryProvider; import io.druid.guice.ManageLifecycle; diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java index a57d931c97f5..5f993d3121f3 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/cache/CoordinatorPollingBasicAuthorizerCacheManager.java @@ -21,16 +21,16 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; -import 
com.google.common.io.Files; import com.google.inject.Inject; import com.google.inject.Injector; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.Request; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.Request; import io.druid.client.coordinator.Coordinator; import io.druid.concurrent.LifecycleLock; import io.druid.discovery.DruidLeaderClient; import io.druid.guice.ManageLifecycle; import io.druid.guice.annotations.Smile; +import io.druid.java.util.common.FileUtils; import io.druid.java.util.common.ISE; import io.druid.java.util.common.RetryUtils; import io.druid.java.util.common.StringUtils; @@ -212,7 +212,7 @@ private void writeMapToDisk(String prefix, byte[] userMapBytes) throws IOExcepti File cacheDir = new File(commonCacheConfig.getCacheDirectory()); cacheDir.mkdirs(); File userMapFile = new File(commonCacheConfig.getCacheDirectory(), getUserRoleMapFilename(prefix)); - Files.write(userMapBytes, userMapFile); + FileUtils.writeAtomically(userMapFile, out -> out.write(userMapBytes)); } @Nullable diff --git a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/updater/CoordinatorBasicAuthorizerMetadataStorageUpdater.java b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/updater/CoordinatorBasicAuthorizerMetadataStorageUpdater.java index ca80d19712d6..adc0a60591c5 100644 --- a/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/updater/CoordinatorBasicAuthorizerMetadataStorageUpdater.java +++ b/extensions-core/druid-basic-security/src/main/java/io/druid/security/basic/authorization/db/updater/CoordinatorBasicAuthorizerMetadataStorageUpdater.java @@ -23,7 +23,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.common.config.ConfigManager; import io.druid.concurrent.LifecycleLock; import io.druid.guice.ManageLifecycle; diff --git a/extensions-core/druid-basic-security/src/test/java/io/druid/security/authorization/BasicRoleBasedAuthorizerTest.java b/extensions-core/druid-basic-security/src/test/java/io/druid/security/authorization/BasicRoleBasedAuthorizerTest.java index 6132753c684d..aafac305525a 100644 --- a/extensions-core/druid-basic-security/src/test/java/io/druid/security/authorization/BasicRoleBasedAuthorizerTest.java +++ b/extensions-core/druid-basic-security/src/test/java/io/druid/security/authorization/BasicRoleBasedAuthorizerTest.java @@ -116,7 +116,7 @@ public void testAuth() updater.setPermissions(AUTHORIZER_NAME, "druidRole", permissions); - AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null); + AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null, null); Access access = authorizer.authorize( authenticationResult, diff --git a/extensions-core/druid-kerberos/pom.xml b/extensions-core/druid-kerberos/pom.xml index 08696bed8122..e533c5ed3071 100644 --- a/extensions-core/druid-kerberos/pom.xml +++ b/extensions-core/druid-kerberos/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
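// Both polling cache managers above make the same swap: Guava's Files.write is replaced
// with FileUtils.writeAtomically, so a crash mid-write cannot leave a truncated auth-cache
// file on disk (presumably via a write-to-temp-then-rename; the mechanics live in
// java-util, not in this patch). Usage, as in the hunks, with a hypothetical filename:
import io.druid.java.util.common.FileUtils;
import java.io.File;
import java.io.IOException;

class AtomicCacheWriteSketch
{
  static void write(File cacheDir, byte[] userMapBytes) throws IOException
  {
    cacheDir.mkdirs();
    File userMapFile = new File(cacheDir, "userRoleMap.cache"); // hypothetical name
    FileUtils.writeAtomically(userMapFile, out -> out.write(userMapBytes));
  }
}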
--> - + 4.0.0 io.druid.extensions @@ -30,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosAuthenticator.java b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosAuthenticator.java index 785a5a415cb5..df799c09df56 100644 --- a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosAuthenticator.java +++ b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosAuthenticator.java @@ -23,6 +23,8 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonTypeName; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import io.druid.guice.annotations.Self; import io.druid.java.util.common.StringUtils; @@ -41,6 +43,8 @@ import org.apache.hadoop.security.authentication.util.Signer; import org.apache.hadoop.security.authentication.util.SignerException; import org.apache.hadoop.security.authentication.util.SignerSecretProvider; +import org.eclipse.jetty.client.api.Request; +import org.eclipse.jetty.http.HttpHeader; import sun.security.krb5.EncryptedData; import sun.security.krb5.EncryptionKey; import sun.security.krb5.internal.APReq; @@ -72,18 +76,25 @@ import javax.servlet.http.HttpServletResponse; import java.io.File; import java.io.IOException; +import java.net.HttpCookie; import java.security.Principal; +import java.text.SimpleDateFormat; import java.util.Collections; +import java.util.Date; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Properties; import java.util.Random; import java.util.Set; +import java.util.TimeZone; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; + @JsonTypeName("kerberos") public class KerberosAuthenticator implements Authenticator @@ -91,40 +102,44 @@ public class KerberosAuthenticator implements Authenticator private static final Logger log = new Logger(KerberosAuthenticator.class); private static final Pattern HADOOP_AUTH_COOKIE_REGEX = Pattern.compile(".*p=(\\S+)&t=.*"); public static final List DEFAULT_EXCLUDED_PATHS = Collections.emptyList(); + public static final String SIGNED_TOKEN_ATTRIBUTE = "signedToken"; private final DruidNode node; private final String serverPrincipal; private final String serverKeytab; - private final String internalClientPrincipal; - private final String internalClientKeytab; private final String authToLocal; private final List excludedPaths; private final String cookieSignatureSecret; private final String authorizerName; + private final String name; private LoginContext loginContext; @JsonCreator public KerberosAuthenticator( @JsonProperty("serverPrincipal") String serverPrincipal, @JsonProperty("serverKeytab") String serverKeytab, - @JsonProperty("internalClientPrincipal") String internalClientPrincipal, - @JsonProperty("internalClientKeytab") String internalClientKeytab, @JsonProperty("authToLocal") String authToLocal, @JsonProperty("excludedPaths") List excludedPaths, @JsonProperty("cookieSignatureSecret") String cookieSignatureSecret, @JsonProperty("authorizerName") String authorizerName, + @JsonProperty("name") String name, @JacksonInject @Self DruidNode node ) { this.node = node; - 
this.serverPrincipal = serverPrincipal; this.serverKeytab = serverKeytab; - this.internalClientPrincipal = internalClientPrincipal; - this.internalClientKeytab = internalClientKeytab; this.authToLocal = authToLocal == null ? "DEFAULT" : authToLocal; this.excludedPaths = excludedPaths == null ? DEFAULT_EXCLUDED_PATHS : excludedPaths; this.cookieSignatureSecret = cookieSignatureSecret; this.authorizerName = authorizerName; + this.name = Preconditions.checkNotNull(name); + + try { + this.serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, node.getHost()); + } + catch (Exception e) { + throw new RuntimeException(e); + } } @Override @@ -259,7 +274,7 @@ public void doFilter( if (clientPrincipal != null) { request.setAttribute( AuthConfig.DRUID_AUTHENTICATION_RESULT, - new AuthenticationResult(clientPrincipal, authorizerName, null) + new AuthenticationResult(clientPrincipal, authorizerName, name, null) ); } } @@ -333,7 +348,20 @@ public Principal getUserPrincipal() createAuthCookie(httpResponse, signedToken, getCookieDomain(), getCookiePath(), token.getExpires(), isHttps ); + request.setAttribute(SIGNED_TOKEN_ATTRIBUTE, tokenToCookieString( + signedToken, + getCookieDomain(), + getCookiePath(), + token.getExpires(), + !token.isExpired() && token.getExpires() > 0, + isHttps + )); } + // Since this request is validated, also set DRUID_AUTHENTICATION_RESULT + request.setAttribute( + AuthConfig.DRUID_AUTHENTICATION_RESULT, + new AuthenticationResult(token.getName(), authorizerName, name, null) + ); doFilter(filterChain, httpRequest, httpResponse); } } else { @@ -345,7 +373,7 @@ public Principal getUserPrincipal() errCode = HttpServletResponse.SC_FORBIDDEN; authenticationEx = ex; if (log.isDebugEnabled()) { - log.debug("Authentication exception: " + ex.getMessage(), ex); + log.debug(ex, "Authentication exception: " + ex.getMessage()); } else { log.warn("Authentication exception: " + ex.getMessage()); } @@ -388,20 +416,12 @@ public Class getFilterClass() public Map getInitParameters() { Map params = new HashMap(); - try { - params.put( - "kerberos.principal", - SecurityUtil.getServerPrincipal(serverPrincipal, node.getHost()) - ); - params.put("kerberos.keytab", serverKeytab); - params.put(AuthenticationFilter.AUTH_TYPE, DruidKerberosAuthenticationHandler.class.getName()); - params.put("kerberos.name.rules", authToLocal); - if (cookieSignatureSecret != null) { - params.put("signature.secret", cookieSignatureSecret); - } - } - catch (IOException e) { - Throwables.propagate(e); + params.put("kerberos.principal", serverPrincipal); + params.put("kerberos.keytab", serverKeytab); + params.put(AuthenticationFilter.AUTH_TYPE, DruidKerberosAuthenticationHandler.class.getName()); + params.put("kerberos.name.rules", authToLocal); + if (cookieSignatureSecret != null) { + params.put("signature.secret", cookieSignatureSecret); } return params; } @@ -440,6 +460,22 @@ private boolean isExcluded(String path) return false; } + @Override + public void decorateProxyRequest( + HttpServletRequest clientRequest, HttpServletResponse proxyResponse, Request proxyRequest + ) + { + Object cookieToken = clientRequest.getAttribute(SIGNED_TOKEN_ATTRIBUTE); + if (cookieToken != null && cookieToken instanceof String) { + log.debug("Found cookie token; will attach it to the proxyRequest as a cookie"); + String authResult = (String) cookieToken; + String existingCookies = proxyRequest.getCookies() + .stream() + .map(HttpCookie::toString) + .collect(Collectors.joining(";")); + proxyRequest.header(HttpHeader.COOKIE,
Joiner.on(";").join(authResult, existingCookies)); + } + } /** * Kerberos context configuration for the JDK GSS library. Copied from hadoop-auth's KerberosAuthenticationHandler. @@ -533,8 +569,8 @@ private String getPrincipalFromRequestNew(HttpServletRequest req) for (Object cred : serverCreds) { if (cred instanceof KeyTab) { KeyTab serverKeyTab = (KeyTab) cred; - KerberosPrincipal serverPrincipal = new KerberosPrincipal(this.serverPrincipal); - KerberosKey[] serverKeys = serverKeyTab.getKeys(serverPrincipal); + KerberosPrincipal kerberosPrincipal = new KerberosPrincipal(serverPrincipal); + KerberosKey[] serverKeys = serverKeyTab.getKeys(kerberosPrincipal); for (KerberosKey key : serverKeys) { if (key.getKeyType() == eType) { finalKey = new EncryptionKey(key.getKeyType(), key.getEncoded()); @@ -573,12 +609,10 @@ private boolean isValueAPReq(DerValue value) private void initializeKerberosLogin() throws ServletException { - String principal; String keytab; try { - principal = SecurityUtil.getServerPrincipal(serverPrincipal, node.getHost()); - if (principal == null || principal.trim().length() == 0) { + if (serverPrincipal == null || serverPrincipal.trim().length() == 0) { throw new ServletException("Principal not defined in configuration"); } keytab = serverKeytab; @@ -590,19 +624,85 @@ private void initializeKerberosLogin() throws ServletException } Set principals = new HashSet(); - principals.add(new KerberosPrincipal(principal)); + principals.add(new KerberosPrincipal(serverPrincipal)); Subject subject = new Subject(false, principals, new HashSet(), new HashSet()); - DruidKerberosConfiguration kerberosConfiguration = new DruidKerberosConfiguration(keytab, principal); + DruidKerberosConfiguration kerberosConfiguration = new DruidKerberosConfiguration(keytab, serverPrincipal); - log.info("Login using keytab " + keytab + ", for principal " + principal); + log.info("Login using keytab " + keytab + ", for principal " + serverPrincipal); loginContext = new LoginContext("", subject, null, kerberosConfiguration); loginContext.login(); - log.info("Initialized, principal %s from keytab %s", principal, keytab); + log.info("Initialized, principal %s from keytab %s", serverPrincipal, keytab); } catch (Exception ex) { throw new ServletException(ex); } } + + /** + * Creates the Hadoop authentication HTTP cookie. + * + * @param resp the response object. + * @param token authentication token for the cookie. + * @param domain the cookie domain. + * @param path the cookie path. + * @param expires UNIX timestamp that indicates the expire date of the + * cookie. It has no effect if its value < 0. + * @param isSecure is the cookie secure? + * @param isCookiePersistent whether the cookie is persistent or not. 
+ * The following code is copied from Hadoop 3.0.0, to avoid a compilation issue caused by the new signature of + * org.apache.hadoop.security.authentication.server.AuthenticationFilter#createAuthCookie + * ( + * javax.servlet.http.HttpServletResponse, + * java.lang.String, + * java.lang.String, + * java.lang.String, + * long, boolean, boolean) + */ + private static void tokenToAuthCookie( + HttpServletResponse resp, String token, + String domain, String path, long expires, + boolean isCookiePersistent, + boolean isSecure + ) + { + resp.addHeader("Set-Cookie", tokenToCookieString(token, domain, path, expires, isCookiePersistent, isSecure)); + } + + private static String tokenToCookieString( + String token, + String domain, String path, long expires, + boolean isCookiePersistent, + boolean isSecure + ) + { + StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE) + .append("="); + if (token != null && token.length() > 0) { + sb.append("\"").append(token).append("\""); + } + + if (path != null) { + sb.append("; Path=").append(path); + } + + if (domain != null) { + sb.append("; Domain=").append(domain); + } + + if (expires >= 0 && isCookiePersistent) { + Date date = new Date(expires); + SimpleDateFormat df = new SimpleDateFormat("EEE, dd-MMM-yyyy HH:mm:ss zzz", Locale.ENGLISH); + df.setTimeZone(TimeZone.getTimeZone("GMT")); + sb.append("; Expires=").append(df.format(date)); + } + + if (isSecure) { + sb.append("; Secure"); + } + + sb.append("; HttpOnly"); + return sb.toString(); + } } diff --git a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosEscalator.java b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosEscalator.java index e995233a23c2..3b25f5e8af2e 100644 --- a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosEscalator.java +++ b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosEscalator.java @@ -22,20 +22,10 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonTypeName; -import com.google.common.base.Throwables; -import com.metamx.http.client.HttpClient; import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.http.client.HttpClient; import io.druid.server.security.AuthenticationResult; import io.druid.server.security.Escalator; -import org.apache.hadoop.security.UserGroupInformation; -import org.eclipse.jetty.client.api.Authentication; -import org.eclipse.jetty.client.api.ContentResponse; -import org.eclipse.jetty.client.api.Request; -import org.eclipse.jetty.util.Attributes; -import org.jboss.netty.handler.codec.http.HttpHeaders; - -import java.net.URI; -import java.security.PrivilegedExceptionAction; @JsonTypeName("kerberos") public class KerberosEscalator implements Escalator @@ -64,71 +54,12 @@ public HttpClient createEscalatedClient(HttpClient baseClient) return new KerberosHttpClient(baseClient, internalClientPrincipal, internalClientKeytab); } - @Override - public org.eclipse.jetty.client.HttpClient createEscalatedJettyClient(org.eclipse.jetty.client.HttpClient baseClient) - { - baseClient.getAuthenticationStore().addAuthentication(new Authentication() - { - @Override - public boolean matches(String type, URI uri, String realm) - { - return true; - } - - @Override - public Result authenticate( - final Request request, ContentResponse response, Authentication.HeaderInfo headerInfo, Attributes context - ) - { - return new Result() - {
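// Rough illustration of the string the tokenToCookieString helper above produces; the
// token value and domain are hypothetical, and AuthenticatedURL.AUTH_COOKIE is
// hadoop-auth's "hadoop.auth" cookie name:
//
//   hadoop.auth="u=druid&p=HTTP/host@EXAMPLE.COM&t=kerberos&e=1514764800000&s=...";
//     Path=/; Domain=.example.com; Expires=Mon, 01-Jan-2018 00:00:00 GMT; Secure; HttpOnly
//
// The GMT-formatted Expires attribute is appended only when expires >= 0 and the cookie is
// persistent; this same string is what doFilter stashes in SIGNED_TOKEN_ATTRIBUTE for
// decorateProxyRequest to forward on proxied requests.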
- @Override - public URI getURI() - { - return request.getURI(); - } - - @Override - public void apply(Request request) - { - try { - // No need to set cookies as they are handled by Jetty Http Client itself. - URI uri = request.getURI(); - if (DruidKerberosUtil.needToSendCredentials(baseClient.getCookieStore(), uri)) { - log.debug( - "No Auth Cookie found for URI[%s]. Existing Cookies[%s] Authenticating... ", - uri, - baseClient.getCookieStore().getCookies() - ); - final String host = request.getHost(); - DruidKerberosUtil.authenticateIfRequired(internalClientPrincipal, internalClientKeytab); - UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); - String challenge = currentUser.doAs(new PrivilegedExceptionAction() - { - @Override - public String run() throws Exception - { - return DruidKerberosUtil.kerberosChallenge(host); - } - }); - request.getHeaders().add(HttpHeaders.Names.AUTHORIZATION, "Negotiate " + challenge); - } else { - log.debug("Found Auth Cookie found for URI[%s].", uri); - } - } - catch (Throwable e) { - Throwables.propagate(e); - } - } - }; - } - }); - return baseClient; - } - @Override public AuthenticationResult createEscalatedAuthenticationResult() { - return new AuthenticationResult(internalClientPrincipal, authorizerName, null); + // if you find yourself asking why the authenticatedBy field is set to null, please read this: + // https://github.com/druid-io/druid/pull/5706#discussion_r185940889 + return new AuthenticationResult(internalClientPrincipal, authorizerName, null, null); } + } diff --git a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosHttpClient.java b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosHttpClient.java index c79deaf60e34..b1a66a8b2a75 100644 --- a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosHttpClient.java +++ b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/KerberosHttpClient.java @@ -24,10 +24,10 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; -import com.metamx.http.client.AbstractHttpClient; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.AbstractHttpClient; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.HttpResponseHandler; import io.druid.java.util.common.concurrent.Execs; import io.druid.java.util.common.logger.Logger; import org.apache.hadoop.security.UserGroupInformation; diff --git a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/ResponseCookieHandler.java b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/ResponseCookieHandler.java index 5a4d4df0b43e..755cdb96547d 100644 --- a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/ResponseCookieHandler.java +++ b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/ResponseCookieHandler.java @@ -21,8 +21,8 @@ import com.google.common.base.Function; import com.google.common.collect.Maps; -import com.metamx.http.client.response.ClientResponse; -import com.metamx.http.client.response.HttpResponseHandler; +import
io.druid.java.util.http.client.response.HttpResponseHandler; import io.druid.java.util.common.logger.Logger; import org.jboss.netty.handler.codec.http.HttpChunk; import org.jboss.netty.handler.codec.http.HttpHeaders; diff --git a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/RetryIfUnauthorizedResponseHandler.java b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/RetryIfUnauthorizedResponseHandler.java index 8d8148842c8b..01a7987e2bac 100644 --- a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/RetryIfUnauthorizedResponseHandler.java +++ b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/RetryIfUnauthorizedResponseHandler.java @@ -19,8 +19,8 @@ package io.druid.security.kerberos; -import com.metamx.http.client.response.ClientResponse; -import com.metamx.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.response.ClientResponse; +import io.druid.java.util.http.client.response.HttpResponseHandler; import io.druid.java.util.common.logger.Logger; import org.jboss.netty.handler.codec.http.HttpChunk; import org.jboss.netty.handler.codec.http.HttpResponse; diff --git a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/SpnegoFilterConfig.java b/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/SpnegoFilterConfig.java deleted file mode 100644 index 4dd4f9b01f97..000000000000 --- a/extensions-core/druid-kerberos/src/main/java/io/druid/security/kerberos/SpnegoFilterConfig.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to Metamarkets Group Inc. (Metamarkets) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Metamarkets licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package io.druid.security.kerberos; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; - -import java.util.Collections; -import java.util.List; - -public class SpnegoFilterConfig -{ - - public static final List DEFAULT_EXCLUDED_PATHS = Collections.emptyList(); - - @JsonProperty - private final String principal; - - @JsonProperty - private final String keytab; - - @JsonProperty - private final String authToLocal; - - @JsonProperty - private final List excludedPaths; - - @JsonProperty - private final String cookieSignatureSecret; - - @JsonCreator - public SpnegoFilterConfig( - @JsonProperty("principal") String principal, - @JsonProperty("keytab") String keytab, - @JsonProperty("authToLocal") String authToLocal, - @JsonProperty("excludedPaths") List excludedPaths, - @JsonProperty("cookieSignatureSecret") String cookieSignatureSecret - ) - { - this.principal = principal; - this.keytab = keytab; - this.authToLocal = authToLocal == null ? "DEFAULT" : authToLocal; - this.excludedPaths = excludedPaths == null ? 
DEFAULT_EXCLUDED_PATHS : excludedPaths; - this.cookieSignatureSecret = cookieSignatureSecret; - } - - @JsonProperty - public String getPrincipal() - { - return principal; - } - - @JsonProperty - public String getKeytab() - { - return keytab; - } - - @JsonProperty - public String getAuthToLocal() - { - return authToLocal; - } - - @JsonProperty - public List getExcludedPaths() - { - return excludedPaths; - } - - @JsonProperty - public String getCookieSignatureSecret() - { - return cookieSignatureSecret; - } - - @Override - public boolean equals(Object o) - { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - SpnegoFilterConfig that = (SpnegoFilterConfig) o; - - if (principal != null ? !principal.equals(that.principal) : that.principal != null) { - return false; - } - if (keytab != null ? !keytab.equals(that.keytab) : that.keytab != null) { - return false; - } - if (authToLocal != null ? !authToLocal.equals(that.authToLocal) : that.authToLocal != null) { - return false; - } - if (excludedPaths != null ? !excludedPaths.equals(that.excludedPaths) : that.excludedPaths != null) { - return false; - } - return cookieSignatureSecret != null - ? cookieSignatureSecret.equals(that.cookieSignatureSecret) - : that.cookieSignatureSecret == null; - - } - - @Override - public int hashCode() - { - int result = principal != null ? principal.hashCode() : 0; - result = 31 * result + (keytab != null ? keytab.hashCode() : 0); - result = 31 * result + (authToLocal != null ? authToLocal.hashCode() : 0); - result = 31 * result + (excludedPaths != null ? excludedPaths.hashCode() : 0); - result = 31 * result + (cookieSignatureSecret != null ? cookieSignatureSecret.hashCode() : 0); - return result; - } -} diff --git a/extensions-core/druid-kerberos/src/test/java/io/druid/security/kerberos/SpnegoFilterConfigTest.java b/extensions-core/druid-kerberos/src/test/java/io/druid/security/kerberos/SpnegoFilterConfigTest.java deleted file mode 100644 index e1ee98606324..000000000000 --- a/extensions-core/druid-kerberos/src/test/java/io/druid/security/kerberos/SpnegoFilterConfigTest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to Metamarkets Group Inc. (Metamarkets) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Metamarkets licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package io.druid.security.kerberos; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.inject.Binder; -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.google.inject.Module; -import com.google.inject.Provides; -import io.druid.guice.ConfigModule; -import io.druid.guice.DruidGuiceExtensions; -import io.druid.guice.JsonConfigProvider; -import io.druid.guice.LazySingleton; -import io.druid.guice.PropertiesModule; -import io.druid.jackson.DefaultObjectMapper; -import org.junit.Assert; -import org.junit.Test; - -import java.util.Arrays; -import java.util.Properties; - -public class SpnegoFilterConfigTest -{ - @Test - public void testserde() - { - Injector injector = Guice.createInjector( - new Module() - { - @Override - public void configure(Binder binder) - { - binder.install(new PropertiesModule(Arrays.asList("test.runtime.properties"))); - binder.install(new ConfigModule()); - binder.install(new DruidGuiceExtensions()); - JsonConfigProvider.bind(binder, "druid.hadoop.security.spnego", SpnegoFilterConfig.class); - } - - @Provides - @LazySingleton - public ObjectMapper jsonMapper() - { - return new DefaultObjectMapper(); - } - } - ); - - Properties props = injector.getInstance(Properties.class); - SpnegoFilterConfig config = injector.getInstance(SpnegoFilterConfig.class); - - Assert.assertEquals(props.getProperty("druid.hadoop.security.spnego.principal"), config.getPrincipal()); - Assert.assertEquals(props.getProperty("druid.hadoop.security.spnego.keytab"), config.getKeytab()); - Assert.assertEquals(props.getProperty("druid.hadoop.security.spnego.authToLocal"), config.getAuthToLocal()); - - - } -} diff --git a/extensions-core/hdfs-storage/pom.xml b/extensions-core/hdfs-storage/pom.xml index 5b6bb1cd4320..eb1bb4cad97e 100644 --- a/extensions-core/hdfs-storage/pom.xml +++ b/extensions-core/hdfs-storage/pom.xml @@ -27,7 +27,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml @@ -44,6 +44,12 @@ ${project.parent.version} provided + + io.druid + java-util + ${project.parent.version} + provided + org.apache.hadoop hadoop-client @@ -135,11 +141,6 @@ - - com.metamx - java-util - provided - org.apache.hadoop hadoop-aws diff --git a/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentFinder.java b/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentFinder.java index 6fba009cf865..8e75b36057c2 100644 --- a/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentFinder.java +++ b/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentFinder.java @@ -20,13 +20,14 @@ package io.druid.storage.hdfs; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.Sets; +import com.google.common.base.Preconditions; import com.google.inject.Inject; +import io.druid.java.util.common.Pair; +import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.logger.Logger; import io.druid.segment.loading.DataSegmentFinder; import io.druid.segment.loading.SegmentLoadingException; import io.druid.timeline.DataSegment; -import io.druid.java.util.common.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; @@ -34,14 +35,15 @@ import org.apache.hadoop.fs.RemoteIterator; import java.io.IOException; +import java.util.HashMap; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; /** */ public 
class HdfsDataSegmentFinder implements DataSegmentFinder { - private static final Logger log = new Logger(HdfsDataSegmentFinder.class); private final Configuration config; @@ -58,7 +60,7 @@ public HdfsDataSegmentFinder(Configuration config, ObjectMapper mapper) public Set findSegments(String workingDirPathStr, boolean updateDescriptor) throws SegmentLoadingException { - final Set segments = Sets.newHashSet(); + final Map> timestampedSegments = new HashMap<>(); final Path workingDirPath = new Path(workingDirPathStr); FileSystem fs; try { @@ -80,15 +82,31 @@ public Set findSegments(String workingDirPathStr, boolean updateDes final LocatedFileStatus locatedFileStatus = it.next(); final Path path = locatedFileStatus.getPath(); if (path.getName().endsWith("descriptor.json")) { - final Path indexZip; + + // There are 3 supported path formats: + // - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum/descriptor.json + // - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum_descriptor.json + // - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum_UUID_descriptor.json final String descriptorParts[] = path.getName().split("_"); - if (descriptorParts.length == 2 - && descriptorParts[1].equals("descriptor.json") - && org.apache.commons.lang.StringUtils.isNumeric(descriptorParts[0])) { - indexZip = new Path(path.getParent(), StringUtils.format("%s_index.zip", descriptorParts[0])); - } else { - indexZip = new Path(path.getParent(), "index.zip"); + + Path indexZip = new Path(path.getParent(), "index.zip"); + if (descriptorParts.length > 1) { + Preconditions.checkState(descriptorParts.length <= 3 && + org.apache.commons.lang.StringUtils.isNumeric(descriptorParts[0]) && + "descriptor.json".equals(descriptorParts[descriptorParts.length - 1]), + "Unexpected descriptor filename format [%s]", path + ); + + indexZip = new Path( + path.getParent(), + StringUtils.format( + "%s_%sindex.zip", + descriptorParts[0], + descriptorParts.length == 2 ? 
"" : descriptorParts[1] + "_" + ) + ); } + if (fs.exists(indexZip)) { final DataSegment dataSegment = mapper.readValue(fs.open(path), DataSegment.class); log.info("Found segment [%s] located at [%s]", dataSegment.getIdentifier(), indexZip); @@ -105,7 +123,12 @@ public Set findSegments(String workingDirPathStr, boolean updateDes mapper.writeValue(fs.create(path, true), dataSegment); } } - segments.add(dataSegment); + + DataSegmentFinder.putInMapRetainingNewest( + timestampedSegments, + dataSegment, + locatedFileStatus.getModificationTime() + ); } else { throw new SegmentLoadingException( "index.zip didn't exist at [%s] while descripter.json exists!?", @@ -119,7 +142,6 @@ public Set findSegments(String workingDirPathStr, boolean updateDes throw new SegmentLoadingException(e, "Problems interacting with filesystem[%s].", workingDirPath); } - return segments; + return timestampedSegments.values().stream().map(x -> x.lhs).collect(Collectors.toSet()); } - } diff --git a/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentKiller.java b/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentKiller.java index d910ae51b0ac..6cbd1eccd7c3 100644 --- a/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentKiller.java +++ b/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentKiller.java @@ -19,8 +19,9 @@ package io.druid.storage.hdfs; +import com.google.common.base.Preconditions; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.segment.loading.DataSegmentKiller; import io.druid.segment.loading.SegmentLoadingException; import io.druid.timeline.DataSegment; @@ -57,65 +58,53 @@ private static Path getPath(DataSegment segment) public void kill(DataSegment segment) throws SegmentLoadingException { final Path segmentPath = getPath(segment); - log.info("killing segment[%s] mapped to path[%s]", segment.getIdentifier(), segmentPath); + log.info("Killing segment[%s] mapped to path[%s]", segment.getIdentifier(), segmentPath); try { - String segmentLocation = segmentPath.getName(); + String filename = segmentPath.getName(); final FileSystem fs = segmentPath.getFileSystem(config); - if (!segmentLocation.endsWith(".zip")) { + if (!filename.endsWith(".zip")) { throw new SegmentLoadingException("Unknown file type[%s]", segmentPath); } else { if (!fs.exists(segmentPath)) { - log.warn("Segment Path [%s] does not exist. 
It appears to have been deleted already.", segmentPath); + log.warn("Segment path [%s] does not exist", segmentPath); return; } - String[] zipParts = segmentLocation.split("_"); - // for segments stored as hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum_index.zip - if (zipParts.length == 2 - && zipParts[1].equals("index.zip") - && StringUtils.isNumeric(zipParts[0])) { - if (!fs.delete(segmentPath, false)) { - throw new SegmentLoadingException( - "Unable to kill segment, failed to delete [%s]", - segmentPath.toString() - ); - } - Path descriptorPath = new Path( + // There are 3 supported path formats: + // - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum/index.zip + // - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum_index.zip + // - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum_UUID_index.zip + String[] zipParts = filename.split("_"); + + Path descriptorPath = new Path(segmentPath.getParent(), "descriptor.json"); + if (zipParts.length > 1) { + Preconditions.checkState(zipParts.length <= 3 && + StringUtils.isNumeric(zipParts[0]) && + "index.zip".equals(zipParts[zipParts.length - 1]), + "Unexpected segmentPath format [%s]", segmentPath + ); + + descriptorPath = new Path( segmentPath.getParent(), - io.druid.java.util.common.StringUtils.format("%s_descriptor.json", zipParts[0]) + io.druid.java.util.common.StringUtils.format( + "%s_%sdescriptor.json", + zipParts[0], + zipParts.length == 2 ? "" : zipParts[1] + "_" + ) ); - //delete partitionNumber_descriptor.json - if (!fs.delete(descriptorPath, false)) { - throw new SegmentLoadingException( - "Unable to kill segment, failed to delete [%s]", - descriptorPath.toString() - ); - } - //for segments stored as hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum_index.zip - // max depth to look is 2, i.e version directory and interval. - mayBeDeleteParentsUpto(fs, segmentPath, 2); - - } else { //for segments stored as hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum/ - // index.zip - if (!fs.delete(segmentPath, false)) { - throw new SegmentLoadingException( - "Unable to kill segment, failed to delete [%s]", - segmentPath.toString() - ); - } - Path descriptorPath = new Path(segmentPath.getParent(), "descriptor.json"); - if (!fs.delete(descriptorPath, false)) { - throw new SegmentLoadingException( - "Unable to kill segment, failed to delete [%s]", - descriptorPath.toString() - ); - } - //for segments stored as hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum/index.zip - //max depth to look is 3, i.e partition number directory,version directory and interval. - mayBeDeleteParentsUpto(fs, segmentPath, 3); } + + if (!fs.delete(segmentPath, false)) { + throw new SegmentLoadingException("Unable to kill segment, failed to delete [%s]", segmentPath.toString()); + } + + if (!fs.delete(descriptorPath, false)) { + throw new SegmentLoadingException("Unable to kill segment, failed to delete [%s]", descriptorPath.toString()); + } + + removeEmptyParentDirectories(fs, segmentPath, zipParts.length > 1 ? 
2 : 3); } } catch (IOException e) { @@ -131,11 +120,11 @@ public void killAll() throws IOException fs.delete(storageDirectory, true); } - private void mayBeDeleteParentsUpto(final FileSystem fs, final Path segmentPath, final int maxDepthTobeDeleted) + private void removeEmptyParentDirectories(final FileSystem fs, final Path segmentPath, final int depth) { Path path = segmentPath; try { - for (int i = 1; i <= maxDepthTobeDeleted; i++) { + for (int i = 1; i <= depth; i++) { path = path.getParent(); if (fs.listStatus(path).length != 0 || !fs.delete(path, false)) { break; diff --git a/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentPusher.java b/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentPusher.java index 33a85ac2147a..94b41e70e199 100644 --- a/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentPusher.java +++ b/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentPusher.java @@ -20,6 +20,7 @@ package io.druid.storage.hdfs; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.io.ByteSink; import com.google.common.io.ByteSource; @@ -57,11 +58,8 @@ public class HdfsDataSegmentPusher implements DataSegmentPusher private final String fullyQualifiedStorageDirectory; @Inject - public HdfsDataSegmentPusher( - HdfsDataSegmentPusherConfig config, - Configuration hadoopConfig, - ObjectMapper jsonMapper - ) throws IOException + public HdfsDataSegmentPusher(HdfsDataSegmentPusherConfig config, Configuration hadoopConfig, ObjectMapper jsonMapper) + throws IOException { this.config = config; this.hadoopConfig = hadoopConfig; @@ -89,9 +87,11 @@ public String getPathForHadoop() } @Override - public DataSegment push(File inDir, DataSegment segment) throws IOException + public DataSegment push(final File inDir, final DataSegment segment, final boolean useUniquePath) throws IOException { - final String storageDir = this.getStorageDir(segment); + // For HDFS, useUniquePath does not affect the directory tree but instead affects the filename, which is of the form + // '{partitionNum}_index.zip' without unique paths and '{partitionNum}_{UUID}_index.zip' with unique paths. + final String storageDir = this.getStorageDir(segment, false); log.info( "Copying segment[%s] to HDFS at location[%s/%s]", @@ -116,17 +116,20 @@ public DataSegment push(File inDir, DataSegment segment) throws IOException final DataSegment dataSegment; try (FSDataOutputStream out = fs.create(tmpIndexFile)) { size = CompressionUtils.zip(inDir, out); + final String uniquePrefix = useUniquePath ? 
DataSegmentPusher.generateUniquePath() + "_" : ""; final Path outIndexFile = new Path(StringUtils.format( - "%s/%s/%d_index.zip", + "%s/%s/%d_%sindex.zip", fullyQualifiedStorageDirectory, storageDir, - segment.getShardSpec().getPartitionNum() + segment.getShardSpec().getPartitionNum(), + uniquePrefix )); final Path outDescriptorFile = new Path(StringUtils.format( - "%s/%s/%d_descriptor.json", + "%s/%s/%d_%sdescriptor.json", fullyQualifiedStorageDirectory, storageDir, - segment.getShardSpec().getPartitionNum() + segment.getShardSpec().getPartitionNum(), + uniquePrefix )); dataSegment = segment.withLoadSpec(makeLoadSpec(outIndexFile.toUri())) @@ -167,13 +170,12 @@ private void copyFilesWithChecks(final FileSystem fs, final Path from, final Pat if (!HadoopFsWrapper.rename(fs, from, to)) { if (fs.exists(to)) { log.info( - "Unable to rename temp Index file[%s] to final segment path [%s]. " - + "It is already pushed by a replica task.", + "Unable to rename temp file [%s] to segment path [%s], it may have already been pushed by a replica task.", from, to ); } else { - throw new IOE("Failed to rename temp Index file[%s] and final segment path[%s] is not present.", from, to); + throw new IOE("Failed to rename temp file [%s] and final segment path [%s] is not present.", from, to); } } } @@ -208,8 +210,17 @@ public Map makeLoadSpec(URI finalIndexZipFilePath) */ @Override - public String getStorageDir(DataSegment segment) + public String getStorageDir(DataSegment segment, boolean useUniquePath) { + // This is only called by HdfsDataSegmentPusher.push(), which will always set useUniquePath to false since any + // 'uniqueness' will be applied not to the directory but to the filename along with the shard number. This is done + // to avoid performance issues due to excessive HDFS directories. Hence useUniquePath is ignored here and we + // expect it to be false. + Preconditions.checkArgument( + !useUniquePath, + "useUniquePath must be false for HdfsDataSegmentPusher.getStorageDir()" + ); + return JOINER.join( segment.getDataSource(), StringUtils.format( @@ -224,9 +235,10 @@ public String getStorageDir(DataSegment segment) @Override public String makeIndexPathName(DataSegment dataSegment, String indexName) { + // This is only called from Hadoop batch which doesn't require unique segment paths so set useUniquePath=false return StringUtils.format( "./%s/%d_%s", - this.getStorageDir(dataSegment), + this.getStorageDir(dataSegment, false), dataSegment.getShardSpec().getPartitionNum(), indexName ); diff --git a/extensions-core/hdfs-storage/src/main/java/org/apache/hadoop/fs/HadoopFsWrapper.java b/extensions-core/hdfs-storage/src/main/java/org/apache/hadoop/fs/HadoopFsWrapper.java index df00fdb7a0fa..913adb0977bc 100644 --- a/extensions-core/hdfs-storage/src/main/java/org/apache/hadoop/fs/HadoopFsWrapper.java +++ b/extensions-core/hdfs-storage/src/main/java/org/apache/hadoop/fs/HadoopFsWrapper.java @@ -36,14 +36,15 @@ public class HadoopFsWrapper private HadoopFsWrapper() {} /** - * Same as FileSystem.rename(from, to, Options.Rename.NONE) . That is, - * it returns "false" when "to" directory already exists. It is different from FileSystem.rename(from, to) - * which moves "from" directory inside "to" directory if it already exists. + * Same as FileSystem.rename(from, to, Options.Rename). It is different from FileSystem.rename(from, to) which moves + * "from" directory inside "to" directory if it already exists. 
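 * <p>(Editorial sketch, not from the patch itself: roughly how a pusher is expected to use this
 * wrapper; the paths below are hypothetical.)
 * <pre>
 *   FileSystem fs = FileSystem.get(conf);
 *   Path tmp = new Path("/druid/tmp/4abc/index.zip");       // staged upload
 *   Path dst = new Path("/druid/segments/ds/0_index.zip");  // final location
 *   if (!HadoopFsWrapper.rename(fs, tmp, dst)) {
 *     // false: destination already exists, e.g. a replica task won the race;
 *     // the caller logs this and treats the push as already complete.
 *   }
 * </pre>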
* * @param from * @param to - * @return - * @throws IOException + * + * @return true if operation succeeded, false if destination already exists + * + * @throws IOException if trying to overwrite a non-empty directory */ public static boolean rename(FileSystem fs, Path from, Path to) throws IOException { @@ -51,8 +52,8 @@ public static boolean rename(FileSystem fs, Path from, Path to) throws IOExcepti fs.rename(from, to, Options.Rename.NONE); return true; } - catch (IOException ex) { - log.warn(ex, "Failed to rename [%s] to [%s].", from, to); + catch (FileAlreadyExistsException ex) { + log.info(ex, "Destination exists while renaming [%s] to [%s]", from, to); return false; } } diff --git a/extensions-core/hdfs-storage/src/test/java/io/druid/segment/loading/HdfsDataSegmentFinderTest.java b/extensions-core/hdfs-storage/src/test/java/io/druid/segment/loading/HdfsDataSegmentFinderTest.java index 626124622527..5e052df1e6d1 100644 --- a/extensions-core/hdfs-storage/src/test/java/io/druid/segment/loading/HdfsDataSegmentFinderTest.java +++ b/extensions-core/hdfs-storage/src/test/java/io/druid/segment/loading/HdfsDataSegmentFinderTest.java @@ -25,6 +25,7 @@ import com.google.common.collect.ImmutableMap; import io.druid.java.util.common.IOE; import io.druid.java.util.common.Intervals; +import io.druid.java.util.common.StringUtils; import io.druid.segment.TestHelper; import io.druid.storage.hdfs.HdfsDataSegmentFinder; import io.druid.timeline.DataSegment; @@ -278,6 +279,30 @@ public void testFindSegmentsFail() throws Exception hdfsDataSegmentFinder.findSegments(dataSourceDir.toString(), false); } + @Test + public void testPreferNewestSegment() throws Exception + { + dataSourceDir = new Path(new Path(uriBase), "/usr/replicaDataSource"); + descriptor1 = new Path(dataSourceDir, StringUtils.format("interval1/v1/%d_%s_%s", 0, "older", DESCRIPTOR_JSON)); + descriptor2 = new Path(dataSourceDir, StringUtils.format("interval1/v1/%d_%s_%s", 0, "newer", DESCRIPTOR_JSON)); + indexZip1 = new Path(descriptor1.getParent(), StringUtils.format("%d_%s_%s", 0, "older", INDEX_ZIP)); + indexZip2 = new Path(descriptor2.getParent(), StringUtils.format("%d_%s_%s", 0, "newer", INDEX_ZIP)); + + mapper.writeValue(fs.create(descriptor1), SEGMENT_1); + mapper.writeValue(fs.create(descriptor2), SEGMENT_1); + + create(indexZip1); + Thread.sleep(1000); + create(indexZip2); + + final Set segments = new HdfsDataSegmentFinder(conf, mapper).findSegments( + dataSourceDir.toString(), false + ); + + Assert.assertEquals(1, segments.size()); + Assert.assertEquals(indexZip2.toUri().getPath(), segments.iterator().next().getLoadSpec().get("path")); + } + private String getDescriptorPath(DataSegment segment) { final Path indexzip = new Path(String.valueOf(segment.getLoadSpec().get("path"))); diff --git a/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentKillerTest.java b/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentKillerTest.java index bdf499dcd904..dba43507a699 100644 --- a/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentKillerTest.java +++ b/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentKillerTest.java @@ -26,13 +26,13 @@ import io.druid.timeline.DataSegment; import io.druid.timeline.partition.NoneShardSpec; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.Assert; import 
org.junit.Test; import java.io.IOException; +import java.util.UUID; /** */ @@ -129,7 +129,10 @@ public String getStorageDirectory() Path interval1Dir = new Path(dataSourceDir, "intervalNew"); Path version11Dir = new Path(interval1Dir, "v1"); - makePartitionDirWithIndexWitNewFormat(fs, version11Dir, 3); + Assert.assertTrue(fs.mkdirs(version11Dir)); + fs.createNewFile(new Path(version11Dir, StringUtils.format("%s_index.zip", 3))); + fs.createNewFile(new Path(version11Dir, StringUtils.format("%s_descriptor.json", 3))); + killer.kill(getSegmentWithPath(new Path(version11Dir, "3_index.zip").toString())); Assert.assertFalse(fs.exists(version11Dir)); @@ -141,7 +144,7 @@ public String getStorageDirectory() } @Test - public void testKillNonExistingSegment() throws Exception + public void testKillForSegmentWithUniquePath() throws Exception { Configuration config = new Configuration(); HdfsDataSegmentKiller killer = new HdfsDataSegmentKiller( @@ -155,28 +158,51 @@ public String getStorageDirectory() } } ); - killer.kill(getSegmentWithPath(new Path("/xxx/", "index.zip").toString())); + + FileSystem fs = FileSystem.get(config); + Path dataSourceDir = new Path("/tmp/dataSourceNew"); + + Path interval1Dir = new Path(dataSourceDir, "intervalNew"); + Path version11Dir = new Path(interval1Dir, "v1"); + String uuid = UUID.randomUUID().toString().substring(0, 5); + + Assert.assertTrue(fs.mkdirs(version11Dir)); + fs.createNewFile(new Path(version11Dir, StringUtils.format("%s_%s_index.zip", 3, uuid))); + fs.createNewFile(new Path(version11Dir, StringUtils.format("%s_%s_descriptor.json", 3, uuid))); + + killer.kill(getSegmentWithPath(new Path(version11Dir, StringUtils.format("%s_%s_index.zip", 3, uuid)).toString())); + + Assert.assertFalse(fs.exists(version11Dir)); + Assert.assertFalse(fs.exists(interval1Dir)); + Assert.assertTrue(fs.exists(dataSourceDir)); + Assert.assertTrue(fs.exists(new Path("/tmp"))); + Assert.assertTrue(fs.exists(dataSourceDir)); + Assert.assertTrue(fs.delete(dataSourceDir, false)); } - private void makePartitionDirWithIndex(FileSystem fs, Path path) throws IOException + @Test + public void testKillNonExistingSegment() throws Exception { - Assert.assertTrue(fs.mkdirs(path)); - try (FSDataOutputStream os = fs.create(new Path(path, "index.zip")); FSDataOutputStream oos = fs.create(new Path( - path, - "descriptor.json" - ))) { - } + Configuration config = new Configuration(); + HdfsDataSegmentKiller killer = new HdfsDataSegmentKiller( + config, + new HdfsDataSegmentPusherConfig() + { + @Override + public String getStorageDirectory() + { + return "/tmp"; + } + } + ); + killer.kill(getSegmentWithPath(new Path("/xxx/", "index.zip").toString())); } - private void makePartitionDirWithIndexWitNewFormat(FileSystem fs, Path path, Integer partitionNumber) - throws IOException + private void makePartitionDirWithIndex(FileSystem fs, Path path) throws IOException { Assert.assertTrue(fs.mkdirs(path)); - try (FSDataOutputStream os = fs.create(new Path( - path, - StringUtils.format("%s_index.zip", partitionNumber) - )); FSDataOutputStream oos = fs.create(new Path(path, StringUtils.format("%s_descriptor.json", partitionNumber)))) { - } + fs.createNewFile(new Path(path, "index.zip")); + fs.createNewFile(new Path(path, "descriptor.json")); } private DataSegment getSegmentWithPath(String path) diff --git a/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentPusherTest.java b/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentPusherTest.java index 
95dd85bf2b9e..9bb25f90707f 100644 --- a/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentPusherTest.java +++ b/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentPusherTest.java @@ -127,7 +127,8 @@ public void testPushWithMultipleSegments() throws Exception testUsingSchemeForMultipleSegments("file", 3); } - private void testUsingScheme(final String scheme) throws Exception + @Test + public void testUsingUniqueFilePath() throws Exception { Configuration conf = new Configuration(true); @@ -142,11 +143,7 @@ private void testUsingScheme(final String scheme) throws Exception HdfsDataSegmentPusherConfig config = new HdfsDataSegmentPusherConfig(); final File storageDirectory = tempFolder.newFolder(); - config.setStorageDirectory( - scheme != null - ? StringUtils.format("%s://%s", scheme, storageDirectory.getAbsolutePath()) - : storageDirectory.getAbsolutePath() - ); + config.setStorageDirectory(StringUtils.format("file://%s", storageDirectory.getAbsolutePath())); HdfsDataSegmentPusher pusher = new HdfsDataSegmentPusher(config, conf, new DefaultObjectMapper()); DataSegment segmentToPush = new DataSegment( @@ -161,51 +158,13 @@ private void testUsingScheme(final String scheme) throws Exception size ); - DataSegment segment = pusher.push(segmentDir, segmentToPush); - + DataSegment segment = pusher.push(segmentDir, segmentToPush, true); - String indexUri = StringUtils.format( - "%s/%s/%d_index.zip", - FileSystem.newInstance(conf).makeQualified(new Path(config.getStorageDirectory())).toUri().toString(), - pusher.getStorageDir(segmentToPush), - segmentToPush.getShardSpec().getPartitionNum() + String matcher = ".*/foo/20150101T000000\\.000Z_20160101T000000\\.000Z/0/0_[A-Za-z0-9-]{36}_index\\.zip"; + Assert.assertTrue( + segment.getLoadSpec().get("path").toString(), + segment.getLoadSpec().get("path").toString().matches(matcher) ); - - Assert.assertEquals(segmentToPush.getSize(), segment.getSize()); - Assert.assertEquals(segmentToPush, segment); - Assert.assertEquals(ImmutableMap.of( - "type", - "hdfs", - "path", - indexUri - ), segment.getLoadSpec()); - // rename directory after push - final String segmentPath = pusher.getStorageDir(segment); - - File indexFile = new File(StringUtils.format( - "%s/%s/%d_index.zip", - storageDirectory, - segmentPath, - segment.getShardSpec().getPartitionNum() - )); - Assert.assertTrue(indexFile.exists()); - File descriptorFile = new File(StringUtils.format( - "%s/%s/%d_descriptor.json", - storageDirectory, - segmentPath, - segment.getShardSpec().getPartitionNum() - )); - Assert.assertTrue(descriptorFile.exists()); - - // push twice will fail and temp dir cleaned - File outDir = new File(StringUtils.format("%s/%s", config.getStorageDirectory(), segmentPath)); - outDir.setReadOnly(); - try { - pusher.push(segmentDir, segmentToPush); - } - catch (IOException e) { - Assert.fail("should not throw exception"); - } } private void testUsingSchemeForMultipleSegments(final String scheme, final int numberOfSegments) throws Exception @@ -246,12 +205,12 @@ private void testUsingSchemeForMultipleSegments(final String scheme, final int n } for (int i = 0; i < numberOfSegments; i++) { - final DataSegment pushedSegment = pusher.push(segmentDir, segments[i]); + final DataSegment pushedSegment = pusher.push(segmentDir, segments[i], false); String indexUri = StringUtils.format( "%s/%s/%d_index.zip", FileSystem.newInstance(conf).makeQualified(new Path(config.getStorageDirectory())).toUri().toString(), - 
pusher.getStorageDir(segments[i]), + pusher.getStorageDir(segments[i], false), segments[i].getShardSpec().getPartitionNum() ); @@ -264,7 +223,7 @@ private void testUsingSchemeForMultipleSegments(final String scheme, final int n indexUri ), pushedSegment.getLoadSpec()); // rename directory after push - String segmentPath = pusher.getStorageDir(pushedSegment); + String segmentPath = pusher.getStorageDir(pushedSegment, false); File indexFile = new File(StringUtils.format( "%s/%s/%d_index.zip", @@ -293,7 +252,7 @@ private void testUsingSchemeForMultipleSegments(final String scheme, final int n indexUri ), fromDescriptorFileDataSegment.getLoadSpec()); // rename directory after push - segmentPath = pusher.getStorageDir(fromDescriptorFileDataSegment); + segmentPath = pusher.getStorageDir(fromDescriptorFileDataSegment, false); indexFile = new File(StringUtils.format( "%s/%s/%d_index.zip", @@ -308,7 +267,7 @@ private void testUsingSchemeForMultipleSegments(final String scheme, final int n File outDir = new File(StringUtils.format("%s/%s", config.getStorageDirectory(), segmentPath)); outDir.setReadOnly(); try { - pusher.push(segmentDir, segments[i]); + pusher.push(segmentDir, segments[i], false); } catch (IOException e) { Assert.fail("should not throw exception"); @@ -316,6 +275,87 @@ private void testUsingSchemeForMultipleSegments(final String scheme, final int n } } + private void testUsingScheme(final String scheme) throws Exception + { + Configuration conf = new Configuration(true); + + // Create a mock segment on disk + File segmentDir = tempFolder.newFolder(); + File tmp = new File(segmentDir, "version.bin"); + + final byte[] data = new byte[]{0x0, 0x0, 0x0, 0x1}; + Files.write(data, tmp); + final long size = data.length; + + HdfsDataSegmentPusherConfig config = new HdfsDataSegmentPusherConfig(); + final File storageDirectory = tempFolder.newFolder(); + + config.setStorageDirectory( + scheme != null + ? 
StringUtils.format("%s://%s", scheme, storageDirectory.getAbsolutePath()) + : storageDirectory.getAbsolutePath() + ); + HdfsDataSegmentPusher pusher = new HdfsDataSegmentPusher(config, conf, new DefaultObjectMapper()); + + DataSegment segmentToPush = new DataSegment( + "foo", + Intervals.of("2015/2016"), + "0", + Maps.newHashMap(), + Lists.newArrayList(), + Lists.newArrayList(), + NoneShardSpec.instance(), + 0, + size + ); + + DataSegment segment = pusher.push(segmentDir, segmentToPush, false); + + + String indexUri = StringUtils.format( + "%s/%s/%d_index.zip", + FileSystem.newInstance(conf).makeQualified(new Path(config.getStorageDirectory())).toUri().toString(), + pusher.getStorageDir(segmentToPush, false), + segmentToPush.getShardSpec().getPartitionNum() + ); + + Assert.assertEquals(segmentToPush.getSize(), segment.getSize()); + Assert.assertEquals(segmentToPush, segment); + Assert.assertEquals(ImmutableMap.of( + "type", + "hdfs", + "path", + indexUri + ), segment.getLoadSpec()); + // rename directory after push + final String segmentPath = pusher.getStorageDir(segment, false); + + File indexFile = new File(StringUtils.format( + "%s/%s/%d_index.zip", + storageDirectory, + segmentPath, + segment.getShardSpec().getPartitionNum() + )); + Assert.assertTrue(indexFile.exists()); + File descriptorFile = new File(StringUtils.format( + "%s/%s/%d_descriptor.json", + storageDirectory, + segmentPath, + segment.getShardSpec().getPartitionNum() + )); + Assert.assertTrue(descriptorFile.exists()); + + // push twice will fail and temp dir cleaned + File outDir = new File(StringUtils.format("%s/%s", config.getStorageDirectory(), segmentPath)); + outDir.setReadOnly(); + try { + pusher.push(segmentDir, segmentToPush, false); + } + catch (IOException e) { + Assert.fail("should not throw exception"); + } + } + public static class TestObjectMapper extends ObjectMapper { public TestObjectMapper() @@ -371,7 +411,7 @@ public void shouldNotHaveColonsInHdfsStorageDir() throws Exception 1 ); - String storageDir = hdfsDataSegmentPusher.getStorageDir(segment); + String storageDir = hdfsDataSegmentPusher.getStorageDir(segment, false); Assert.assertEquals("something/20111001T000000.000Z_20111002T000000.000Z/brand_new_version", storageDir); } diff --git a/extensions-core/histogram/pom.xml b/extensions-core/histogram/pom.xml index 5f0a3765d649..dc6728bac2c2 100644 --- a/extensions-core/histogram/pom.xml +++ b/extensions-core/histogram/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/histogram/src/main/java/io/druid/query/aggregation/histogram/sql/QuantileSqlAggregator.java b/extensions-core/histogram/src/main/java/io/druid/query/aggregation/histogram/sql/QuantileSqlAggregator.java index 6711c36f4b1b..b9c8d3d3d46b 100644 --- a/extensions-core/histogram/src/main/java/io/druid/query/aggregation/histogram/sql/QuantileSqlAggregator.java +++ b/extensions-core/histogram/src/main/java/io/druid/query/aggregation/histogram/sql/QuantileSqlAggregator.java @@ -93,15 +93,27 @@ public Aggregation toDruidAggregation( project, aggregateCall.getArgList().get(1) ); - final float probability = ((Number) RexLiteral.value(probabilityArg)).floatValue(); + if (!probabilityArg.isA(SqlKind.LITERAL)) { + // Probability must be a literal in order to plan. 
+ return null; + } + + final float probability = ((Number) RexLiteral.value(probabilityArg)).floatValue(); final int resolution; + if (aggregateCall.getArgList().size() >= 3) { final RexNode resolutionArg = Expressions.fromFieldAccess( rowSignature, project, aggregateCall.getArgList().get(2) ); + + if (!resolutionArg.isA(SqlKind.LITERAL)) { + // Resolution must be a literal in order to plan. + return null; + } + resolution = ((Number) RexLiteral.value(resolutionArg)).intValue(); } else { resolution = ApproximateHistogram.DEFAULT_HISTOGRAM_SIZE; diff --git a/extensions-core/histogram/src/test/java/io/druid/query/aggregation/histogram/sql/QuantileSqlAggregatorTest.java b/extensions-core/histogram/src/test/java/io/druid/query/aggregation/histogram/sql/QuantileSqlAggregatorTest.java index 45197c2c7e0e..77f629e5a1a1 100644 --- a/extensions-core/histogram/src/test/java/io/druid/query/aggregation/histogram/sql/QuantileSqlAggregatorTest.java +++ b/extensions-core/histogram/src/test/java/io/druid/query/aggregation/histogram/sql/QuantileSqlAggregatorTest.java @@ -27,6 +27,7 @@ import io.druid.java.util.common.guava.Sequences; import io.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory; import io.druid.query.Druids; +import io.druid.query.QueryDataSource; import io.druid.query.aggregation.CountAggregatorFactory; import io.druid.query.aggregation.DoubleSumAggregatorFactory; import io.druid.query.aggregation.FilteredAggregatorFactory; @@ -34,9 +35,13 @@ import io.druid.query.aggregation.histogram.ApproximateHistogramDruidModule; import io.druid.query.aggregation.histogram.ApproximateHistogramFoldingAggregatorFactory; import io.druid.query.aggregation.histogram.QuantilePostAggregator; +import io.druid.query.aggregation.post.ArithmeticPostAggregator; +import io.druid.query.aggregation.post.FieldAccessPostAggregator; +import io.druid.query.dimension.DefaultDimensionSpec; import io.druid.query.expression.TestExprMacroTable; import io.druid.query.filter.NotDimFilter; import io.druid.query.filter.SelectorDimFilter; +import io.druid.query.groupby.GroupByQuery; import io.druid.query.spec.MultipleIntervalSegmentSpec; import io.druid.segment.IndexBuilder; import io.druid.segment.QueryableIndex; @@ -293,4 +298,69 @@ public void testQuantileOnComplexColumn() throws Exception ); } } + + @Test + public void testQuantileOnInnerQuery() throws Exception + { + try (final DruidPlanner planner = plannerFactory.createPlanner(null)) { + final String sql = "SELECT AVG(x), APPROX_QUANTILE(x, 0.98)\n" + + "FROM (SELECT dim2, SUM(m1) AS x FROM foo GROUP BY dim2)"; + + final PlannerResult plannerResult = planner.plan(sql); + + // Verify results + final List results = Sequences.toList(plannerResult.run(), new ArrayList()); + final List expectedResults = ImmutableList.of( + new Object[]{7.0, 8.26386833190918} + ); + Assert.assertEquals(expectedResults.size(), results.size()); + for (int i = 0; i < expectedResults.size(); i++) { + Assert.assertArrayEquals(expectedResults.get(i), results.get(i)); + } + + // Verify query + Assert.assertEquals( + GroupByQuery.builder() + .setDataSource( + new QueryDataSource( + GroupByQuery.builder() + .setDataSource(CalciteTests.DATASOURCE1) + .setInterval(new MultipleIntervalSegmentSpec(ImmutableList.of(Filtration.eternity()))) + .setGranularity(Granularities.ALL) + .setDimensions(ImmutableList.of(new DefaultDimensionSpec("dim2", "d0"))) + .setAggregatorSpecs( + ImmutableList.of( + new DoubleSumAggregatorFactory("a0", "m1") + ) + ) + .setContext(ImmutableMap.of()) + .build() + ) + ) + 
.setInterval(new MultipleIntervalSegmentSpec(ImmutableList.of(Filtration.eternity()))) + .setGranularity(Granularities.ALL) + .setAggregatorSpecs(ImmutableList.of( + new DoubleSumAggregatorFactory("_a0:sum", "a0"), + new CountAggregatorFactory("_a0:count"), + new ApproximateHistogramAggregatorFactory("_a1:agg", "a0", null, null, null, null) + )) + .setPostAggregatorSpecs( + ImmutableList.of( + new ArithmeticPostAggregator( + "_a0", + "quotient", + ImmutableList.of( + new FieldAccessPostAggregator(null, "_a0:sum"), + new FieldAccessPostAggregator(null, "_a0:count") + ) + ), + new QuantilePostAggregator("_a1", "_a1:agg", 0.98f) + ) + ) + .setContext(ImmutableMap.of()) + .build(), + Iterables.getOnlyElement(queryLogHook.getRecordedQueries()) + ); + } + } } diff --git a/extensions-core/kafka-eight/pom.xml b/extensions-core/kafka-eight/pom.xml index 2b94032c5878..ab235b7e290d 100644 --- a/extensions-core/kafka-eight/pom.xml +++ b/extensions-core/kafka-eight/pom.xml @@ -27,7 +27,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/kafka-extraction-namespace/pom.xml b/extensions-core/kafka-extraction-namespace/pom.xml index 0edc753e9bc0..ff6d6e749b5d 100644 --- a/extensions-core/kafka-extraction-namespace/pom.xml +++ b/extensions-core/kafka-extraction-namespace/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 io.druid.extensions druid-kafka-extraction-namespace @@ -29,7 +28,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/kafka-indexing-service/pom.xml b/extensions-core/kafka-indexing-service/pom.xml index 007336e282f8..f5d17bbe2199 100644 --- a/extensions-core/kafka-indexing-service/pom.xml +++ b/extensions-core/kafka-indexing-service/pom.xml @@ -29,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTask.java b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTask.java index d4fee282bde5..d7bffedea17b 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTask.java +++ b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTask.java @@ -44,7 +44,7 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.data.input.Committer; import io.druid.data.input.InputRow; import io.druid.data.input.impl.InputRowParser; @@ -62,9 +62,11 @@ import io.druid.indexing.common.task.AbstractTask; import io.druid.indexing.common.task.RealtimeIndexTask; import io.druid.indexing.common.task.TaskResource; +import io.druid.indexing.common.task.Tasks; import io.druid.indexing.kafka.supervisor.KafkaSupervisor; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.ISE; +import io.druid.java.util.common.Intervals; import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.collect.Utils; import io.druid.java.util.common.concurrent.Execs; @@ -81,9 +83,9 @@ import io.druid.segment.realtime.FireDepartmentMetrics; import io.druid.segment.realtime.RealtimeMetricsMonitor; import io.druid.segment.realtime.appenderator.Appenderator; -import io.druid.segment.realtime.appenderator.AppenderatorDriver; import 
io.druid.segment.realtime.appenderator.AppenderatorDriverAddResult; import io.druid.segment.realtime.appenderator.Appenderators; +import io.druid.segment.realtime.appenderator.StreamAppenderatorDriver; import io.druid.segment.realtime.appenderator.SegmentIdentifier; import io.druid.segment.realtime.appenderator.SegmentsAndMetadata; import io.druid.segment.realtime.appenderator.TransactionalSegmentPublisher; @@ -135,7 +137,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.RejectedExecutionException; @@ -186,7 +187,7 @@ public enum Status private TaskToolbox toolbox; private volatile Appenderator appenderator = null; - private volatile AppenderatorDriver driver = null; + private volatile StreamAppenderatorDriver driver = null; private volatile FireDepartmentMetrics fireDepartmentMetrics = null; private volatile DateTime startTime; private volatile Status status = Status.NOT_STARTED; // this is only ever set by the task runner thread (runThread) @@ -306,6 +307,12 @@ private static String makeTaskId(String dataSource, int randomBits) return Joiner.on("_").join(TYPE, dataSource, suffix); } + @Override + public int getPriority() + { + return getContextValue(Tasks.PRIORITY_KEY, Tasks.DEFAULT_REALTIME_TASK_PRIORITY); + } + @Override public String getType() { @@ -371,7 +378,7 @@ private void createAndStartPublishExecutor() Joiner.on(", ").join( result.getSegments().stream().map(DataSegment::getIdentifier).collect(Collectors.toList()) ), - result.getCommitMetadata() + Preconditions.checkNotNull(result.getCommitMetadata(), "commitMetadata") ); } @@ -422,9 +429,11 @@ public TaskStatus run(final TaskToolbox toolbox) throws Exception if (getContext() != null && getContext().get("checkpoints") != null) { log.info("Got checkpoints [%s]", (String) getContext().get("checkpoints")); final TreeMap> checkpoints = toolbox.getObjectMapper().readValue( - (String) getContext().get("checkpoints"), new TypeReference>>() + (String) getContext().get("checkpoints"), + new TypeReference>>() { - }); + } + ); Iterator>> sequenceOffsets = checkpoints.entrySet().iterator(); Map.Entry> previous = sequenceOffsets.next(); @@ -511,13 +520,14 @@ public TaskStatus run(final TaskToolbox toolbox) throws Exception final Object restoredMetadata = driver.startJob(); if (restoredMetadata == null) { // no persist has happened so far + // so either this is a brand new task or replacement of a failed task Preconditions.checkState(sequences.get(0).startOffsets.entrySet().stream().allMatch( partitionOffsetEntry -> Longs.compare( partitionOffsetEntry.getValue(), ioConfig.getStartPartitions() .getPartitionOffsetMap() .get(partitionOffsetEntry.getKey()) - ) == 0 + ) >= 0 ), "Sequence offsets are not compatible with start offsets of task"); nextOffsets.putAll(sequences.get(0).startOffsets); } else { @@ -544,7 +554,7 @@ public TaskStatus run(final TaskToolbox toolbox) throws Exception ioConfig.getStartPartitions().getPartitionOffsetMap().keySet() ); } - // sequences size can 0 only when all sequences got published and task stopped before it could finish + // sequences size can be 0 only when all sequences got published and task stopped before it could finish // which is super rare if (sequences.size() == 0 || sequences.get(sequences.size() - 1).isCheckpointed()) { 
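// (Editorial note on the surrounding restore logic: a restored replacement task may
// legitimately sit ahead of ioConfig's start offsets, since the failed task it
// replaces can already have performed incremental handoffs; hence the sequence-offset
// check above was relaxed from "== 0" to ">= 0".)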
this.endOffsets.putAll(sequences.size() == 0 @@ -707,7 +717,7 @@ public void run() if (addResult.isOk()) { // If the number of rows in the segment exceeds the threshold after adding a row, - // move the segment out from the active segments of AppenderatorDriver to make a new segment. + // move the segment out from the active segments of BaseAppenderatorDriver to make a new segment. if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) { if (!sequenceToUse.isCheckpointed()) { sequenceToCheckpoint = sequenceToUse; @@ -745,7 +755,6 @@ public void onFailure(Throwable t) } } ); - } } catch (ParseException e) { @@ -848,7 +857,7 @@ public void onFailure(Throwable t) Joiner.on(", ").join( handedOff.getSegments().stream().map(DataSegment::getIdentifier).collect(Collectors.toList()) ), - handedOff.getCommitMetadata() + Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata") ); } } @@ -938,9 +947,9 @@ private TaskStatus runLegacy(final TaskToolbox toolbox) throws Exception ); try ( - final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox); - final AppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics); - final KafkaConsumer consumer = newConsumer() + final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox); + final StreamAppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics); + final KafkaConsumer consumer = newConsumer() ) { toolbox.getDataSegmentServerAnnouncer().announce(); toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode); @@ -1102,7 +1111,7 @@ public void run() if (addResult.isOk()) { // If the number of rows in the segment exceeds the threshold after adding a row, - // move the segment out from the active segments of AppenderatorDriver to make a new segment. + // move the segment out from the active segments of BaseAppenderatorDriver to make a new segment. 
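// (Editorial aside: "moving out" only seals the segment against further rows; it
// remains with the driver until publish/hand-off, and the next add for this
// sequence allocates a fresh SegmentIdentifier.)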
if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) { segmentsToMoveOut.computeIfAbsent(sequenceName, k -> new HashSet<>()) .add(addResult.getSegmentIdentifier()); @@ -1168,7 +1177,7 @@ public void run() final TransactionalSegmentPublisher publisher = (segments, commitMetadata) -> { final KafkaPartitions finalPartitions = toolbox.getObjectMapper().convertValue( - ((Map) commitMetadata).get(METADATA_NEXT_PARTITIONS), + ((Map) Preconditions.checkNotNull(commitMetadata, "commitMetadata")).get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class ); @@ -1228,7 +1237,7 @@ public String apply(DataSegment input) } ) ), - handedOff.getCommitMetadata() + Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata") ); } } @@ -1267,7 +1276,7 @@ private void checkAndMaybeThrowException() } private void maybePersistAndPublishSequences(Supplier committerSupplier) - throws ExecutionException, InterruptedException + throws InterruptedException { for (SequenceMetadata sequenceMetadata : sequences) { sequenceMetadata.updateAssignments(nextOffsets); @@ -1791,17 +1800,18 @@ private Appenderator newAppenderator(FireDepartmentMetrics metrics, TaskToolbox ); } - private AppenderatorDriver newDriver( + private StreamAppenderatorDriver newDriver( final Appenderator appenderator, final TaskToolbox toolbox, final FireDepartmentMetrics metrics ) { - return new AppenderatorDriver( + return new StreamAppenderatorDriver( appenderator, new ActionBasedSegmentAllocator(toolbox.getTaskActionClient(), dataSchema), toolbox.getSegmentHandoffNotifierFactory(), new ActionBasedUsedSegmentChecker(toolbox.getTaskActionClient()), + toolbox.getDataSegmentKiller(), toolbox.getObjectMapper(), metrics ); @@ -2015,6 +2025,19 @@ private boolean withinMinMaxRecordTime(final InputRow row) final boolean afterMaximumMessageTime = ioConfig.getMaximumMessageTime().isPresent() && ioConfig.getMaximumMessageTime().get().isBefore(row.getTimestamp()); + if (!Intervals.ETERNITY.contains(row.getTimestamp())) { + final String errorMsg = StringUtils.format( + "Encountered row with timestamp that cannot be represented as a long: [%s]", + row + ); + log.debug(errorMsg); + if (tuningConfig.isReportParseExceptions()) { + throw new ParseException(errorMsg); + } else { + return false; + } + } + if (log.isDebugEnabled()) { if (beforeMinimumMessageTime) { log.debug( @@ -2207,7 +2230,7 @@ public TransactionalSegmentPublisher getPublisher(TaskToolbox toolbox, boolean u { return (segments, commitMetadata) -> { final KafkaPartitions finalPartitions = toolbox.getObjectMapper().convertValue( - ((Map) commitMetadata).get(METADATA_PUBLISH_PARTITIONS), + ((Map) Preconditions.checkNotNull(commitMetadata, "commitMetadata")).get(METADATA_PUBLISH_PARTITIONS), KafkaPartitions.class ); diff --git a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTaskClient.java b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTaskClient.java index 30a06de8c17d..6525d1276318 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTaskClient.java +++ b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTaskClient.java @@ -29,11 +29,11 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.HttpClient; 
-import com.metamx.http.client.Request; -import com.metamx.http.client.response.FullResponseHandler; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.FullResponseHandler; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.indexing.common.RetryPolicy; import io.druid.indexing.common.RetryPolicyConfig; import io.druid.indexing.common.RetryPolicyFactory; diff --git a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTaskClientFactory.java b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTaskClientFactory.java index 869392642003..da4d8c369f94 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTaskClientFactory.java +++ b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/KafkaIndexTaskClientFactory.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.inject.Inject; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.guice.annotations.EscalatedGlobal; import io.druid.guice.annotations.Json; import io.druid.indexing.common.TaskInfoProvider; diff --git a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/supervisor/KafkaSupervisor.java b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/supervisor/KafkaSupervisor.java index d0b8ea55c241..7cc79f6a98ad 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/supervisor/KafkaSupervisor.java +++ b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/supervisor/KafkaSupervisor.java @@ -43,9 +43,9 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.indexing.common.TaskInfoProvider; import io.druid.indexer.TaskLocation; import io.druid.indexing.common.TaskStatus; @@ -670,7 +670,7 @@ public void handle() throws ExecutionException, InterruptedException, TimeoutExc // as when the task starts they are sent existing checkpoints Preconditions.checkState( checkpoints.size() <= 1, - "Got checkpoint request with null as previous check point, however found more than one checkpoints in metadata store" + "Got checkpoint request with null as previous check point, however found more than one checkpoints" ); if (checkpoints.size() == 1) { log.info("Already checkpointed with dataSourceMetadata [%s]", checkpoints.get(0)); diff --git a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/supervisor/KafkaSupervisorSpec.java b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/supervisor/KafkaSupervisorSpec.java index d11709ecc620..2b540e537104 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/supervisor/KafkaSupervisorSpec.java +++ 
b/extensions-core/kafka-indexing-service/src/main/java/io/druid/indexing/kafka/supervisor/KafkaSupervisorSpec.java @@ -25,7 +25,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.guice.annotations.Json; import io.druid.indexing.kafka.KafkaIndexTaskClientFactory; import io.druid.indexing.overlord.IndexerMetadataStorageCoordinator; diff --git a/extensions-core/kafka-indexing-service/src/test/java/io/druid/indexing/kafka/KafkaIndexTaskClientTest.java b/extensions-core/kafka-indexing-service/src/test/java/io/druid/indexing/kafka/KafkaIndexTaskClientTest.java index 8d328aadbad2..2834cc838a89 100644 --- a/extensions-core/kafka-indexing-service/src/test/java/io/druid/indexing/kafka/KafkaIndexTaskClientTest.java +++ b/extensions-core/kafka-indexing-service/src/test/java/io/druid/indexing/kafka/KafkaIndexTaskClientTest.java @@ -27,10 +27,10 @@ import com.google.common.collect.Maps; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.FullResponseHandler; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.FullResponseHandler; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.indexing.common.TaskInfoProvider; import io.druid.indexer.TaskLocation; import io.druid.indexing.common.TaskStatus; diff --git a/extensions-core/kafka-indexing-service/src/test/java/io/druid/indexing/kafka/KafkaIndexTaskTest.java b/extensions-core/kafka-indexing-service/src/test/java/io/druid/indexing/kafka/KafkaIndexTaskTest.java index 298d1490f882..45f3003638f0 100644 --- a/extensions-core/kafka-indexing-service/src/test/java/io/druid/indexing/kafka/KafkaIndexTaskTest.java +++ b/extensions-core/kafka-indexing-service/src/test/java/io/druid/indexing/kafka/KafkaIndexTaskTest.java @@ -38,10 +38,10 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.core.NoopEmitter; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.MonitorScheduler; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.core.NoopEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.MonitorScheduler; import io.druid.client.cache.CacheConfig; import io.druid.client.cache.MapCache; import io.druid.data.input.impl.DimensionsSpec; @@ -155,10 +155,12 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.Callable; import java.util.concurrent.Executor; import java.util.concurrent.Executors; @@ -241,6 +243,7 @@ private static List> generateRecords(String topic new ProducerRecord(topic, 0, null, JB("2010", "c", "y", 1.0f)), new ProducerRecord(topic, 0, 
null, JB("2011", "d", "y", 1.0f)), new ProducerRecord(topic, 0, null, JB("2011", "e", "y", 1.0f)), + new ProducerRecord(topic, 0, null, JB("246140482-04-24T15:36:27.903Z", "x", "z", 1.0f)), new ProducerRecord(topic, 0, null, StringUtils.toUtf8("unparseable")), new ProducerRecord(topic, 0, null, null), new ProducerRecord(topic, 0, null, JB("2013", "f", "y", 1.0f)), @@ -456,7 +459,7 @@ public void testIncrementalHandOff() throws Exception // of events fetched across two partitions from Kafka final KafkaPartitions checkpoint1 = new KafkaPartitions(topic, ImmutableMap.of(0, 5L, 1, 0L)); final KafkaPartitions checkpoint2 = new KafkaPartitions(topic, ImmutableMap.of(0, 4L, 1, 2L)); - final KafkaPartitions endPartitions = new KafkaPartitions(topic, ImmutableMap.of(0, 8L, 1, 2L)); + final KafkaPartitions endPartitions = new KafkaPartitions(topic, ImmutableMap.of(0, 9L, 1, 2L)); final KafkaIndexTask task = createTask( null, new KafkaIOConfig( @@ -494,7 +497,7 @@ public void testIncrementalHandOff() throws Exception // Check metrics Assert.assertEquals(8, task.getFireDepartmentMetrics().processed()); Assert.assertEquals(1, task.getFireDepartmentMetrics().unparseable()); - Assert.assertEquals(1, task.getFireDepartmentMetrics().thrownAway()); + Assert.assertEquals(2, task.getFireDepartmentMetrics().thrownAway()); // Check published metadata SegmentDescriptor desc1 = SD(task, "2008/P1D", 0); @@ -506,7 +509,7 @@ public void testIncrementalHandOff() throws Exception SegmentDescriptor desc7 = SD(task, "2013/P1D", 0); Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3, desc4, desc5, desc6, desc7), publishedDescriptors()); Assert.assertEquals( - new KafkaDataSourceMetadata(new KafkaPartitions(topic, ImmutableMap.of(0, 8L, 1, 2L))), + new KafkaDataSourceMetadata(new KafkaPartitions(topic, ImmutableMap.of(0, 9L, 1, 2L))), metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()) ); @@ -970,7 +973,7 @@ public void testRunConflicting() throws Exception new KafkaIOConfig( "sequence1", new KafkaPartitions(topic, ImmutableMap.of(0, 3L)), - new KafkaPartitions(topic, ImmutableMap.of(0, 8L)), + new KafkaPartitions(topic, ImmutableMap.of(0, 9L)), kafkaServer.consumerProperties(), true, false, @@ -1001,7 +1004,7 @@ public void testRunConflicting() throws Exception Assert.assertEquals(0, task1.getFireDepartmentMetrics().thrownAway()); Assert.assertEquals(3, task2.getFireDepartmentMetrics().processed()); Assert.assertEquals(1, task2.getFireDepartmentMetrics().unparseable()); - Assert.assertEquals(1, task2.getFireDepartmentMetrics().thrownAway()); + Assert.assertEquals(2, task2.getFireDepartmentMetrics().thrownAway()); // Check published segments & metadata, should all be from the first task SegmentDescriptor desc1 = SD(task1, "2010/P1D", 0); @@ -1039,7 +1042,7 @@ public void testRunConflictingWithoutTransactions() throws Exception new KafkaIOConfig( "sequence1", new KafkaPartitions(topic, ImmutableMap.of(0, 3L)), - new KafkaPartitions(topic, ImmutableMap.of(0, 8L)), + new KafkaPartitions(topic, ImmutableMap.of(0, 9L)), kafkaServer.consumerProperties(), false, false, @@ -1076,7 +1079,7 @@ public void testRunConflictingWithoutTransactions() throws Exception Assert.assertEquals(0, task1.getFireDepartmentMetrics().thrownAway()); Assert.assertEquals(3, task2.getFireDepartmentMetrics().processed()); Assert.assertEquals(1, task2.getFireDepartmentMetrics().unparseable()); - Assert.assertEquals(1, task2.getFireDepartmentMetrics().thrownAway()); + Assert.assertEquals(2, 
task2.getFireDepartmentMetrics().thrownAway()); // Check published segments & metadata SegmentDescriptor desc3 = SD(task2, "2011/P1D", 1); @@ -1449,7 +1452,7 @@ public void testRunAndPauseAfterReadWithModifiedEndOffsets() throws Exception Assert.assertEquals(KafkaIndexTask.Status.PAUSED, task.getStatus()); // try again but with resume flag == true - newEndOffsets = ImmutableMap.of(0, 6L); + newEndOffsets = ImmutableMap.of(0, 7L); task.setEndOffsets(newEndOffsets, true, true); Assert.assertEquals(newEndOffsets, task.getEndOffsets()); Assert.assertNotEquals(KafkaIndexTask.Status.PAUSED, task.getStatus()); @@ -1468,7 +1471,7 @@ // Check metrics Assert.assertEquals(4, task.getFireDepartmentMetrics().processed()); Assert.assertEquals(1, task.getFireDepartmentMetrics().unparseable()); - Assert.assertEquals(0, task.getFireDepartmentMetrics().thrownAway()); + Assert.assertEquals(1, task.getFireDepartmentMetrics().thrownAway()); // Check published metadata SegmentDescriptor desc1 = SD(task, "2009/P1D", 0); @@ -1476,7 +1479,7 @@ SegmentDescriptor desc3 = SD(task, "2011/P1D", 0); Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3), publishedDescriptors()); Assert.assertEquals( - new KafkaDataSourceMetadata(new KafkaPartitions(topic, ImmutableMap.of(0, 6L))), + new KafkaDataSourceMetadata(new KafkaPartitions(topic, ImmutableMap.of(0, 7L))), metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()) ); @@ -1556,6 +1559,72 @@ public void testRunWithOffsetOutOfRangeExceptionAndNextOffsetGreaterThanLeastAva } } + @Test(timeout = 60_000L) + public void testRunContextSequenceAheadOfStartingOffsets() throws Exception + { + // This tests the case where a replacement task is created in place of a failed task + // that has done some incremental handoffs; the context will then contain the starting + // sequence offsets from which the task should resume reading, ignoring the start offsets in the ioConfig + if (!isIncrementalHandoffSupported) { + return; + } + // Insert data + try (final KafkaProducer kafkaProducer = kafkaServer.newProducer()) { + for (ProducerRecord record : records) { + kafkaProducer.send(record).get(); + } + } + + final TreeMap> sequences = new TreeMap<>(); + // Here the sequence number is 1 meaning that one incremental handoff was done by the failed task + // and this task should start reading from offset 2 for partition 0 + sequences.put(1, ImmutableMap.of(0, 2L)); + final Map context = new HashMap<>(); + context.put("checkpoints", objectMapper.writerWithType(new TypeReference>>() + { + }).writeValueAsString(sequences)); + + final KafkaIndexTask task = createTask( + null, + new KafkaIOConfig( + "sequence0", + // task should ignore these and use sequence info sent in the context + new KafkaPartitions(topic, ImmutableMap.of(0, 0L)), + new KafkaPartitions(topic, ImmutableMap.of(0, 5L)), + kafkaServer.consumerProperties(), + true, + false, + null, + null, + false + ), + context + ); + + final ListenableFuture future = runTask(task); + + // Wait for task to exit + Assert.assertEquals(TaskState.SUCCESS, future.get().getStatusCode()); + + // Check metrics + Assert.assertEquals(3, task.getFireDepartmentMetrics().processed()); + Assert.assertEquals(0, task.getFireDepartmentMetrics().unparseable()); + Assert.assertEquals(0, task.getFireDepartmentMetrics().thrownAway()); + + // Check published metadata + SegmentDescriptor desc1 = SD(task,
"2010/P1D", 0); + SegmentDescriptor desc2 = SD(task, "2011/P1D", 0); + Assert.assertEquals(ImmutableSet.of(desc1, desc2), publishedDescriptors()); + Assert.assertEquals( + new KafkaDataSourceMetadata(new KafkaPartitions(topic, ImmutableMap.of(0, 5L))), + metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()) + ); + + // Check segments in deep storage + Assert.assertEquals(ImmutableList.of("c"), readSegmentColumn("dim1", desc1)); + Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentColumn("dim1", desc2)); + } + private ListenableFuture runTask(final Task task) { try { @@ -1614,6 +1683,15 @@ private KafkaIndexTask createTask( return createTask(taskId, DATA_SCHEMA, ioConfig); } + private KafkaIndexTask createTask( + final String taskId, + final KafkaIOConfig ioConfig, + final Map context + ) + { + return createTask(taskId, DATA_SCHEMA, ioConfig, context); + } + private KafkaIndexTask createTask( final String taskId, final DataSchema dataSchema, @@ -1650,6 +1728,45 @@ private KafkaIndexTask createTask( return task; } + + private KafkaIndexTask createTask( + final String taskId, + final DataSchema dataSchema, + final KafkaIOConfig ioConfig, + final Map context + ) + { + final KafkaTuningConfig tuningConfig = new KafkaTuningConfig( + 1000, + maxRowsPerSegment, + new Period("P1Y"), + null, + null, + null, + true, + reportParseExceptions, + handoffConditionTimeout, + resetOffsetAutomatically, + null + ); + if (isIncrementalHandoffSupported) { + context.put(KafkaSupervisor.IS_INCREMENTAL_HANDOFF_SUPPORTED, true); + } + + final KafkaIndexTask task = new KafkaIndexTask( + taskId, + null, + cloneDataSchema(dataSchema), + tuningConfig, + ioConfig, + context, + null, + null + ); + task.setPollRetryMs(POLL_RETRY_MS); + return task; + } + private static DataSchema cloneDataSchema(final DataSchema dataSchema) { return new DataSchema( @@ -1884,9 +2001,9 @@ private File getSegmentDirectory() private List readSegmentColumn(final String column, final SegmentDescriptor descriptor) throws IOException { - File indexZip = new File( + File indexBasePath = new File( StringUtils.format( - "%s/%s/%s_%s/%s/%d/index.zip", + "%s/%s/%s_%s/%s/%d", getSegmentDirectory(), DATA_SCHEMA.getDataSource(), descriptor.getInterval().getStart(), @@ -1895,6 +2012,7 @@ private List readSegmentColumn(final String column, final SegmentDescrip descriptor.getPartitionNumber() ) ); + File outputLocation = new File( directory, StringUtils.format( @@ -1907,7 +2025,7 @@ private List readSegmentColumn(final String column, final SegmentDescrip ); outputLocation.mkdir(); CompressionUtils.unzip( - Files.asByteSource(indexZip), + Files.asByteSource(new File(indexBasePath.listFiles()[0], "index.zip")), outputLocation, Predicates.alwaysFalse(), false diff --git a/extensions-core/lookups-cached-global/pom.xml b/extensions-core/lookups-cached-global/pom.xml index 4ce26c7a803c..629705204ddc 100644 --- a/extensions-core/lookups-cached-global/pom.xml +++ b/extensions-core/lookups-cached-global/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 io.druid.extensions druid-lookups-cached-global @@ -29,7 +28,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/CacheScheduler.java b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/CacheScheduler.java index 38fcce978aac..1f1d3e3f8135 100644 --- a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/CacheScheduler.java +++ b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/CacheScheduler.java @@ -22,8 +22,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.concurrent.ConcurrentAwaitableCounter; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.guice.LazySingleton; import io.druid.java.util.common.ISE; import io.druid.java.util.common.StringUtils; @@ -51,11 +52,11 @@ * // cacheState could be either NoCache or VersionedCache. * if (cacheState instanceof NoCache) { * // the cache is not yet created, or already closed - * } else if (cacheState instanceof VersionedCache) { + * } else { * Map cache = ((VersionedCache) cacheState).getCache(); // use the cache * // Although VersionedCache implements AutoCloseable, versionedCache shouldn't be manually closed * // when obtained from entry.getCacheState(). If the namespace updates should be ceased completely, - * // entry.close() (see below) should be called, it will close the last VersionedCache itself. + * // entry.close() (see below) should be called, it will close the last VersionedCache as well. * // On scheduled updates, outdated VersionedCaches are also closed automatically. * } * ... 
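The remaining CacheScheduler hunks swap the hand-rolled, Phaser-based UpdateCounter (deleted later in this patch) for io.druid.concurrent.ConcurrentAwaitableCounter, renaming update/awaitTotalUpdates/awaitNextUpdates/awaitFirstUpdate to increment/awaitCount/awaitNextIncrements/awaitFirstIncrement. A minimal sketch of the new call pattern, using only the constructor and method names that appear in the replacements below; the wrapper class, the counts, and the ten-second timeout are illustrative, not part of the patch:

import io.druid.concurrent.ConcurrentAwaitableCounter;

import java.util.concurrent.TimeUnit;

final class AwaitableCounterSketch
{
  private final ConcurrentAwaitableCounter updateCounter = new ConcurrentAwaitableCounter();

  void onCacheSwapped()
  {
    // was UpdateCounter.update(): one increment per successful cache swap
    updateCounter.increment();
  }

  boolean waitForUpdates() throws InterruptedException
  {
    // was awaitTotalUpdates(n): block until the total count reaches n
    updateCounter.awaitCount(1);
    // was awaitNextUpdates(n): block until n further increments happen
    updateCounter.awaitNextIncrements(1);
    // was awaitFirstUpdate(timeout, unit): false if no increment occurs within the timeout
    return updateCounter.awaitFirstIncrement(10, TimeUnit.SECONDS);
  }
}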
@@ -105,14 +106,16 @@ Future getUpdaterFuture() return impl.updaterFuture; } + @VisibleForTesting public void awaitTotalUpdates(int totalUpdates) throws InterruptedException { - impl.updateCounter.awaitTotalUpdates(totalUpdates); + impl.updateCounter.awaitCount(totalUpdates); } + @VisibleForTesting void awaitNextUpdates(int nextUpdates) throws InterruptedException { - impl.updateCounter.awaitNextUpdates(nextUpdates); + impl.updateCounter.awaitNextIncrements(nextUpdates); } /** @@ -145,7 +148,7 @@ public class EntryImpl implements AutoCloseable private final Future updaterFuture; private final Cleaner entryCleaner; private final CacheGenerator cacheGenerator; - private final UpdateCounter updateCounter = new UpdateCounter(); + private final ConcurrentAwaitableCounter updateCounter = new ConcurrentAwaitableCounter(); private final CountDownLatch startLatch = new CountDownLatch(1); private EntryImpl(final T namespace, final Entry entry, final CacheGenerator cacheGenerator) @@ -276,7 +279,7 @@ private CacheState swapCacheState(VersionedCache newVersionedCache) return lastCacheState; } } while (!cacheStateHolder.compareAndSet(lastCacheState, newVersionedCache)); - updateCounter.update(); + updateCounter.increment(); return lastCacheState; } @@ -485,7 +488,7 @@ public Entry scheduleAndWait(ExtractionNamespace namespace, long waitForFirstRun log.debug("Scheduled new %s", entry); boolean success = false; try { - success = entry.impl.updateCounter.awaitFirstUpdate(waitForFirstRunMs, TimeUnit.MILLISECONDS); + success = entry.impl.updateCounter.awaitFirstIncrement(waitForFirstRunMs, TimeUnit.MILLISECONDS); if (success) { return entry; } else { diff --git a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/NamespaceExtractionCacheManager.java b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/NamespaceExtractionCacheManager.java index 65f646d5af65..49047294c9c0 100644 --- a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/NamespaceExtractionCacheManager.java +++ b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/NamespaceExtractionCacheManager.java @@ -22,7 +22,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Throwables; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.concurrent.ExecutorServices; import io.druid.java.util.common.lifecycle.Lifecycle; import io.druid.java.util.common.logger.Logger; diff --git a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/OffHeapNamespaceExtractionCacheManager.java b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/OffHeapNamespaceExtractionCacheManager.java index e591f8da90d3..ab069001e39d 100644 --- a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/OffHeapNamespaceExtractionCacheManager.java +++ b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/OffHeapNamespaceExtractionCacheManager.java @@ -21,8 +21,8 @@ import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import 
io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.lifecycle.Lifecycle; import io.druid.java.util.common.logger.Logger; import io.druid.server.lookup.namespace.NamespaceExtractionConfig; diff --git a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/OnHeapNamespaceExtractionCacheManager.java b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/OnHeapNamespaceExtractionCacheManager.java index 6a5e015f2521..d538c8f2da87 100644 --- a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/OnHeapNamespaceExtractionCacheManager.java +++ b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/OnHeapNamespaceExtractionCacheManager.java @@ -21,8 +21,8 @@ import com.google.common.primitives.Chars; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.ISE; import io.druid.java.util.common.lifecycle.Lifecycle; import io.druid.java.util.common.logger.Logger; diff --git a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/UpdateCounter.java b/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/UpdateCounter.java deleted file mode 100644 index ed229b45f891..000000000000 --- a/extensions-core/lookups-cached-global/src/main/java/io/druid/server/lookup/namespace/cache/UpdateCounter.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Metamarkets Group Inc. (Metamarkets) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Metamarkets licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package io.druid.server.lookup.namespace.cache; - -import java.util.concurrent.Phaser; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -final class UpdateCounter -{ - /** - * Max {@link Phaser}'s phase, specified in it's javadoc. Then it wraps to zero. 
- */ - private static final int MAX_PHASE = Integer.MAX_VALUE; - - private final Phaser phaser = new Phaser(1); - - void update() - { - phaser.arrive(); - } - - void awaitTotalUpdates(int totalUpdates) throws InterruptedException - { - totalUpdates &= MAX_PHASE; - int currentUpdates = phaser.getPhase(); - checkNotTerminated(currentUpdates); - while (comparePhases(totalUpdates, currentUpdates) > 0) { - currentUpdates = phaser.awaitAdvanceInterruptibly(currentUpdates); - checkNotTerminated(currentUpdates); - } - } - - private static int comparePhases(int phase1, int phase2) - { - int diff = (phase1 - phase2) & MAX_PHASE; - if (diff == 0) { - return 0; - } - return diff < MAX_PHASE / 2 ? 1 : -1; - } - - private void checkNotTerminated(int phase) - { - if (phase < 0) { - throw new IllegalStateException("Phaser[" + phaser + "] unexpectedly terminated."); - } - } - - void awaitNextUpdates(int nextUpdates) throws InterruptedException - { - if (nextUpdates <= 0) { - throw new IllegalArgumentException("nextUpdates is not positive: " + nextUpdates); - } - if (nextUpdates > MAX_PHASE / 4) { - throw new UnsupportedOperationException("Couldn't wait for so many updates: " + nextUpdates); - } - awaitTotalUpdates(phaser.getPhase() + nextUpdates); - } - - boolean awaitFirstUpdate(long timeout, TimeUnit unit) throws InterruptedException - { - try { - phaser.awaitAdvanceInterruptibly(0, timeout, unit); - return true; - } - catch (TimeoutException e) { - return false; - } - } -} diff --git a/extensions-core/lookups-cached-single/pom.xml b/extensions-core/lookups-cached-single/pom.xml index 9b4ea5a5ea36..6d22a81bf977 100644 --- a/extensions-core/lookups-cached-single/pom.xml +++ b/extensions-core/lookups-cached-single/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 io.druid.extensions druid-lookups-cached-single @@ -29,7 +28,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/mysql-metadata-storage/pom.xml b/extensions-core/mysql-metadata-storage/pom.xml index 950a985ce8ff..1a5d4b3943ae 100644 --- a/extensions-core/mysql-metadata-storage/pom.xml +++ b/extensions-core/mysql-metadata-storage/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLConnector.java b/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLConnector.java index 3079ad427eac..910f397f447b 100644 --- a/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLConnector.java +++ b/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLConnector.java @@ -19,6 +19,7 @@ package io.druid.metadata.storage.mysql; +import com.google.common.base.Joiner; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.inject.Inject; @@ -36,6 +37,7 @@ import org.skife.jdbi.v2.tweak.HandleCallback; import org.skife.jdbi.v2.util.BooleanMapper; +import java.io.File; import java.sql.SQLException; public class MySQLConnector extends SQLMetadataConnector @@ -48,7 +50,11 @@ public class MySQLConnector extends SQLMetadataConnector private final DBI dbi; @Inject - public MySQLConnector(Supplier config, Supplier dbTables) + public MySQLConnector( + Supplier config, + Supplier dbTables, + MySQLConnectorConfig connectorConfig + ) { super(config, dbTables); @@ -57,6 +63,68 @@ public MySQLConnector(Supplier config, Supplier< // 
so we need to help JDBC find the driver datasource.setDriverClassLoader(getClass().getClassLoader()); datasource.setDriverClassName("com.mysql.jdbc.Driver"); + datasource.addConnectionProperty("useSSL", String.valueOf(connectorConfig.isUseSSL())); + if (connectorConfig.isUseSSL()) { + log.info("SSL is enabled on this MySQL connection. "); + + datasource.addConnectionProperty( + "verifyServerCertificate", + String.valueOf(connectorConfig.isVerifyServerCertificate()) + ); + if (connectorConfig.isVerifyServerCertificate()) { + log.info("Server certificate verification is enabled. "); + + if (connectorConfig.getTrustCertificateKeyStoreUrl() != null) { + datasource.addConnectionProperty( + "trustCertificateKeyStoreUrl", + new File(connectorConfig.getTrustCertificateKeyStoreUrl()).toURI().toString() + ); + } + if (connectorConfig.getTrustCertificateKeyStoreType() != null) { + datasource.addConnectionProperty( + "trustCertificateKeyStoreType", + connectorConfig.getTrustCertificateKeyStoreType() + ); + } + if (connectorConfig.getTrustCertificateKeyStorePassword() == null) { + log.warn( + "Trust store password is empty. Ensure that the trust store has been configured with an empty password."); + } else { + datasource.addConnectionProperty( + "trustCertificateKeyStorePassword", + connectorConfig.getTrustCertificateKeyStorePassword() + ); + } + } + if (connectorConfig.getClientCertificateKeyStoreUrl() != null) { + datasource.addConnectionProperty( + "clientCertificateKeyStoreUrl", + new File(connectorConfig.getClientCertificateKeyStoreUrl()).toURI().toString() + ); + } + if (connectorConfig.getClientCertificateKeyStoreType() != null) { + datasource.addConnectionProperty( + "clientCertificateKeyStoreType", + connectorConfig.getClientCertificateKeyStoreType() + ); + } + if (connectorConfig.getClientCertificateKeyStorePassword() != null) { + datasource.addConnectionProperty( + "clientCertificateKeyStorePassword", + connectorConfig.getClientCertificateKeyStorePassword() + ); + } + Joiner joiner = Joiner.on(",").skipNulls(); + if (connectorConfig.getEnabledSSLCipherSuites() != null) { + datasource.addConnectionProperty( + "enabledSSLCipherSuites", + joiner.join(connectorConfig.getEnabledSSLCipherSuites()) + ); + } + if (connectorConfig.getEnabledTLSProtocols() != null) { + datasource.addConnectionProperty("enabledTLSProtocols", joiner.join(connectorConfig.getEnabledTLSProtocols())); + } + } // use double-quotes for quoting columns, so we can write SQL that works with most databases datasource.setConnectionInitSqls(ImmutableList.of("SET sql_mode='ANSI_QUOTES'")); @@ -97,9 +165,9 @@ public boolean tableExists(Handle handle, String tableName) { // ensure database defaults to utf8, otherwise bail boolean isUtf8 = handle - .createQuery("SELECT @@character_set_database = 'utf8'") - .map(BooleanMapper.FIRST) - .first(); + .createQuery("SELECT @@character_set_database = 'utf8'") + .map(BooleanMapper.FIRST) + .first(); if (!isUtf8) { throw new ISE( diff --git a/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLConnectorConfig.java b/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLConnectorConfig.java new file mode 100644 index 000000000000..77fc9dcd60b6 --- /dev/null +++ b/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLConnectorConfig.java @@ -0,0 +1,123 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.metadata.storage.mysql; + +import com.fasterxml.jackson.annotation.JsonProperty; +import io.druid.metadata.PasswordProvider; + +import java.util.List; + +public class MySQLConnectorConfig +{ + @JsonProperty + private boolean useSSL = false; + + @JsonProperty + private String trustCertificateKeyStoreUrl; + + @JsonProperty + private String trustCertificateKeyStoreType; + + @JsonProperty("trustCertificateKeyStorePassword") + private PasswordProvider trustCertificateKeyStorePasswordProvider; + + @JsonProperty + private String clientCertificateKeyStoreUrl; + + @JsonProperty + private String clientCertificateKeyStoreType; + + @JsonProperty("clientCertificateKeyStorePassword") + private PasswordProvider clientCertificateKeyStorePasswordProvider; + + @JsonProperty + private List enabledSSLCipherSuites; + + @JsonProperty + private List enabledTLSProtocols; + + @JsonProperty + private boolean verifyServerCertificate = false; + + public boolean isUseSSL() + { + return useSSL; + } + + public String getTrustCertificateKeyStoreUrl() + { + return trustCertificateKeyStoreUrl; + } + + public String getTrustCertificateKeyStoreType() + { + return trustCertificateKeyStoreType; + } + + public String getTrustCertificateKeyStorePassword() + { + return trustCertificateKeyStorePasswordProvider == null ? null : trustCertificateKeyStorePasswordProvider.getPassword(); + } + + public String getClientCertificateKeyStoreUrl() + { + return clientCertificateKeyStoreUrl; + } + + public String getClientCertificateKeyStoreType() + { + return clientCertificateKeyStoreType; + } + + public String getClientCertificateKeyStorePassword() + { + return clientCertificateKeyStorePasswordProvider == null ? 
null : clientCertificateKeyStorePasswordProvider.getPassword(); + } + + public List getEnabledSSLCipherSuites() + { + return enabledSSLCipherSuites; + } + + public List getEnabledTLSProtocols() + { + return enabledTLSProtocols; + } + + public boolean isVerifyServerCertificate() + { + return verifyServerCertificate; + } + + @Override + public String toString() + { + return "MySQLConnectorConfig{" + + "useSSL='" + useSSL + '\'' + + ", clientCertificateKeyStoreUrl='" + clientCertificateKeyStoreUrl + '\'' + + ", clientCertificateKeyStoreType='" + clientCertificateKeyStoreType + '\'' + + ", verifyServerCertificate='" + verifyServerCertificate + '\'' + + ", trustCertificateKeyStoreUrl='" + trustCertificateKeyStoreUrl + '\'' + + ", trustCertificateKeyStoreType='" + trustCertificateKeyStoreType + '\'' + + ", enabledSSLCipherSuites=" + enabledSSLCipherSuites + + ", enabledTLSProtocols=" + enabledTLSProtocols + + '}'; + } +} diff --git a/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLMetadataStorageModule.java b/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLMetadataStorageModule.java index 211d5ca0b0e4..4ba1cb8177bb 100644 --- a/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLMetadataStorageModule.java +++ b/extensions-core/mysql-metadata-storage/src/main/java/io/druid/metadata/storage/mysql/MySQLMetadataStorageModule.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableList; import com.google.inject.Binder; import com.google.inject.Key; +import io.druid.guice.JsonConfigProvider; import io.druid.guice.LazySingleton; import io.druid.guice.PolyBind; import io.druid.guice.SQLMetadataStorageDruidModule; @@ -56,6 +57,8 @@ public void configure(Binder binder) { super.configure(binder); + JsonConfigProvider.bind(binder, "druid.metadata.mysql.ssl", MySQLConnectorConfig.class); + PolyBind .optionBinder(binder, Key.get(MetadataStorageProvider.class)) .addBinding(TYPE) diff --git a/extensions-core/postgresql-metadata-storage/pom.xml b/extensions-core/postgresql-metadata-storage/pom.xml index d4625a6499ce..c34c9d0db5f4 100644 --- a/extensions-core/postgresql-metadata-storage/pom.xml +++ b/extensions-core/postgresql-metadata-storage/pom.xml @@ -28,7 +28,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/protobuf-extensions/pom.xml b/extensions-core/protobuf-extensions/pom.xml index e1a14d412b6e..72994ffc5dc3 100644 --- a/extensions-core/protobuf-extensions/pom.xml +++ b/extensions-core/protobuf-extensions/pom.xml @@ -16,9 +16,7 @@ ~ limitations under the License. --> - + 4.0.0 io.druid.extensions @@ -29,7 +27,7 @@ druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/extensions-core/s3-extensions/pom.xml b/extensions-core/s3-extensions/pom.xml index c08208c81e1e..23a59366ff74 100644 --- a/extensions-core/s3-extensions/pom.xml +++ b/extensions-core/s3-extensions/pom.xml @@ -16,8 +16,7 @@ ~ limitations under the License. 
--> - + 4.0.0 io.druid.extensions @@ -28,7 +27,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml @@ -45,15 +44,15 @@ ${project.parent.version} provided - - net.java.dev.jets3t - jets3t + io.druid + java-util + ${project.parent.version} provided - com.metamx - java-util + net.java.dev.jets3t + jets3t provided diff --git a/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentFinder.java b/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentFinder.java index d6d773640e8e..3f4529ec99a0 100644 --- a/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentFinder.java +++ b/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentFinder.java @@ -21,9 +21,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Throwables; -import com.google.common.collect.Sets; import com.google.inject.Inject; - +import io.druid.java.util.common.Pair; import io.druid.java.util.common.logger.Logger; import io.druid.segment.loading.DataSegmentFinder; import io.druid.segment.loading.SegmentLoadingException; @@ -35,9 +34,11 @@ import java.io.IOException; import java.io.InputStream; +import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; public class S3DataSegmentFinder implements DataSegmentFinder { @@ -62,7 +63,7 @@ public S3DataSegmentFinder( @Override public Set findSegments(String workingDirPath, boolean updateDescriptor) throws SegmentLoadingException { - final Set segments = Sets.newHashSet(); + final Map> timestampedSegments = new HashMap<>(); try { Iterator objectsIterator = S3Utils.storageObjectsIterator( @@ -103,7 +104,12 @@ public Set findSegments(String workingDirPath, boolean updateDescri s3Client.putObject(config.getBucket(), newDescJsonObject); } } - segments.add(dataSegment); + + DataSegmentFinder.putInMapRetainingNewest( + timestampedSegments, + dataSegment, + indexObject.getLastModifiedDate() == null ? 
0 : indexObject.getLastModifiedDate().getTime() + ); } } else { throw new SegmentLoadingException( @@ -124,6 +130,6 @@ public Set findSegments(String workingDirPath, boolean updateDescri Throwables.propagateIfInstanceOf(e, SegmentLoadingException.class); Throwables.propagate(e); } - return segments; + return timestampedSegments.values().stream().map(x -> x.lhs).collect(Collectors.toSet()); } } diff --git a/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentMover.java b/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentMover.java index 983837865e8b..643b30407d9f 100644 --- a/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentMover.java +++ b/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentMover.java @@ -72,8 +72,11 @@ public DataSegment move(DataSegment segment, Map targetLoadSpec) final String targetS3Bucket = MapUtils.getString(targetLoadSpec, "bucket"); final String targetS3BaseKey = MapUtils.getString(targetLoadSpec, "baseKey"); - final String targetS3Path = S3Utils.constructSegmentPath(targetS3BaseKey, DataSegmentPusher.getDefaultStorageDir(segment)); - String targetS3DescriptorPath = S3Utils.descriptorPathForSegmentPath(targetS3Path); + final String targetS3Path = S3Utils.constructSegmentPath( + targetS3BaseKey, + DataSegmentPusher.getDefaultStorageDir(segment, false) + ); + final String targetS3DescriptorPath = S3Utils.descriptorPathForSegmentPath(targetS3Path); if (targetS3Bucket.isEmpty()) { throw new SegmentLoadingException("Target S3 bucket is not specified"); diff --git a/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentPusher.java b/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentPusher.java index 1100287cb966..eb5c4a9c10dd 100644 --- a/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentPusher.java +++ b/extensions-core/s3-extensions/src/main/java/io/druid/storage/s3/S3DataSegmentPusher.java @@ -24,9 +24,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; import io.druid.java.util.common.CompressionUtils; import io.druid.java.util.common.StringUtils; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.segment.SegmentUtils; import io.druid.segment.loading.DataSegmentPusher; import io.druid.timeline.DataSegment; @@ -88,9 +88,10 @@ public List getAllowedPropertyPrefixesForHadoop() } @Override - public DataSegment push(final File indexFilesDir, final DataSegment inSegment) throws IOException + public DataSegment push(final File indexFilesDir, final DataSegment inSegment, final boolean useUniquePath) + throws IOException { - final String s3Path = S3Utils.constructSegmentPath(config.getBaseKey(), getStorageDir(inSegment)); + final String s3Path = S3Utils.constructSegmentPath(config.getBaseKey(), getStorageDir(inSegment, useUniquePath)); log.info("Copying segment[%s] to S3 at location[%s]", inSegment.getIdentifier(), s3Path); @@ -105,21 +106,10 @@ public DataSegment push(final File indexFilesDir, final DataSegment inSegment) t public DataSegment call() throws Exception { S3Object toPush = new S3Object(zipOutFile); - - final String outputBucket = config.getBucket(); - final String s3DescriptorPath = S3Utils.descriptorPathForSegmentPath(s3Path); - - toPush.setBucketName(outputBucket); - toPush.setKey(s3Path); - if (!config.getDisableAcl()) { - 
toPush.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL); - } - - log.info("Pushing %s.", toPush); - s3Client.putObject(outputBucket, toPush); + putObject(config.getBucket(), s3Path, toPush); final DataSegment outSegment = inSegment.withSize(indexSize) - .withLoadSpec(makeLoadSpec(outputBucket, toPush.getKey())) + .withLoadSpec(makeLoadSpec(config.getBucket(), toPush.getKey())) .withBinaryVersion(SegmentUtils.getVersionFromDir(indexFilesDir)); File descriptorFile = File.createTempFile("druid", "descriptor.json"); @@ -127,14 +117,12 @@ public DataSegment call() throws Exception // runtime, and because Guava deletes methods over time, that causes incompatibilities. Files.write(descriptorFile.toPath(), jsonMapper.writeValueAsBytes(outSegment)); S3Object descriptorObject = new S3Object(descriptorFile); - descriptorObject.setBucketName(outputBucket); - descriptorObject.setKey(s3DescriptorPath); - if (!config.getDisableAcl()) { - descriptorObject.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL); - } - log.info("Pushing %s", descriptorObject); - s3Client.putObject(outputBucket, descriptorObject); + putObject( + config.getBucket(), + S3Utils.descriptorPathForSegmentPath(s3Path), + descriptorObject + ); log.info("Deleting zipped index File[%s]", zipOutFile); zipOutFile.delete(); @@ -164,7 +152,6 @@ public Map makeLoadSpec(URI finalIndexZipFilePath) /** * Any change in loadSpec need to be reflected {@link io.druid.indexer.JobHelper#getURIFromSegment()} - * */ @SuppressWarnings("JavadocReference") private Map makeLoadSpec(String bucket, String key) @@ -180,4 +167,17 @@ private Map makeLoadSpec(String bucket, String key) config.isUseS3aSchema() ? "s3a" : "s3n" ); } + + private void putObject(String bucketName, String path, S3Object object) throws ServiceException + { + object.setBucketName(bucketName); + object.setKey(path); + if (!config.getDisableAcl()) { + object.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL); + } + + log.info("Pushing %s.", object); + + s3Client.putObject(bucketName, object); + } } diff --git a/extensions-core/s3-extensions/src/test/java/io/druid/storage/s3/S3DataSegmentFinderTest.java b/extensions-core/s3-extensions/src/test/java/io/druid/storage/s3/S3DataSegmentFinderTest.java index f0df427ccb29..55ad817003d2 100644 --- a/extensions-core/s3-extensions/src/test/java/io/druid/storage/s3/S3DataSegmentFinderTest.java +++ b/extensions-core/s3-extensions/src/test/java/io/druid/storage/s3/S3DataSegmentFinderTest.java @@ -124,7 +124,6 @@ public class S3DataSegmentFinderTest private String indexZip4_1; - @BeforeClass public static void setUpStatic() { @@ -210,31 +209,51 @@ public void testFindSegments() throws Exception final String serializedSegment4_0 = mapper.writeValueAsString(updatedSegment4_0); final String serializedSegment4_1 = mapper.writeValueAsString(updatedSegment4_1); - Assert.assertNotEquals(serializedSegment1, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor1).getDataInputStream())); - Assert.assertNotEquals(serializedSegment2, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor2).getDataInputStream())); - Assert.assertNotEquals(serializedSegment3, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor3).getDataInputStream())); - Assert.assertNotEquals(serializedSegment4_0, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor4_0).getDataInputStream())); - Assert.assertNotEquals(serializedSegment4_1, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor4_1).getDataInputStream())); 
+ Assert.assertNotEquals( + serializedSegment1, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor1).getDataInputStream()) + ); + Assert.assertNotEquals( + serializedSegment2, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor2).getDataInputStream()) + ); + Assert.assertNotEquals( + serializedSegment3, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor3).getDataInputStream()) + ); + Assert.assertNotEquals( + serializedSegment4_0, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor4_0).getDataInputStream()) + ); + Assert.assertNotEquals( + serializedSegment4_1, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor4_1).getDataInputStream()) + ); final Set segments2 = s3DataSegmentFinder.findSegments("", true); Assert.assertEquals(segments, segments2); - Assert.assertEquals(serializedSegment1, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor1).getDataInputStream())); - Assert.assertEquals(serializedSegment2, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor2).getDataInputStream())); - Assert.assertEquals(serializedSegment3, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor3).getDataInputStream())); - Assert.assertEquals(serializedSegment4_0, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor4_0).getDataInputStream())); - Assert.assertEquals(serializedSegment4_1, - IOUtils.toString(mockS3Client.getObject(bucket, descriptor4_1).getDataInputStream())); + Assert.assertEquals( + serializedSegment1, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor1).getDataInputStream()) + ); + Assert.assertEquals( + serializedSegment2, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor2).getDataInputStream()) + ); + Assert.assertEquals( + serializedSegment3, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor3).getDataInputStream()) + ); + Assert.assertEquals( + serializedSegment4_0, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor4_0).getDataInputStream()) + ); + Assert.assertEquals( + serializedSegment4_1, + IOUtils.toString(mockS3Client.getObject(bucket, descriptor4_1).getDataInputStream()) + ); } @Test(expected = SegmentLoadingException.class) @@ -268,9 +287,7 @@ public void testFindSegmentsWithworkingDirPath() throws SegmentLoadingException public void testFindSegmentsUpdateLoadSpec() throws Exception { config.setBucket("amazing"); - final DataSegment segmentMissingLoadSpec = DataSegment.builder(SEGMENT_1) - .loadSpec(ImmutableMap.of()) - .build(); + final DataSegment segmentMissingLoadSpec = DataSegment.builder(SEGMENT_1).loadSpec(ImmutableMap.of()).build(); final S3DataSegmentFinder s3DataSegmentFinder = new S3DataSegmentFinder(mockS3Client, config, mapper); final String segmentPath = baseKey + "/interval_missing_load_spec/v1/1/"; final String descriptorPath = S3Utils.descriptorPathForSegmentPath(segmentPath); @@ -304,6 +321,32 @@ public void testFindSegmentsUpdateLoadSpec() throws Exception Assert.assertEquals(indexPath, testLoadSpec.get("key")); } + @Test + public void testPreferNewestSegment() throws Exception + { + baseKey = "replicaDataSource"; + + config = new S3DataSegmentPusherConfig(); + config.setBucket(bucket); + config.setBaseKey(baseKey); + + descriptor1 = S3Utils.descriptorPathForSegmentPath(baseKey + "/interval10/v1/0/older/"); + descriptor2 = S3Utils.descriptorPathForSegmentPath(baseKey + "/interval10/v1/0/newer/"); + + indexZip1 = S3Utils.indexZipForSegmentPath(descriptor1); + indexZip2 = S3Utils.indexZipForSegmentPath(descriptor2); + + 
mockS3Client.putObject(bucket, new S3Object(descriptor1, mapper.writeValueAsString(SEGMENT_1))); + mockS3Client.putObject(bucket, new S3Object(indexZip1, "dummy")); + mockS3Client.putObject(bucket, new S3Object(descriptor2, mapper.writeValueAsString(SEGMENT_1))); + mockS3Client.putObject(bucket, new S3Object(indexZip2, "dummy")); + + final S3DataSegmentFinder s3DataSegmentFinder = new S3DataSegmentFinder(mockS3Client, config, mapper); + final Set segments = s3DataSegmentFinder.findSegments("", false); + + Assert.assertEquals(1, segments.size()); + } + private String getDescriptorPath(DataSegment segment) { return S3Utils.descriptorPathForSegmentPath(String.valueOf(segment.getLoadSpec().get("key"))); diff --git a/extensions-core/s3-extensions/src/test/java/io/druid/storage/s3/S3DataSegmentPusherTest.java b/extensions-core/s3-extensions/src/test/java/io/druid/storage/s3/S3DataSegmentPusherTest.java index 32818b17e487..841f10e1c663 100644 --- a/extensions-core/s3-extensions/src/test/java/io/druid/storage/s3/S3DataSegmentPusherTest.java +++ b/extensions-core/s3-extensions/src/test/java/io/druid/storage/s3/S3DataSegmentPusherTest.java @@ -64,6 +64,20 @@ public void setValue(T value) @Test public void testPush() throws Exception + { + testPushInternal(false, "key/foo/2015-01-01T00:00:00\\.000Z_2016-01-01T00:00:00\\.000Z/0/0/index\\.zip"); + } + + @Test + public void testPushUseUniquePath() throws Exception + { + testPushInternal( + true, + "key/foo/2015-01-01T00:00:00\\.000Z_2016-01-01T00:00:00\\.000Z/0/0/[A-Za-z0-9-]{36}/index\\.zip" + ); + } + + private void testPushInternal(boolean useUniquePath, String matcher) throws Exception { RestS3Service s3Client = EasyMock.createStrictMock(RestS3Service.class); @@ -113,14 +127,15 @@ public S3Object answer() throws Throwable size ); - DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush); + DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush, useUniquePath); Assert.assertEquals(segmentToPush.getSize(), segment.getSize()); Assert.assertEquals(1, (int) segment.getBinaryVersion()); Assert.assertEquals("bucket", segment.getLoadSpec().get("bucket")); - Assert.assertEquals( - "key/foo/2015-01-01T00:00:00.000Z_2016-01-01T00:00:00.000Z/0/0/index.zip", - segment.getLoadSpec().get("key")); + Assert.assertTrue( + segment.getLoadSpec().get("key").toString(), + segment.getLoadSpec().get("key").toString().matches(matcher) + ); Assert.assertEquals("s3_zip", segment.getLoadSpec().get("type")); // Verify that the pushed S3Object contains the correct data diff --git a/extensions-core/simple-client-sslcontext/pom.xml b/extensions-core/simple-client-sslcontext/pom.xml index 9277db3a5526..2e0647dc420e 100644 --- a/extensions-core/simple-client-sslcontext/pom.xml +++ b/extensions-core/simple-client-sslcontext/pom.xml @@ -1,11 +1,9 @@ - + druid io.druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml 4.0.0 @@ -35,8 +33,9 @@ provided - com.metamx + io.druid java-util + ${project.parent.version} provided diff --git a/extensions-core/simple-client-sslcontext/src/main/java/io/druid/https/SSLContextProvider.java b/extensions-core/simple-client-sslcontext/src/main/java/io/druid/https/SSLContextProvider.java index ccdacf1b733d..ee53c9278e0c 100644 --- a/extensions-core/simple-client-sslcontext/src/main/java/io/druid/https/SSLContextProvider.java +++ b/extensions-core/simple-client-sslcontext/src/main/java/io/druid/https/SSLContextProvider.java @@ -22,7 +22,7 @@ import com.google.common.base.Throwables; import com.google.inject.Inject; import 
com.google.inject.Provider; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManagerFactory; diff --git a/extensions-core/stats/pom.xml b/extensions-core/stats/pom.xml index e6dd75e687d5..7a968ad8bda7 100644 --- a/extensions-core/stats/pom.xml +++ b/extensions-core/stats/pom.xml @@ -29,7 +29,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT ../../pom.xml diff --git a/hll/pom.xml b/hll/pom.xml index 653b2ca63b32..fc9dc8468253 100644 --- a/hll/pom.xml +++ b/hll/pom.xml @@ -24,7 +24,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT druid-hll diff --git a/indexing-hadoop/pom.xml b/indexing-hadoop/pom.xml index 9ab3867d3bf9..1bdd540102e4 100644 --- a/indexing-hadoop/pom.xml +++ b/indexing-hadoop/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/HadoopIngestionSpec.java b/indexing-hadoop/src/main/java/io/druid/indexer/HadoopIngestionSpec.java index aea58796b654..b227d6fe4df0 100644 --- a/indexing-hadoop/src/main/java/io/druid/indexer/HadoopIngestionSpec.java +++ b/indexing-hadoop/src/main/java/io/druid/indexer/HadoopIngestionSpec.java @@ -37,6 +37,7 @@ import org.joda.time.Interval; import java.io.IOException; +import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -151,20 +152,19 @@ public static HadoopIngestionSpec updateSegmentListIfDatasourcePathSpecIsUsed( String ingestionSpec = "ingestionSpec"; Map pathSpec = spec.getIOConfig().getPathSpec(); - Map datasourcePathSpec = null; + List> datasourcePathSpecs = new ArrayList<>(); if (pathSpec.get(type).equals(dataSource)) { - datasourcePathSpec = pathSpec; + datasourcePathSpecs.add(pathSpec); } else if (pathSpec.get(type).equals(multi)) { List> childPathSpecs = (List>) pathSpec.get(children); for (Map childPathSpec : childPathSpecs) { if (childPathSpec.get(type).equals(dataSource)) { - datasourcePathSpec = childPathSpec; - break; + datasourcePathSpecs.add(childPathSpec); } } } - if (datasourcePathSpec != null) { + for (Map datasourcePathSpec : datasourcePathSpecs) { Map ingestionSpecMap = (Map) datasourcePathSpec.get(ingestionSpec); DatasourceIngestionSpec ingestionSpecObj = jsonMapper.convertValue( ingestionSpecMap, diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/IndexGeneratorJob.java b/indexing-hadoop/src/main/java/io/druid/indexer/IndexGeneratorJob.java index c9111d00cd35..a9726bb2db16 100644 --- a/indexing-hadoop/src/main/java/io/druid/indexer/IndexGeneratorJob.java +++ b/indexing-hadoop/src/main/java/io/druid/indexer/IndexGeneratorJob.java @@ -64,6 +64,7 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.InvalidJobConfException; import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.Partitioner; @@ -210,9 +211,17 @@ public boolean run() boolean success = job.waitForCompletion(true); - Counter invalidRowCount = job.getCounters() - .findCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER); - jobStats.setInvalidRowCount(invalidRowCount.getValue()); + Counters counters = job.getCounters(); + if (counters == null) { + log.info("No counters found for job [%s]", job.getJobName()); + } else { + Counter invalidRowCount = 
counters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER); + if (invalidRowCount != null) { + jobStats.setInvalidRowCount(invalidRowCount.getValue()); + } else { + log.info("No invalid row counter found for job [%s]", job.getJobName()); + } + } return success; } @@ -258,6 +267,7 @@ public static class IndexGeneratorMapper extends HadoopDruidIndexerMapper typeHelperMap; @Override protected void setup(Context context) @@ -269,6 +279,11 @@ protected void setup(Context context) for (int i = 0; i < aggregators.length; ++i) { combiningAggs[i] = aggregators[i].getCombiningFactory(); } + typeHelperMap = InputRowSerde.getTypeHelperMap(config.getSchema() + .getDataSchema() + .getParser() + .getParseSpec() + .getDimensionsSpec()); } @Override @@ -299,9 +314,9 @@ protected void innerMap( // and they contain the columns as they show up in the segment after ingestion, not what you would see in raw // data byte[] serializedInputRow = inputRow instanceof SegmentInputRow ? - InputRowSerde.toBytes(inputRow, combiningAggs, reportParseExceptions) + InputRowSerde.toBytes(typeHelperMap, inputRow, combiningAggs, reportParseExceptions) : - InputRowSerde.toBytes(inputRow, aggregators, reportParseExceptions); + InputRowSerde.toBytes(typeHelperMap, inputRow, aggregators, reportParseExceptions); context.write( new SortableBytes( @@ -322,6 +337,7 @@ public static class IndexGeneratorCombiner extends Reducer typeHelperMap; @Override protected void setup(Context context) @@ -334,6 +350,11 @@ protected void setup(Context context) for (int i = 0; i < aggregators.length; ++i) { combiningAggs[i] = aggregators[i].getCombiningFactory(); } + typeHelperMap = InputRowSerde.getTypeHelperMap(config.getSchema() + .getDataSchema() + .getParser() + .getParseSpec() + .getDimensionsSpec()); } @Override @@ -350,11 +371,11 @@ protected void reduce( SortableBytes keyBytes = SortableBytes.fromBytesWritable(key); Bucket bucket = Bucket.fromGroupKey(keyBytes.getGroupKey()).lhs; IncrementalIndex index = makeIncrementalIndex(bucket, combiningAggs, config, null, null); - index.add(InputRowSerde.fromBytes(first.getBytes(), aggregators)); + index.add(InputRowSerde.fromBytes(typeHelperMap, first.getBytes(), aggregators)); while (iter.hasNext()) { context.progress(); - InputRow value = InputRowSerde.fromBytes(iter.next().getBytes(), aggregators); + InputRow value = InputRowSerde.fromBytes(typeHelperMap, iter.next().getBytes(), aggregators); if (!index.canAppendRow()) { dimOrder.addAll(index.getDimensionOrder()); @@ -381,10 +402,13 @@ private void flushIndexToContextAndClose(BytesWritable key, IncrementalIndex ind context.progress(); Row row = rows.next(); InputRow inputRow = getInputRowFromRow(row, dimensions); + // reportParseExceptions is true as any unparseable data is already handled by the mapper. 
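+ // typeHelperMap is built in setup() from the parser's DimensionsSpec; it lets toBytes/fromBytes
+ // serialize each declared dimension with its declared value type (string/long/float/double).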
+ byte[] serializedRow = InputRowSerde.toBytes(typeHelperMap, inputRow, combiningAggs, true); + context.write( key, - new BytesWritable(InputRowSerde.toBytes(inputRow, combiningAggs, true)) + new BytesWritable(serializedRow) ); } index.close(); @@ -479,6 +503,7 @@ public static class IndexGeneratorReducer extends Reducer metricNames = Lists.newArrayList(); private AggregatorFactory[] aggregators; private AggregatorFactory[] combiningAggs; + private Map typeHelperMap; protected ProgressIndicator makeProgressIndicator(final Context context) { @@ -530,6 +555,11 @@ protected void setup(Context context) metricNames.add(aggregators[i].getName()); combiningAggs[i] = aggregators[i].getCombiningFactory(); } + typeHelperMap = InputRowSerde.getTypeHelperMap(config.getSchema() + .getDataSchema() + .getParser() + .getParseSpec() + .getDimensionsSpec()); } @Override @@ -597,7 +627,7 @@ public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) for (final BytesWritable bw : values) { context.progress(); - final InputRow inputRow = index.formatRow(InputRowSerde.fromBytes(bw.getBytes(), aggregators)); + final InputRow inputRow = index.formatRow(InputRowSerde.fromBytes(typeHelperMap, bw.getBytes(), aggregators)); int numRows = index.add(inputRow); ++lineCount; diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/InputRowSerde.java b/indexing-hadoop/src/main/java/io/druid/indexer/InputRowSerde.java index 672f77133746..cd1dd531604a 100644 --- a/indexing-hadoop/src/main/java/io/druid/indexer/InputRowSerde.java +++ b/indexing-hadoop/src/main/java/io/druid/indexer/InputRowSerde.java @@ -22,17 +22,24 @@ import com.google.common.base.Supplier; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import com.google.common.io.ByteArrayDataInput; import com.google.common.io.ByteArrayDataOutput; import com.google.common.io.ByteStreams; + import io.druid.data.input.InputRow; import io.druid.data.input.MapBasedInputRow; +import io.druid.data.input.Rows; +import io.druid.data.input.impl.DimensionSchema; +import io.druid.data.input.impl.DimensionsSpec; import io.druid.java.util.common.IAE; import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.logger.Logger; import io.druid.java.util.common.parsers.ParseException; import io.druid.query.aggregation.Aggregator; import io.druid.query.aggregation.AggregatorFactory; +import io.druid.segment.DimensionHandlerUtils; import io.druid.segment.VirtualColumns; +import io.druid.segment.column.ValueType; import io.druid.segment.incremental.IncrementalIndex; import io.druid.segment.serde.ComplexMetricSerde; import io.druid.segment.serde.ComplexMetrics; @@ -49,7 +56,165 @@ public class InputRowSerde { private static final Logger log = new Logger(InputRowSerde.class); - public static final byte[] toBytes(final InputRow row, AggregatorFactory[] aggs, boolean reportParseExceptions) + private static final IndexSerdeTypeHelper STRING_HELPER = new StringIndexSerdeTypeHelper(); + private static final IndexSerdeTypeHelper LONG_HELPER = new LongIndexSerdeTypeHelper(); + private static final IndexSerdeTypeHelper FLOAT_HELPER = new FloatIndexSerdeTypeHelper(); + private static final IndexSerdeTypeHelper DOUBLE_HELPER = new DoubleIndexSerdeTypeHelper(); + + public interface IndexSerdeTypeHelper + { + ValueType getType(); + + void serialize(ByteArrayDataOutput out, Object value, boolean reportParseExceptions); + + T deserialize(ByteArrayDataInput in); + } + + public static Map getTypeHelperMap(DimensionsSpec dimensionsSpec) + { + Map 
typeHelperMap = Maps.newHashMap(); + for (DimensionSchema dimensionSchema : dimensionsSpec.getDimensions()) { + IndexSerdeTypeHelper typeHelper; + switch (dimensionSchema.getValueType()) { + case STRING: + typeHelper = STRING_HELPER; + break; + case LONG: + typeHelper = LONG_HELPER; + break; + case FLOAT: + typeHelper = FLOAT_HELPER; + break; + case DOUBLE: + typeHelper = DOUBLE_HELPER; + break; + default: + throw new IAE("Invalid type: [%s]", dimensionSchema.getValueType()); + } + typeHelperMap.put(dimensionSchema.getName(), typeHelper); + } + return typeHelperMap; + } + + public static class StringIndexSerdeTypeHelper implements IndexSerdeTypeHelper> + { + @Override + public ValueType getType() + { + return ValueType.STRING; + } + + @Override + public void serialize(ByteArrayDataOutput out, Object value, boolean reportParseExceptions) + { + List values = Rows.objectToStrings(value); + try { + writeStringArray(values, out); + } + catch (IOException ioe) { + throw new RuntimeException(ioe); + } + } + + @Override + public List deserialize(ByteArrayDataInput in) + { + try { + return readStringArray(in); + } + catch (IOException ioe) { + throw new RuntimeException(ioe); + } + } + } + + public static class LongIndexSerdeTypeHelper implements IndexSerdeTypeHelper + { + @Override + public ValueType getType() + { + return ValueType.LONG; + } + + @Override + public void serialize(ByteArrayDataOutput out, Object value, boolean reportParseExceptions) + { + Long ret = DimensionHandlerUtils.convertObjectToLong(value, reportParseExceptions); + if (ret == null) { + // remove null -> zero conversion when https://github.com/druid-io/druid/pull/5278 series of patches is merged + // we'll also need to change the serialized encoding so that it can represent numeric nulls + ret = DimensionHandlerUtils.ZERO_LONG; + } + out.writeLong(ret); + } + + @Override + public Long deserialize(ByteArrayDataInput in) + { + return in.readLong(); + } + } + + public static class FloatIndexSerdeTypeHelper implements IndexSerdeTypeHelper + { + @Override + public ValueType getType() + { + return ValueType.FLOAT; + } + + @Override + public void serialize(ByteArrayDataOutput out, Object value, boolean reportParseExceptions) + { + Float ret = DimensionHandlerUtils.convertObjectToFloat(value, reportParseExceptions); + if (ret == null) { + // remove null -> zero conversion when https://github.com/druid-io/druid/pull/5278 series of patches is merged + // we'll also need to change the serialized encoding so that it can represent numeric nulls + ret = DimensionHandlerUtils.ZERO_FLOAT; + } + out.writeFloat(ret); + } + + @Override + public Float deserialize(ByteArrayDataInput in) + { + return in.readFloat(); + } + } + + public static class DoubleIndexSerdeTypeHelper implements IndexSerdeTypeHelper + { + @Override + public ValueType getType() + { + return ValueType.DOUBLE; + } + + @Override + public void serialize(ByteArrayDataOutput out, Object value, boolean reportParseExceptions) + { + Double ret = DimensionHandlerUtils.convertObjectToDouble(value, reportParseExceptions); + if (ret == null) { + // remove null -> zero conversion when https://github.com/druid-io/druid/pull/5278 series of patches is merged + // we'll also need to change the serialized encoding so that it can represent numeric nulls + ret = DimensionHandlerUtils.ZERO_DOUBLE; + } + out.writeDouble(ret); + } + + @Override + public Double deserialize(ByteArrayDataInput in) + { + return in.readDouble(); + } + } + + public static final byte[] toBytes( + final Map typeHelperMap, 
+ final InputRow row, + AggregatorFactory[] aggs, + boolean reportParseExceptions + ) { try { ByteArrayDataOutput out = ByteStreams.newDataOutput(); @@ -63,9 +228,12 @@ public static final byte[] toBytes(final InputRow row, AggregatorFactory[] aggs, WritableUtils.writeVInt(out, dimList.size()); if (dimList != null) { for (String dim : dimList) { - List dimValues = row.getDimension(dim); + IndexSerdeTypeHelper typeHelper = typeHelperMap.get(dim); + if (typeHelper == null) { + typeHelper = STRING_HELPER; + } writeString(dim, out); - writeStringArray(dimValues, out); + typeHelper.serialize(out, row.getRaw(dim), reportParseExceptions); } } @@ -176,10 +344,14 @@ private static List readStringArray(DataInput in) throws IOException return values; } - public static final InputRow fromBytes(byte[] data, AggregatorFactory[] aggs) + public static final InputRow fromBytes( + final Map typeHelperMap, + byte[] data, + AggregatorFactory[] aggs + ) { try { - DataInput in = ByteStreams.newDataInput(data); + ByteArrayDataInput in = ByteStreams.newDataInput(data); //Read timestamp long timestamp = in.readLong(); @@ -192,14 +364,25 @@ public static final InputRow fromBytes(byte[] data, AggregatorFactory[] aggs) for (int i = 0; i < dimNum; i++) { String dimension = readString(in); dimensions.add(dimension); - List dimensionValues = readStringArray(in); - if (dimensionValues == null) { + + IndexSerdeTypeHelper typeHelper = typeHelperMap.get(dimension); + if (typeHelper == null) { + typeHelper = STRING_HELPER; + } + Object dimValues = typeHelper.deserialize(in); + if (dimValues == null) { continue; } - if (dimensionValues.size() == 1) { - event.put(dimension, dimensionValues.get(0)); + + if (typeHelper.getType() == ValueType.STRING) { + List dimensionValues = (List) dimValues; + if (dimensionValues.size() == 1) { + event.put(dimension, dimensionValues.get(0)); + } else { + event.put(dimension, dimensionValues); + } } else { - event.put(dimension, dimensionValues); + event.put(dimension, dimValues); } } diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/JobHelper.java b/indexing-hadoop/src/main/java/io/druid/indexer/JobHelper.java index 7ce678045c7f..8975c01d86a0 100644 --- a/indexing-hadoop/src/main/java/io/druid/indexer/JobHelper.java +++ b/indexing-hadoop/src/main/java/io/druid/indexer/JobHelper.java @@ -91,6 +91,7 @@ public static Path distributedClassPath(Path base) { return new Path(base, "classpath"); } + public static final String INDEX_ZIP = "index.zip"; public static final String DESCRIPTOR_JSON = "descriptor.json"; @@ -568,8 +569,10 @@ public static Path makeFileNamePath( DataSegmentPusher dataSegmentPusher ) { - return new Path(prependFSIfNullScheme(fs, basePath), - dataSegmentPusher.makeIndexPathName(segmentTemplate, baseFileName)); + return new Path( + prependFSIfNullScheme(fs, basePath), + dataSegmentPusher.makeIndexPathName(segmentTemplate, baseFileName) + ); } public static Path makeTmpPath( @@ -582,9 +585,10 @@ public static Path makeTmpPath( { return new Path( prependFSIfNullScheme(fs, basePath), - StringUtils.format("./%s.%d", - dataSegmentPusher.makeIndexPathName(segmentTemplate, JobHelper.INDEX_ZIP), - taskAttemptID.getId() + StringUtils.format( + "./%s.%d", + dataSegmentPusher.makeIndexPathName(segmentTemplate, JobHelper.INDEX_ZIP), + taskAttemptID.getId() ) ); } diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/hadoop/DatasourceInputFormat.java b/indexing-hadoop/src/main/java/io/druid/indexer/hadoop/DatasourceInputFormat.java index b4856a925fca..a93459f61637 100644 --- 
a/indexing-hadoop/src/main/java/io/druid/indexer/hadoop/DatasourceInputFormat.java +++ b/indexing-hadoop/src/main/java/io/druid/indexer/hadoop/DatasourceInputFormat.java @@ -29,7 +29,6 @@ import io.druid.indexer.JobHelper; import io.druid.java.util.common.ISE; import io.druid.java.util.common.logger.Logger; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -65,7 +64,7 @@ public class DatasourceInputFormat extends InputFormat @Override public List getSplits(JobContext context) throws IOException, InterruptedException { - Configuration conf = context.getConfiguration(); + JobConf conf = new JobConf(context.getConfiguration()); String segmentsStr = Preconditions.checkNotNull( conf.get(CONF_INPUT_SEGMENTS), @@ -89,7 +88,7 @@ public List getSplits(JobContext context) throws IOException, Interr for (WindowedDataSegment segment : segments) { totalSize += segment.getSegment().getSize(); } - int mapTask = ((JobConf) conf).getNumMapTasks(); + int mapTask = conf.getNumMapTasks(); if (mapTask > 0) { maxSize = totalSize / mapTask; } @@ -116,11 +115,10 @@ public int compare(WindowedDataSegment s1, WindowedDataSegment s2) List list = new ArrayList<>(); long size = 0; - JobConf dummyConf = new JobConf(); org.apache.hadoop.mapred.InputFormat fio = supplier.get(); for (WindowedDataSegment segment : segments) { if (size + segment.getSegment().getSize() > maxSize && size > 0) { - splits.add(toDataSourceSplit(list, fio, dummyConf)); + splits.add(toDataSourceSplit(list, fio, conf)); list = Lists.newArrayList(); size = 0; } @@ -130,7 +128,7 @@ public int compare(WindowedDataSegment s1, WindowedDataSegment s2) } if (list.size() > 0) { - splits.add(toDataSourceSplit(list, fio, dummyConf)); + splits.add(toDataSourceSplit(list, fio, conf)); } logger.info("Number of splits [%d]", splits.size()); @@ -214,14 +212,14 @@ static Stream getLocations( try { return Arrays.stream(split.getLocations()); } - catch (final IOException e) { + catch (final Exception e) { logger.error(e, "Exception getting locations"); return Stream.empty(); } } ); } - catch (final IOException e) { + catch (final Exception e) { logger.error(e, "Exception getting splits"); return Stream.empty(); } diff --git a/indexing-hadoop/src/test/java/io/druid/indexer/HadoopIngestionSpecUpdateDatasourcePathSpecSegmentsTest.java b/indexing-hadoop/src/test/java/io/druid/indexer/HadoopIngestionSpecUpdateDatasourcePathSpecSegmentsTest.java index 50f51752cbec..0351a114e9c6 100644 --- a/indexing-hadoop/src/test/java/io/druid/indexer/HadoopIngestionSpecUpdateDatasourcePathSpecSegmentsTest.java +++ b/indexing-hadoop/src/test/java/io/druid/indexer/HadoopIngestionSpecUpdateDatasourcePathSpecSegmentsTest.java @@ -51,9 +51,12 @@ */ public class HadoopIngestionSpecUpdateDatasourcePathSpecSegmentsTest { - private final String testDatasource = "test"; - private final Interval testDatasourceInterval = Intervals.of("1970/3000"); - private final Interval testDatasourceIntervalPartial = Intervals.of("2050/3000"); + private static final String testDatasource = "test"; + private static final String testDatasource2 = "test2"; + private static final Interval testDatasourceInterval = Intervals.of("1970/3000"); + private static final Interval testDatasourceInterval2 = Intervals.of("2000/2001"); + private static final Interval testDatasourceIntervalPartial = Intervals.of("2050/3000"); + private final ObjectMapper jsonMapper; public 
HadoopIngestionSpecUpdateDatasourcePathSpecSegmentsTest() @@ -67,7 +70,7 @@ public HadoopIngestionSpecUpdateDatasourcePathSpecSegmentsTest() } private static final DataSegment SEGMENT = new DataSegment( - "test1", + testDatasource, Intervals.of("2000/3000"), "ver", ImmutableMap.of( @@ -81,6 +84,21 @@ public HadoopIngestionSpecUpdateDatasourcePathSpecSegmentsTest() 2 ); + private static final DataSegment SEGMENT2 = new DataSegment( + testDatasource2, + Intervals.of("2000/3000"), + "ver2", + ImmutableMap.of( + "type", "local", + "path", "/tmp/index2.zip" + ), + ImmutableList.of("host2"), + ImmutableList.of("visited_sum", "unique_hosts"), + NoneShardSpec.instance(), + 9, + 2 + ); + @Test public void testUpdateSegmentListIfDatasourcePathSpecIsUsedWithNoDatasourcePathSpec() throws Exception { @@ -213,6 +231,22 @@ public void testUpdateSegmentListIfDatasourcePathSpecIsUsedWithMultiplePathSpec( null ), null + ), + new DatasourcePathSpec( + jsonMapper, + null, + new DatasourceIngestionSpec( + testDatasource2, + testDatasourceInterval2, + null, + null, + null, + null, + null, + false, + null + ), + null ) ) ); @@ -224,6 +258,10 @@ public void testUpdateSegmentListIfDatasourcePathSpecIsUsedWithMultiplePathSpec( ImmutableList.of(WindowedDataSegment.of(SEGMENT)), ((DatasourcePathSpec) ((MultiplePathSpec) config.getPathSpec()).getChildren().get(1)).getSegments() ); + Assert.assertEquals( + ImmutableList.of(new WindowedDataSegment(SEGMENT2, testDatasourceInterval2)), + ((DatasourcePathSpec) ((MultiplePathSpec) config.getPathSpec()).getChildren().get(2)).getSegments() + ); } private HadoopDruidIndexerConfig testRunUpdateSegmentListIfDatasourcePathSpecIsUsed( @@ -259,9 +297,21 @@ private HadoopDruidIndexerConfig testRunUpdateSegmentListIfDatasourcePathSpecIsU ); UsedSegmentLister segmentLister = EasyMock.createMock(UsedSegmentLister.class); + EasyMock.expect( - segmentLister.getUsedSegmentsForIntervals(testDatasource, Lists.newArrayList(jobInterval)) + segmentLister.getUsedSegmentsForIntervals( + testDatasource, + Lists.newArrayList(jobInterval != null ? jobInterval.overlap(testDatasourceInterval) : null) + ) ).andReturn(ImmutableList.of(SEGMENT)); + + EasyMock.expect( + segmentLister.getUsedSegmentsForIntervals( + testDatasource2, + Lists.newArrayList(jobInterval != null ? 
jobInterval.overlap(testDatasourceInterval2) : null) + ) + ).andReturn(ImmutableList.of(SEGMENT2)); + EasyMock.replay(segmentLister); spec = HadoopIngestionSpec.updateSegmentListIfDatasourcePathSpecIsUsed(spec, jsonMapper, segmentLister); diff --git a/indexing-hadoop/src/test/java/io/druid/indexer/IndexGeneratorCombinerTest.java b/indexing-hadoop/src/test/java/io/druid/indexer/IndexGeneratorCombinerTest.java index 9eaf75349ed9..9eb75e27b779 100644 --- a/indexing-hadoop/src/test/java/io/druid/indexer/IndexGeneratorCombinerTest.java +++ b/indexing-hadoop/src/test/java/io/druid/indexer/IndexGeneratorCombinerTest.java @@ -25,6 +25,7 @@ import io.druid.data.input.InputRow; import io.druid.data.input.MapBasedInputRow; import io.druid.data.input.impl.DimensionsSpec; +import io.druid.data.input.impl.StringDimensionSchema; import io.druid.data.input.impl.StringInputRowParser; import io.druid.data.input.impl.TimeAndDimsParseSpec; import io.druid.data.input.impl.TimestampSpec; @@ -144,6 +145,17 @@ public void testMultipleRowsMerged() throws Exception ); BytesWritable key = keySortableBytes.toBytesWritable(); + DimensionsSpec dimensionsSpec = new DimensionsSpec( + Arrays.asList( + new StringDimensionSchema("host"), + new StringDimensionSchema("keywords") + ), + null, + null + ); + + Map typeHelperMap = InputRowSerde.getTypeHelperMap(dimensionsSpec); + InputRow row1 = new MapBasedInputRow( timestamp, ImmutableList.of("keywords"), @@ -163,8 +175,8 @@ public void testMultipleRowsMerged() throws Exception ) ); List rows = Lists.newArrayList( - new BytesWritable(InputRowSerde.toBytes(row1, aggregators, true)), - new BytesWritable(InputRowSerde.toBytes(row2, aggregators, true)) + new BytesWritable(InputRowSerde.toBytes(typeHelperMap, row1, aggregators, true)), + new BytesWritable(InputRowSerde.toBytes(typeHelperMap, row2, aggregators, true)) ); Reducer.Context context = EasyMock.createNiceMock(Reducer.Context.class); @@ -183,7 +195,7 @@ public void testMultipleRowsMerged() throws Exception Assert.assertTrue(captureKey.getValue() == key); - InputRow capturedRow = InputRowSerde.fromBytes(captureVal.getValue().getBytes(), aggregators); + InputRow capturedRow = InputRowSerde.fromBytes(typeHelperMap, captureVal.getValue().getBytes(), aggregators); Assert.assertEquals(Arrays.asList("host", "keywords"), capturedRow.getDimensions()); Assert.assertEquals(ImmutableList.of(), capturedRow.getDimension("host")); Assert.assertEquals(Arrays.asList("bar", "foo"), capturedRow.getDimension("keywords")); @@ -228,9 +240,21 @@ public void testMultipleRowsNotMerged() throws Exception "visited", 5 ) ); + + DimensionsSpec dimensionsSpec = new DimensionsSpec( + Arrays.asList( + new StringDimensionSchema("host"), + new StringDimensionSchema("keywords") + ), + null, + null + ); + + Map typeHelperMap = InputRowSerde.getTypeHelperMap(dimensionsSpec); + List rows = Lists.newArrayList( - new BytesWritable(InputRowSerde.toBytes(row1, aggregators, true)), - new BytesWritable(InputRowSerde.toBytes(row2, aggregators, true)) + new BytesWritable(InputRowSerde.toBytes(typeHelperMap, row1, aggregators, true)), + new BytesWritable(InputRowSerde.toBytes(typeHelperMap, row2, aggregators, true)) ); Reducer.Context context = EasyMock.createNiceMock(Reducer.Context.class); @@ -253,7 +277,7 @@ public void testMultipleRowsNotMerged() throws Exception Assert.assertTrue(captureKey1.getValue() == key); Assert.assertTrue(captureKey2.getValue() == key); - InputRow capturedRow1 = InputRowSerde.fromBytes(captureVal1.getValue().getBytes(), aggregators); + 
InputRow capturedRow1 = InputRowSerde.fromBytes(typeHelperMap, captureVal1.getValue().getBytes(), aggregators); Assert.assertEquals(Arrays.asList("host", "keywords"), capturedRow1.getDimensions()); Assert.assertEquals(Collections.singletonList("host1"), capturedRow1.getDimension("host")); Assert.assertEquals(Arrays.asList("bar", "foo"), capturedRow1.getDimension("keywords")); @@ -264,7 +288,7 @@ public void testMultipleRowsNotMerged() throws Exception 0.001 ); - InputRow capturedRow2 = InputRowSerde.fromBytes(captureVal2.getValue().getBytes(), aggregators); + InputRow capturedRow2 = InputRowSerde.fromBytes(typeHelperMap, captureVal2.getValue().getBytes(), aggregators); Assert.assertEquals(Arrays.asList("host", "keywords"), capturedRow2.getDimensions()); Assert.assertEquals(Collections.singletonList("host2"), capturedRow2.getDimension("host")); Assert.assertEquals(Arrays.asList("bar", "foo"), capturedRow2.getDimension("keywords")); diff --git a/indexing-hadoop/src/test/java/io/druid/indexer/InputRowSerdeTest.java b/indexing-hadoop/src/test/java/io/druid/indexer/InputRowSerdeTest.java index 67a9b5abf8a3..71609e42dd32 100644 --- a/indexing-hadoop/src/test/java/io/druid/indexer/InputRowSerdeTest.java +++ b/indexing-hadoop/src/test/java/io/druid/indexer/InputRowSerdeTest.java @@ -20,9 +20,14 @@ package io.druid.indexer; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; import io.druid.data.input.InputRow; import io.druid.data.input.MapBasedInputRow; +import io.druid.data.input.impl.DimensionsSpec; +import io.druid.data.input.impl.DoubleDimensionSchema; +import io.druid.data.input.impl.FloatDimensionSchema; +import io.druid.data.input.impl.LongDimensionSchema; +import io.druid.data.input.impl.StringDimensionSchema; import io.druid.hll.HyperLogLogCollector; import io.druid.jackson.AggregatorsModule; import io.druid.java.util.common.parsers.ParseException; @@ -35,8 +40,11 @@ import io.druid.segment.ColumnSelectorFactory; import org.easymock.EasyMock; import org.junit.Assert; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -53,17 +61,22 @@ public class InputRowSerdeTest new AggregatorsModule(); //registers ComplexMetric serde for hyperUnique } + @Rule + public ExpectedException expectedException = ExpectedException.none(); + public InputRowSerdeTest() { this.timestamp = System.currentTimeMillis(); - this.dims = ImmutableList.of("dim_non_existing", "d1", "d2"); - this.event = ImmutableMap.of( - "d1", "d1v", - "d2", ImmutableList.of("d2v1", "d2v2"), - "m1", 5.0f, - "m2", 100L, - "m3", "m3v" - ); + this.dims = ImmutableList.of("dim_non_existing", "d1", "d2", "d3", "d4", "d5"); + this.event = Maps.newHashMap(); + event.put("d1", "d1v"); + event.put("d2", ImmutableList.of("d2v1", "d2v2")); + event.put("d3", 200L); + event.put("d4", 300.1f); + event.put("d5", 400.5d); + event.put("m1", 5.0f); + event.put("m2", 100L); + event.put("m3", "m3v"); } @Test @@ -99,14 +112,29 @@ public Aggregator factorize(ColumnSelectorFactory metricFactory) } }; - byte[] data = InputRowSerde.toBytes(in, aggregatorFactories, false); // Ignore Unparseable aggregator - InputRow out = InputRowSerde.fromBytes(data, aggregatorFactories); + DimensionsSpec dimensionsSpec = new DimensionsSpec( + Arrays.asList( + new StringDimensionSchema("d1"), + new StringDimensionSchema("d2"), + new 
LongDimensionSchema("d3"), + new FloatDimensionSchema("d4"), + new DoubleDimensionSchema("d5") + ), + null, + null + ); + + byte[] data = InputRowSerde.toBytes(InputRowSerde.getTypeHelperMap(dimensionsSpec), in, aggregatorFactories, false); // Ignore Unparseable aggregator + InputRow out = InputRowSerde.fromBytes(InputRowSerde.getTypeHelperMap(dimensionsSpec), data, aggregatorFactories); Assert.assertEquals(timestamp, out.getTimestampFromEpoch()); Assert.assertEquals(dims, out.getDimensions()); Assert.assertEquals(Collections.EMPTY_LIST, out.getDimension("dim_non_existing")); Assert.assertEquals(ImmutableList.of("d1v"), out.getDimension("d1")); Assert.assertEquals(ImmutableList.of("d2v1", "d2v2"), out.getDimension("d2")); + Assert.assertEquals(200L, out.getRaw("d3")); + Assert.assertEquals(300.1f, out.getRaw("d4")); + Assert.assertEquals(400.5d, out.getRaw("d5")); Assert.assertEquals(0.0f, out.getMetric("agg_non_existing").floatValue(), 0.00001); Assert.assertEquals(5.0f, out.getMetric("m1out").floatValue(), 0.00001); @@ -117,7 +145,7 @@ public Aggregator factorize(ColumnSelectorFactory metricFactory) EasyMock.verify(mockedAggregator); } - @Test(expected = ParseException.class) + @Test public void testThrowParseExceptions() { InputRow in = new MapBasedInputRow( @@ -133,7 +161,66 @@ public void testThrowParseExceptions() new LongSumAggregatorFactory("unparseable", "m3") // Unparseable from String to Long }; - InputRowSerde.toBytes(in, aggregatorFactories, true); + DimensionsSpec dimensionsSpec = new DimensionsSpec( + Arrays.asList( + new StringDimensionSchema("d1"), + new StringDimensionSchema("d2"), + new LongDimensionSchema("d3"), + new FloatDimensionSchema("d4"), + new DoubleDimensionSchema("d5") + ), + null, + null + ); + expectedException.expect(ParseException.class); + expectedException.expectMessage("Encountered parse error for aggregator[unparseable]"); + InputRowSerde.toBytes(InputRowSerde.getTypeHelperMap(dimensionsSpec), in, aggregatorFactories, true); + } + + @Test + public void testDimensionParseExceptions() + { + InputRow in = new MapBasedInputRow( + timestamp, + dims, + event + ); + AggregatorFactory[] aggregatorFactories = new AggregatorFactory[]{ + new LongSumAggregatorFactory("m2out", "m2") + }; + + expectedException.expect(ParseException.class); + expectedException.expectMessage("could not convert value [d1v] to long"); + DimensionsSpec dimensionsSpec = new DimensionsSpec( + Arrays.asList( + new LongDimensionSchema("d1") + ), + null, + null + ); + InputRowSerde.toBytes(InputRowSerde.getTypeHelperMap(dimensionsSpec), in, aggregatorFactories, true); + + expectedException.expect(ParseException.class); + expectedException.expectMessage("could not convert value [d1v] to float"); + dimensionsSpec = new DimensionsSpec( + Arrays.asList( + new FloatDimensionSchema("d1") + ), + null, + null + ); + InputRowSerde.toBytes(InputRowSerde.getTypeHelperMap(dimensionsSpec), in, aggregatorFactories, true); + + expectedException.expect(ParseException.class); + expectedException.expectMessage("could not convert value [d1v] to double"); + dimensionsSpec = new DimensionsSpec( + Arrays.asList( + new DoubleDimensionSchema("d1") + ), + null, + null + ); + InputRowSerde.toBytes(InputRowSerde.getTypeHelperMap(dimensionsSpec), in, aggregatorFactories, true); } } diff --git a/indexing-service/pom.xml b/indexing-service/pom.xml index eedb98c94875..5900bae2e5f4 100644 --- a/indexing-service/pom.xml +++ b/indexing-service/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT diff 
--git a/indexing-service/src/main/java/io/druid/indexing/common/TaskToolbox.java b/indexing-service/src/main/java/io/druid/indexing/common/TaskToolbox.java index 3db6e0f7b907..dd132769192f 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/TaskToolbox.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/TaskToolbox.java @@ -27,8 +27,8 @@ import com.google.common.collect.Multimap; import com.google.common.collect.Multimaps; import com.google.inject.Provider; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.MonitorScheduler; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.MonitorScheduler; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.discovery.DataNodeService; diff --git a/indexing-service/src/main/java/io/druid/indexing/common/TaskToolboxFactory.java b/indexing-service/src/main/java/io/druid/indexing/common/TaskToolboxFactory.java index 43d2abea594c..c17b23fe210a 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/TaskToolboxFactory.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/TaskToolboxFactory.java @@ -23,8 +23,8 @@ import com.google.common.base.Preconditions; import com.google.inject.Inject; import com.google.inject.Provider; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.MonitorScheduler; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.MonitorScheduler; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.discovery.DataNodeService; diff --git a/indexing-service/src/main/java/io/druid/indexing/common/actions/LocalTaskActionClient.java b/indexing-service/src/main/java/io/druid/indexing/common/actions/LocalTaskActionClient.java index 7bce2b6e89ba..0e688ecbfd82 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/actions/LocalTaskActionClient.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/actions/LocalTaskActionClient.java @@ -19,7 +19,7 @@ package io.druid.indexing.common.actions; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexing.common.task.Task; import io.druid.indexing.overlord.TaskStorage; import io.druid.java.util.common.ISE; diff --git a/indexing-service/src/main/java/io/druid/indexing/common/actions/RemoteTaskActionClient.java b/indexing-service/src/main/java/io/druid/indexing/common/actions/RemoteTaskActionClient.java index b2e3c5f23c3c..9c1c7e823993 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/actions/RemoteTaskActionClient.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/actions/RemoteTaskActionClient.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Throwables; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.discovery.DruidLeaderClient; import io.druid.indexing.common.RetryPolicy; import io.druid.indexing.common.RetryPolicyFactory; diff --git a/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentMetadataUpdateAction.java b/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentMetadataUpdateAction.java index 2e33d5e15c09..7c55939f4d56 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentMetadataUpdateAction.java 
+++ b/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentMetadataUpdateAction.java @@ -24,7 +24,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.core.type.TypeReference; import com.google.common.collect.ImmutableSet; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.indexing.common.task.Task; import io.druid.indexing.overlord.CriticalAction; import io.druid.java.util.common.ISE; diff --git a/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentNukeAction.java b/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentNukeAction.java index 044cfebd00fa..dad3ca516460 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentNukeAction.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentNukeAction.java @@ -24,7 +24,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.core.type.TypeReference; import com.google.common.collect.ImmutableSet; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.indexing.common.task.Task; import io.druid.indexing.overlord.CriticalAction; import io.druid.java.util.common.ISE; diff --git a/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentTransactionalInsertAction.java b/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentTransactionalInsertAction.java index c22275def06a..acd6eb5a647c 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentTransactionalInsertAction.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/actions/SegmentTransactionalInsertAction.java @@ -23,12 +23,12 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.core.type.TypeReference; import com.google.common.collect.ImmutableSet; -import com.metamx.emitter.service.ServiceMetricEvent; import io.druid.indexing.common.task.Task; import io.druid.indexing.overlord.CriticalAction; import io.druid.indexing.overlord.DataSourceMetadata; import io.druid.indexing.overlord.SegmentPublishResult; import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.query.DruidMetrics; import io.druid.timeline.DataSegment; @@ -127,22 +127,21 @@ public SegmentPublishResult perform(Task task, TaskActionToolbox toolbox) throws throw new RuntimeException(e); } + // Emit metrics + final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder() + .setDimension(DruidMetrics.DATASOURCE, task.getDataSource()) + .setDimension(DruidMetrics.TASK_TYPE, task.getType()); + if (retVal.isSuccess()) { - // Emit metrics - final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder() - .setDimension(DruidMetrics.DATASOURCE, task.getDataSource()) - .setDimension(DruidMetrics.TASK_TYPE, task.getType()); - - if (retVal.isSuccess()) { - toolbox.getEmitter().emit(metricBuilder.build("segment/txn/success", 1)); - } else { - toolbox.getEmitter().emit(metricBuilder.build("segment/txn/failure", 1)); - } - - for (DataSegment segment : retVal.getSegments()) { - metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString()); - toolbox.getEmitter().emit(metricBuilder.build("segment/added/bytes", segment.getSize())); - } + 
toolbox.getEmitter().emit(metricBuilder.build("segment/txn/success", 1)); + } else { + toolbox.getEmitter().emit(metricBuilder.build("segment/txn/failure", 1)); + } + + // getSegments() should return an empty set if announceHistoricalSegments() failed + for (DataSegment segment : retVal.getSegments()) { + metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString()); + toolbox.getEmitter().emit(metricBuilder.build("segment/added/bytes", segment.getSize())); } return retVal; diff --git a/indexing-service/src/main/java/io/druid/indexing/common/actions/TaskActionToolbox.java b/indexing-service/src/main/java/io/druid/indexing/common/actions/TaskActionToolbox.java index 59ca22942159..4d08465b99e4 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/actions/TaskActionToolbox.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/actions/TaskActionToolbox.java @@ -20,7 +20,7 @@ package io.druid.indexing.common.actions; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.indexing.overlord.IndexerMetadataStorageCoordinator; import io.druid.indexing.overlord.TaskLockbox; import io.druid.indexing.overlord.supervisor.SupervisorManager; diff --git a/indexing-service/src/main/java/io/druid/indexing/common/index/YeOldePlumberSchool.java b/indexing-service/src/main/java/io/druid/indexing/common/index/YeOldePlumberSchool.java index b6e2a26d88fc..4364f50f40a8 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/index/YeOldePlumberSchool.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/index/YeOldePlumberSchool.java @@ -199,7 +199,7 @@ public void finishJob() .withDimensions(ImmutableList.copyOf(mappedSegment.getAvailableDimensions())) .withBinaryVersion(SegmentUtils.getVersionFromDir(fileToUpload)); - dataSegmentPusher.push(fileToUpload, segmentToUpload); + dataSegmentPusher.push(fileToUpload, segmentToUpload, false); log.info( "Uploaded segment[%s]", diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/CompactionTask.java b/indexing-service/src/main/java/io/druid/indexing/common/task/CompactionTask.java index 96f3889982b9..5e94285cd9f7 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/CompactionTask.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/CompactionTask.java @@ -51,6 +51,7 @@ import io.druid.java.util.common.ISE; import io.druid.java.util.common.JodaUtils; import io.druid.java.util.common.Pair; +import io.druid.java.util.common.RE; import io.druid.java.util.common.granularity.NoneGranularity; import io.druid.java.util.common.guava.Comparators; import io.druid.java.util.common.jackson.JacksonUtils; @@ -78,6 +79,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -189,26 +191,35 @@ public TaskStatus run(final TaskToolbox toolbox) throws Exception jsonMapper ); - indexTaskSpec = new IndexTask( - getId(), - getGroupId(), - getTaskResource(), - getDataSource(), - ingestionSpec, - getContext() - ); - } - - if (indexTaskSpec.getIngestionSchema() == null) { - log.info("Cannot find segments for interval"); + if (ingestionSpec != null) { + indexTaskSpec = new IndexTask( + getId(), + getGroupId(), + getTaskResource(), + getDataSource(), + ingestionSpec, + getContext() + ); + } } - final String json = 
jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(indexTaskSpec); - log.info("Generated compaction task details: " + json); + if (indexTaskSpec == null) { + log.warn("Failed to generate compaction spec"); + return TaskStatus.failure(getId()); + } else { + final String json = jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(indexTaskSpec); + log.info("Generated compaction task details: " + json); - return indexTaskSpec.run(toolbox); + return indexTaskSpec.run(toolbox); + } } + /** + * Generate {@link IndexIngestionSpec} from input segments. + * + * @return null if input segments don't exist. Otherwise, a generated ingestionSpec. + */ + @Nullable @VisibleForTesting static IndexIngestionSpec createIngestionSchema( TaskToolbox toolbox, @@ -281,12 +292,22 @@ private static DataSchema createDataSchema( throws IOException, SegmentLoadingException { // find metadata for interval - final List queryableIndices = loadSegments(timelineSegments, segmentFileMap, indexIO); + final List> queryableIndexAndSegments = loadSegments( + timelineSegments, + segmentFileMap, + indexIO + ); // find merged aggregators - final List aggregatorFactories = queryableIndices + for (Pair pair : queryableIndexAndSegments) { + final QueryableIndex index = pair.lhs; + if (index.getMetadata() == null) { + throw new RE("Index metadata doesn't exist for segment[%s]", pair.rhs.getIdentifier()); + } + } + final List aggregatorFactories = queryableIndexAndSegments .stream() - .map(index -> index.getMetadata().getAggregators()) + .map(pair -> pair.lhs.getMetadata().getAggregators()) // We have already done null check on index.getMetadata() .collect(Collectors.toList()); final AggregatorFactory[] mergedAggregators = AggregatorFactory.mergeAggregators(aggregatorFactories); @@ -296,7 +317,11 @@ private static DataSchema createDataSchema( // find granularity spec // set rollup only if rollup is set for all segments - final boolean rollup = queryableIndices.stream().allMatch(index -> index.getMetadata().isRollup()); + final boolean rollup = queryableIndexAndSegments.stream().allMatch(pair -> { + // We have already checked getMetadata() doesn't return null + final Boolean isRollup = pair.lhs.getMetadata().isRollup(); + return isRollup != null && isRollup; + }); final GranularitySpec granularitySpec = new ArbitraryGranularitySpec( new NoneGranularity(), rollup, @@ -305,7 +330,7 @@ private static DataSchema createDataSchema( // find unique dimensions final DimensionsSpec finalDimensionsSpec = dimensionsSpec == null ? - createDimensionsSpec(queryableIndices) : + createDimensionsSpec(queryableIndexAndSegments) : dimensionsSpec; final InputRowParser parser = new NoopInputRowParser(new TimeAndDimsParseSpec(null, finalDimensionsSpec)); @@ -319,7 +344,7 @@ private static DataSchema createDataSchema( ); } - private static DimensionsSpec createDimensionsSpec(List queryableIndices) + private static DimensionsSpec createDimensionsSpec(List> queryableIndices) { final BiMap uniqueDims = HashBiMap.create(); final Map dimensionSchemaMap = new HashMap<>(); @@ -329,9 +354,24 @@ private static DimensionsSpec createDimensionsSpec(List queryabl // Dimensions are extracted from the recent segments to olders because recent segments are likely to be queried more // frequently, and thus the performance should be optimized for recent ones rather than old ones. - // timelineSegments are sorted in order of interval + // timelineSegments are sorted in order of interval, but we do a sanity check here. 
+ final Comparator intervalComparator = Comparators.intervalsByStartThenEnd(); + for (int i = 0; i < queryableIndices.size() - 1; i++) { + final Interval shouldBeSmaller = queryableIndices.get(i).lhs.getDataInterval(); + final Interval shouldBeLarger = queryableIndices.get(i + 1).lhs.getDataInterval(); + Preconditions.checkState( + intervalComparator.compare(shouldBeSmaller, shouldBeLarger) <= 0, + "QueryableIndexes are not sorted! Interval[%s] of segment[%s] is later than interval[%s] of segment[%s]", + shouldBeSmaller, + queryableIndices.get(i).rhs.getIdentifier(), + shouldBeLarger, + queryableIndices.get(i + 1).rhs.getIdentifier() + ); + } + int index = 0; - for (QueryableIndex queryableIndex : Lists.reverse(queryableIndices)) { + for (Pair pair : Lists.reverse(queryableIndices)) { + final QueryableIndex queryableIndex = pair.lhs; final Map dimensionHandlerMap = queryableIndex.getDimensionHandlers(); for (String dimension : queryableIndex.getAvailableDimensions()) { @@ -376,23 +416,22 @@ private static DimensionsSpec createDimensionsSpec(List queryabl return new DimensionsSpec(dimensionSchemas, null, null); } - private static List loadSegments( + private static List> loadSegments( List> timelineSegments, Map segmentFileMap, IndexIO indexIO ) throws IOException { - final List segments = new ArrayList<>(); + final List> segments = new ArrayList<>(); for (TimelineObjectHolder timelineSegment : timelineSegments) { final PartitionHolder partitionHolder = timelineSegment.getObject(); for (PartitionChunk chunk : partitionHolder) { final DataSegment segment = chunk.getObject(); - segments.add( - indexIO.loadIndex( - Preconditions.checkNotNull(segmentFileMap.get(segment), "File for segment %s", segment.getIdentifier()) - ) + final QueryableIndex queryableIndex = indexIO.loadIndex( + Preconditions.checkNotNull(segmentFileMap.get(segment), "File for segment %s", segment.getIdentifier()) ); + segments.add(Pair.of(queryableIndex, segment)); } } diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/ConvertSegmentTask.java b/indexing-service/src/main/java/io/druid/indexing/common/task/ConvertSegmentTask.java index 4605995ee613..460225af1fe0 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/ConvertSegmentTask.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/ConvertSegmentTask.java @@ -440,7 +440,8 @@ private void convertSegment(TaskToolbox toolbox) throws SegmentLoadingException, // Appending to the version makes a new version that inherits most comparability parameters of the original // version, but is "newer" than said original version.
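The sanity check added above relies on start-then-end interval ordering. The following is a self-contained sketch of that ordering check, using plain Joda-Time in place of Druid's Comparators.intervalsByStartThenEnd(); the class name and sample intervals are illustrative, not code from the patch.

import org.joda.time.Interval;

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class IntervalOrderCheck
{
  // Illustrative stand-in for Comparators.intervalsByStartThenEnd():
  // earlier start first, ties broken by earlier end.
  private static final Comparator<Interval> BY_START_THEN_END =
      Comparator.comparing(Interval::getStart).thenComparing(Interval::getEnd);

  static void checkSorted(List<Interval> intervals)
  {
    for (int i = 0; i < intervals.size() - 1; i++) {
      if (BY_START_THEN_END.compare(intervals.get(i), intervals.get(i + 1)) > 0) {
        throw new IllegalStateException(
            "Not sorted: " + intervals.get(i) + " is later than " + intervals.get(i + 1)
        );
      }
    }
  }

  public static void main(String[] args)
  {
    // In order: passes silently.
    checkSorted(Arrays.asList(
        Interval.parse("2000-01-01/2000-02-01"),
        Interval.parse("2000-02-01/2000-03-01")
    ));
    // Out of order: throws IllegalStateException.
    checkSorted(Arrays.asList(
        Interval.parse("2000-02-01/2000-03-01"),
        Interval.parse("2000-01-01/2000-02-01")
    ));
  }
}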
DataSegment updatedSegment = segment.withVersion(StringUtils.format("%s_v%s", segment.getVersion(), outVersion)); - updatedSegment = toolbox.getSegmentPusher().push(outLocation, updatedSegment); + + updatedSegment = toolbox.getSegmentPusher().push(outLocation, updatedSegment, false); actionClient.submit(new SegmentInsertAction(Sets.newHashSet(updatedSegment))); } else { diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/IndexTask.java b/indexing-service/src/main/java/io/druid/indexing/common/task/IndexTask.java index 1a9bade78c9f..1d88733f9522 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/IndexTask.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/IndexTask.java @@ -24,11 +24,9 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonTypeName; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Function; import com.google.common.base.Joiner; import com.google.common.base.Optional; import com.google.common.base.Preconditions; -import com.google.common.base.Supplier; import com.google.common.base.Throwables; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -36,7 +34,6 @@ import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; import com.google.common.util.concurrent.ListenableFuture; -import io.druid.data.input.Committer; import io.druid.data.input.Firehose; import io.druid.data.input.FirehoseFactory; import io.druid.data.input.InputRow; @@ -57,7 +54,6 @@ import io.druid.java.util.common.guava.Comparators; import io.druid.java.util.common.logger.Logger; import io.druid.java.util.common.parsers.ParseException; -import io.druid.segment.writeout.SegmentWriteOutMediumFactory; import io.druid.query.DruidMetrics; import io.druid.segment.IndexSpec; import io.druid.segment.indexing.DataSchema; @@ -71,15 +67,15 @@ import io.druid.segment.realtime.RealtimeMetricsMonitor; import io.druid.segment.realtime.appenderator.Appenderator; import io.druid.segment.realtime.appenderator.AppenderatorConfig; -import io.druid.segment.realtime.appenderator.AppenderatorDriver; +import io.druid.segment.realtime.appenderator.BaseAppenderatorDriver; import io.druid.segment.realtime.appenderator.AppenderatorDriverAddResult; import io.druid.segment.realtime.appenderator.Appenderators; +import io.druid.segment.realtime.appenderator.BatchAppenderatorDriver; import io.druid.segment.realtime.appenderator.SegmentAllocator; import io.druid.segment.realtime.appenderator.SegmentIdentifier; import io.druid.segment.realtime.appenderator.SegmentsAndMetadata; import io.druid.segment.realtime.appenderator.TransactionalSegmentPublisher; -import io.druid.segment.realtime.plumber.Committers; -import io.druid.segment.realtime.plumber.NoopSegmentHandoffNotifierFactory; +import io.druid.segment.writeout.SegmentWriteOutMediumFactory; import io.druid.timeline.DataSegment; import io.druid.timeline.partition.HashBasedNumberedShardSpec; import io.druid.timeline.partition.NoneShardSpec; @@ -284,16 +280,15 @@ private static String findVersion(Map versions, Interval inter private static boolean isGuaranteedRollup(IndexIOConfig ioConfig, IndexTuningConfig tuningConfig) { Preconditions.checkState( - !(tuningConfig.isForceGuaranteedRollup() && - (tuningConfig.isForceExtendableShardSpecs() || ioConfig.isAppendToExisting())), - "Perfect rollup cannot be guaranteed with extendable shardSpecs" + !tuningConfig.isForceGuaranteedRollup() 
|| !ioConfig.isAppendToExisting(), + "Perfect rollup cannot be guaranteed when appending to existing dataSources" ); return tuningConfig.isForceGuaranteedRollup(); } private static boolean isExtendableShardSpecs(IndexIOConfig ioConfig, IndexTuningConfig tuningConfig) { - return !isGuaranteedRollup(ioConfig, tuningConfig); + return tuningConfig.isForceExtendableShardSpecs() || ioConfig.isAppendToExisting(); } /** @@ -546,7 +541,7 @@ private static BiFunction getShardSpecCreateFunctio } /** - * This method reads input data row by row and adds the read row to a proper segment using {@link AppenderatorDriver}. + * This method reads input data row by row and adds the read row to a proper segment using {@link BaseAppenderatorDriver}. * If there is no segment for the row, a new one is created. Segments can be published in the middle of reading inputs * if one of below conditions are satisfied. * @@ -555,7 +550,7 @@ private static BiFunction getShardSpecCreateFunctio * If the number of rows in a segment exceeds {@link IndexTuningConfig#targetPartitionSize} * *
  • - * If the number of rows added to {@link AppenderatorDriver} so far exceeds {@link IndexTuningConfig#maxTotalRows} + * If the number of rows added to {@link BaseAppenderatorDriver} so far exceeds {@link IndexTuningConfig#maxTotalRows} *
  • * * @@ -590,11 +585,7 @@ dataSchema, new RealtimeIOConfig(null, null, null), null final IndexIOConfig ioConfig = ingestionSchema.getIOConfig(); final IndexTuningConfig tuningConfig = ingestionSchema.tuningConfig; - final long publishTimeout = tuningConfig.getPublishTimeout(); - final long maxRowsInAppenderator = tuningConfig.getMaxTotalRows(); - final int maxRowsInSegment = tuningConfig.getTargetPartitionSize() == null - ? Integer.MAX_VALUE - : tuningConfig.getTargetPartitionSize(); + final long pushTimeout = tuningConfig.getPushTimeout(); final boolean isGuaranteedRollup = isGuaranteedRollup(ioConfig, tuningConfig); final SegmentAllocator segmentAllocator; @@ -644,7 +635,12 @@ dataSchema, new RealtimeIOConfig(null, null, null), null } final int partitionNum = counters.computeIfAbsent(interval, x -> new AtomicInteger()).getAndIncrement(); - return new SegmentIdentifier(getDataSource(), interval, findVersion(versions, interval), new NumberedShardSpec(partitionNum, 0)); + return new SegmentIdentifier( + getDataSource(), + interval, + findVersion(versions, interval), + new NumberedShardSpec(partitionNum, 0) + ); }; } @@ -654,97 +650,73 @@ dataSchema, new RealtimeIOConfig(null, null, null), null }; try ( - final Appenderator appenderator = newAppenderator(fireDepartmentMetrics, toolbox, dataSchema, tuningConfig); - final AppenderatorDriver driver = newDriver( - appenderator, - toolbox, - segmentAllocator, - fireDepartmentMetrics - ); - final Firehose firehose = firehoseFactory.connect(dataSchema.getParser(), firehoseTempDir) + final Appenderator appenderator = newAppenderator(fireDepartmentMetrics, toolbox, dataSchema, tuningConfig); + final BatchAppenderatorDriver driver = newDriver(appenderator, toolbox, segmentAllocator); + final Firehose firehose = firehoseFactory.connect(dataSchema.getParser(), firehoseTempDir) ) { - final Supplier committerSupplier = Committers.supplierFromFirehose(firehose); + driver.startJob(); - if (driver.startJob() != null) { - driver.clear(); - } - - try { - while (firehose.hasMore()) { - try { - final InputRow inputRow = firehose.nextRow(); + while (firehose.hasMore()) { + try { + final InputRow inputRow = firehose.nextRow(); - if (inputRow == null) { - fireDepartmentMetrics.incrementThrownAway(); - continue; - } + if (inputRow == null) { + fireDepartmentMetrics.incrementThrownAway(); + continue; + } - final Optional optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp()); - if (!optInterval.isPresent()) { - fireDepartmentMetrics.incrementThrownAway(); - continue; - } + final Optional optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp()); + if (!optInterval.isPresent()) { + fireDepartmentMetrics.incrementThrownAway(); + continue; + } - final String sequenceName; - - if (isGuaranteedRollup) { - // Sequence name is based solely on the shardSpec, and there will only be one segment per sequence. - final Interval interval = optInterval.get(); - final ShardSpec shardSpec = shardSpecs.getShardSpec(interval, inputRow); - sequenceName = Appenderators.getSequenceName(interval, findVersion(versions, interval), shardSpec); - } else { - // Segments are created as needed, using a single sequence name. They may be allocated from the overlord - // (in append mode) or may be created on our own authority (in overwrite mode). 
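The segment allocator above hands out partition numbers with a per-interval counter via computeIfAbsent. A stripped-down sketch of that pattern follows; the class, the string interval keys, and the method names are illustrative only, not the Druid API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class PartitionCounter
{
  // One monotonically increasing partition number per interval key, mirroring
  // counters.computeIfAbsent(interval, x -> new AtomicInteger()).getAndIncrement() above.
  private final Map<String, AtomicInteger> counters = new ConcurrentHashMap<>();

  int nextPartitionNum(String intervalKey)
  {
    return counters.computeIfAbsent(intervalKey, k -> new AtomicInteger()).getAndIncrement();
  }

  public static void main(String[] args)
  {
    PartitionCounter c = new PartitionCounter();
    System.out.println(c.nextPartitionNum("2000/2001")); // 0
    System.out.println(c.nextPartitionNum("2000/2001")); // 1
    System.out.println(c.nextPartitionNum("2001/2002")); // 0, independent counter per interval
  }
}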
- sequenceName = getId(); - } - final AppenderatorDriverAddResult addResult = driver.add(inputRow, sequenceName, committerSupplier); - - if (addResult.isOk()) { - // incremental segment publishment is allowed only when rollup don't have to be perfect. - if (!isGuaranteedRollup && - (addResult.getNumRowsInSegment() >= maxRowsInSegment || - addResult.getTotalNumRowsInAppenderator() >= maxRowsInAppenderator)) { - // There can be some segments waiting for being published even though any rows won't be added to them. - // If those segments are not published here, the available space in appenderator will be kept to be small - // which makes the size of segments smaller. - final SegmentsAndMetadata published = awaitPublish( - driver.publishAll( - publisher, - committerSupplier.get() - ), - publishTimeout - ); - // Even though IndexTask uses NoopHandoffNotifier which does nothing for segment handoff, - // the below code is needed to update the total number of rows added to the appenderator so far. - // See AppenderatorDriver.registerHandoff() and Appenderator.drop(). - // A hard-coded timeout is used here because the below get() is expected to return immediately. - driver.registerHandoff(published).get(30, TimeUnit.SECONDS); - } - } else { - throw new ISE("Failed to add a row with timestamp[%s]", inputRow.getTimestamp()); - } + final String sequenceName; - fireDepartmentMetrics.incrementProcessed(); + if (isGuaranteedRollup) { + // Sequence name is based solely on the shardSpec, and there will only be one segment per sequence. + final Interval interval = optInterval.get(); + final ShardSpec shardSpec = shardSpecs.getShardSpec(interval, inputRow); + sequenceName = Appenderators.getSequenceName(interval, findVersion(versions, interval), shardSpec); + } else { + // Segments are created as needed, using a single sequence name. They may be allocated from the overlord + // (in append mode) or may be created on our own authority (in overwrite mode). + sequenceName = getId(); } - catch (ParseException e) { - if (tuningConfig.isReportParseExceptions()) { - throw e; - } else { - fireDepartmentMetrics.incrementUnparseable(); + final AppenderatorDriverAddResult addResult = driver.add(inputRow, sequenceName); + + if (addResult.isOk()) { + // incremental segment publishment is allowed only when rollup don't have to be perfect. + if (!isGuaranteedRollup && + (exceedMaxRowsInSegment(addResult.getNumRowsInSegment(), tuningConfig) || + exceedMaxRowsInAppenderator(addResult.getTotalNumRowsInAppenderator(), tuningConfig))) { + // There can be some segments waiting for being published even though any rows won't be added to them. + // If those segments are not published here, the available space in appenderator will be kept to be small + // which makes the size of segments smaller. 
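Before the final pushAllAndClear below, the loop above pushes incrementally whenever a segment or the appenderator crosses its row threshold. A toy model of that push-when-full policy, under the assumption of a simple in-memory buffer (RowBuffer and the threshold are illustrative stand-ins, not the Druid API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class PushWhenFull
{
  // Illustrative stand-in for the appenderator: buffers rows until "pushed".
  static class RowBuffer
  {
    private final List<String> rows = new ArrayList<>();

    void add(String row)
    {
      rows.add(row);
    }

    int size()
    {
      return rows.size();
    }

    List<String> pushAndClear()
    {
      List<String> pushed = new ArrayList<>(rows);
      rows.clear();
      return pushed;
    }
  }

  public static void main(String[] args)
  {
    final int maxTotalRows = 3; // analogous to IndexTuningConfig#maxTotalRows
    final RowBuffer buffer = new RowBuffer();

    final Iterator<String> firehose = Arrays.asList("a", "b", "c", "d", "e").iterator();
    while (firehose.hasNext()) {
      buffer.add(firehose.next());
      // Intermediate push keeps buffered state small so later segments are not starved for space.
      if (buffer.size() >= maxTotalRows) {
        System.out.println("pushed " + buffer.pushAndClear());
      }
    }
    // Final push of whatever remains, then publish.
    System.out.println("pushed " + buffer.pushAndClear());
  }
}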
+ final SegmentsAndMetadata pushed = driver.pushAllAndClear(pushTimeout); + log.info("Pushed segments[%s]", pushed.getSegments()); } + } else { + throw new ISE("Failed to add a row with timestamp[%s]", inputRow.getTimestamp()); + } + + fireDepartmentMetrics.incrementProcessed(); + } + catch (ParseException e) { + if (tuningConfig.isReportParseExceptions()) { + throw e; + } else { + fireDepartmentMetrics.incrementUnparseable(); } } - } - finally { - driver.persist(committerSupplier.get()); } + final SegmentsAndMetadata pushed = driver.pushAllAndClear(pushTimeout); + log.info("Pushed segments[%s]", pushed.getSegments()); + final SegmentsAndMetadata published = awaitPublish( - driver.publishAll( - publisher, - committerSupplier.get() - ), - publishTimeout + driver.publishAll(publisher), + pushTimeout ); if (published == null) { @@ -755,14 +727,7 @@ dataSchema, new RealtimeIOConfig(null, null, null), null "Published segments[%s]", Joiner.on(", ").join( Iterables.transform( published.getSegments(), - new Function() - { - @Override - public String apply(DataSegment input) - { - return input.getIdentifier(); - } - } + DataSegment::getIdentifier ) ) ); @@ -774,11 +739,24 @@ public String apply(DataSegment input) } } + private static boolean exceedMaxRowsInSegment(int numRowsInSegment, IndexTuningConfig indexTuningConfig) + { + // maxRowsInSegment should be null if numShards is set in indexTuningConfig + final Integer maxRowsInSegment = indexTuningConfig.getTargetPartitionSize(); + return maxRowsInSegment != null && maxRowsInSegment <= numRowsInSegment; + } + + private static boolean exceedMaxRowsInAppenderator(long numRowsInAppenderator, IndexTuningConfig indexTuningConfig) + { + // maxRowsInAppenderator should be null if numShards is set in indexTuningConfig + final Long maxRowsInAppenderator = indexTuningConfig.getMaxTotalRows(); + return maxRowsInAppenderator != null && maxRowsInAppenderator <= numRowsInAppenderator; + } + private static SegmentsAndMetadata awaitPublish( ListenableFuture publishFuture, long publishTimeout - ) - throws ExecutionException, InterruptedException, TimeoutException + ) throws ExecutionException, InterruptedException, TimeoutException { if (publishTimeout == 0) { return publishFuture.get(); @@ -805,20 +783,17 @@ private static Appenderator newAppenderator( ); } - private static AppenderatorDriver newDriver( + private static BatchAppenderatorDriver newDriver( final Appenderator appenderator, final TaskToolbox toolbox, - final SegmentAllocator segmentAllocator, - final FireDepartmentMetrics metrics + final SegmentAllocator segmentAllocator ) { - return new AppenderatorDriver( + return new BatchAppenderatorDriver( appenderator, segmentAllocator, - new NoopSegmentHandoffNotifierFactory(), // don't wait for handoff since we don't serve queries new ActionBasedUsedSegmentChecker(toolbox.getTaskActionClient()), - toolbox.getObjectMapper(), - metrics + toolbox.getDataSegmentKiller() ); } @@ -950,21 +925,33 @@ public static class IndexTuningConfig implements TuningConfig, AppenderatorConfi private static final boolean DEFAULT_FORCE_EXTENDABLE_SHARD_SPECS = false; private static final boolean DEFAULT_GUARANTEE_ROLLUP = false; private static final boolean DEFAULT_REPORT_PARSE_EXCEPTIONS = false; - private static final long DEFAULT_PUBLISH_TIMEOUT = 0; + private static final long DEFAULT_PUSH_TIMEOUT = 0; static final int DEFAULT_TARGET_PARTITION_SIZE = 5000000; private final Integer targetPartitionSize; private final int maxRowsInMemory; - private final int maxTotalRows; + 
private final Long maxTotalRows; private final Integer numShards; private final IndexSpec indexSpec; private final File basePersistDirectory; private final int maxPendingPersists; + + /** + * This flag forces the task to always use an extendableShardSpec (like {@link NumberedShardSpec}) even if + * {@link #forceGuaranteedRollup} is set. + */ private final boolean forceExtendableShardSpecs; + + /** + * This flag forces _perfect rollup mode_. {@link IndexTask} will scan the whole input data twice to 1) figure + * out proper shard specs for each segment and 2) generate segments. Note that perfect rollup mode basically assumes + * that no more data will be appended in the future. As a result, in perfect rollup mode, {@link NoneShardSpec} and + * {@link HashBasedNumberedShardSpec} are used for a single shard and two or more shards, respectively. + */ private final boolean forceGuaranteedRollup; private final boolean reportParseExceptions; - private final long publishTimeout; + private final long pushTimeout; @Nullable private final SegmentWriteOutMediumFactory segmentWriteOutMediumFactory; @@ -972,7 +959,7 @@ public static class IndexTuningConfig implements TuningConfig, AppenderatorConfi public IndexTuningConfig( @JsonProperty("targetPartitionSize") @Nullable Integer targetPartitionSize, @JsonProperty("maxRowsInMemory") @Nullable Integer maxRowsInMemory, - @JsonProperty("maxTotalRows") @Nullable Integer maxTotalRows, + @JsonProperty("maxTotalRows") @Nullable Long maxTotalRows, @JsonProperty("rowFlushBoundary") @Nullable Integer rowFlushBoundary_forBackCompatibility, // DEPRECATED @JsonProperty("numShards") @Nullable Integer numShards, @JsonProperty("indexSpec") @Nullable IndexSpec indexSpec, @@ -982,7 +969,8 @@ public IndexTuningConfig( @JsonProperty("forceExtendableShardSpecs") @Nullable Boolean forceExtendableShardSpecs, @JsonProperty("forceGuaranteedRollup") @Nullable Boolean forceGuaranteedRollup, @JsonProperty("reportParseExceptions") @Nullable Boolean reportParseExceptions, - @JsonProperty("publishTimeout") @Nullable Long publishTimeout, + @JsonProperty("publishTimeout") @Nullable Long publishTimeout, // deprecated + @JsonProperty("pushTimeout") @Nullable Long pushTimeout, @JsonProperty("segmentWriteOutMediumFactory") @Nullable SegmentWriteOutMediumFactory segmentWriteOutMediumFactory ) { @@ -996,7 +984,7 @@ public IndexTuningConfig( forceExtendableShardSpecs, forceGuaranteedRollup, reportParseExceptions, - publishTimeout, + pushTimeout != null ? pushTimeout : publishTimeout, null, segmentWriteOutMediumFactory ); @@ -1010,14 +998,14 @@ private IndexTuningConfig() private IndexTuningConfig( @Nullable Integer targetPartitionSize, @Nullable Integer maxRowsInMemory, - @Nullable Integer maxTotalRows, + @Nullable Long maxTotalRows, @Nullable Integer numShards, @Nullable IndexSpec indexSpec, @Nullable Integer maxPendingPersists, @Nullable Boolean forceExtendableShardSpecs, @Nullable Boolean forceGuaranteedRollup, @Nullable Boolean reportParseExceptions, - @Nullable Long publishTimeout, + @Nullable Long pushTimeout, @Nullable File basePersistDirectory, @Nullable SegmentWriteOutMediumFactory segmentWriteOutMediumFactory ) { @@ -1027,15 +1015,9 @@ private IndexTuningConfig( "targetPartitionSize and numShards cannot both be set" ); - this.targetPartitionSize = numShards != null && !numShards.equals(-1) - ? null - : (targetPartitionSize == null || targetPartitionSize.equals(-1) - ?
DEFAULT_TARGET_PARTITION_SIZE - : targetPartitionSize); + this.targetPartitionSize = initializeTargetPartitionSize(numShards, targetPartitionSize); this.maxRowsInMemory = maxRowsInMemory == null ? DEFAULT_MAX_ROWS_IN_MEMORY : maxRowsInMemory; - this.maxTotalRows = maxTotalRows == null - ? DEFAULT_MAX_TOTAL_ROWS - : maxTotalRows; + this.maxTotalRows = initializeMaxTotalRows(numShards, maxTotalRows); this.numShards = numShards == null || numShards.equals(-1) ? null : numShards; this.indexSpec = indexSpec == null ? DEFAULT_INDEX_SPEC : indexSpec; this.maxPendingPersists = maxPendingPersists == null ? DEFAULT_MAX_PENDING_PERSISTS : maxPendingPersists; @@ -1046,17 +1028,32 @@ private IndexTuningConfig( this.reportParseExceptions = reportParseExceptions == null ? DEFAULT_REPORT_PARSE_EXCEPTIONS : reportParseExceptions; - this.publishTimeout = publishTimeout == null ? DEFAULT_PUBLISH_TIMEOUT : publishTimeout; + this.pushTimeout = pushTimeout == null ? DEFAULT_PUSH_TIMEOUT : pushTimeout; this.basePersistDirectory = basePersistDirectory; - Preconditions.checkArgument( - !(this.forceExtendableShardSpecs && this.forceGuaranteedRollup), - "Perfect rollup cannot be guaranteed with extendable shardSpecs" - ); - this.segmentWriteOutMediumFactory = segmentWriteOutMediumFactory; } + private static Integer initializeTargetPartitionSize(Integer numShards, Integer targetPartitionSize) + { + if (numShards == null || numShards == -1) { + return targetPartitionSize == null || targetPartitionSize.equals(-1) + ? DEFAULT_TARGET_PARTITION_SIZE + : targetPartitionSize; + } else { + return null; + } + } + + private static Long initializeMaxTotalRows(Integer numShards, Long maxTotalRows) + { + if (numShards == null || numShards == -1) { + return maxTotalRows == null ? DEFAULT_MAX_TOTAL_ROWS : maxTotalRows; + } else { + return null; + } + } + public IndexTuningConfig withBasePersistDirectory(File dir) { return new IndexTuningConfig( @@ -1069,7 +1066,7 @@ public IndexTuningConfig withBasePersistDirectory(File dir) forceExtendableShardSpecs, forceGuaranteedRollup, reportParseExceptions, - publishTimeout, + pushTimeout, dir, segmentWriteOutMediumFactory ); @@ -1089,7 +1086,7 @@ public int getMaxRowsInMemory() } @JsonProperty - public int getMaxTotalRows() + public Long getMaxTotalRows() { return maxTotalRows; } @@ -1150,9 +1147,9 @@ public boolean isReportParseExceptions() } @JsonProperty - public long getPublishTimeout() + public long getPushTimeout() { - return publishTimeout; + return pushTimeout; } @Override @@ -1180,12 +1177,12 @@ public boolean equals(Object o) } IndexTuningConfig that = (IndexTuningConfig) o; return maxRowsInMemory == that.maxRowsInMemory && - maxTotalRows == that.maxTotalRows && + Objects.equals(maxTotalRows, that.maxTotalRows) && maxPendingPersists == that.maxPendingPersists && forceExtendableShardSpecs == that.forceExtendableShardSpecs && forceGuaranteedRollup == that.forceGuaranteedRollup && reportParseExceptions == that.reportParseExceptions && - publishTimeout == that.publishTimeout && + pushTimeout == that.pushTimeout && Objects.equals(targetPartitionSize, that.targetPartitionSize) && Objects.equals(numShards, that.numShards) && Objects.equals(indexSpec, that.indexSpec) && @@ -1207,7 +1204,7 @@ public int hashCode() forceExtendableShardSpecs, forceGuaranteedRollup, reportParseExceptions, - publishTimeout, + pushTimeout, segmentWriteOutMediumFactory ); } diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/MergeTaskBase.java 
b/indexing-service/src/main/java/io/druid/indexing/common/task/MergeTaskBase.java index 8a6cb91dea52..454899f5c774 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/MergeTaskBase.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/MergeTaskBase.java @@ -34,9 +34,9 @@ import com.google.common.collect.Ordering; import com.google.common.collect.Sets; import com.google.common.hash.Hashing; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.indexing.common.TaskLock; import io.druid.indexing.common.TaskStatus; import io.druid.indexing.common.TaskToolbox; @@ -185,7 +185,8 @@ public String apply(DataSegment input) long uploadStart = System.currentTimeMillis(); // Upload file - final DataSegment uploadedSegment = toolbox.getSegmentPusher().push(fileToUpload, mergedSegment); + + final DataSegment uploadedSegment = toolbox.getSegmentPusher().push(fileToUpload, mergedSegment, false); emitter.emit(builder.build("merger/uploadTime", System.currentTimeMillis() - uploadStart)); emitter.emit(builder.build("merger/mergeSize", uploadedSegment.getSize())); diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/RealtimeIndexTask.java b/indexing-service/src/main/java/io/druid/indexing/common/task/RealtimeIndexTask.java index 0a3a8b48547b..2a6b3b4723ac 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/RealtimeIndexTask.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/RealtimeIndexTask.java @@ -28,7 +28,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.primitives.Ints; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.data.input.Committer; import io.druid.data.input.Firehose; import io.druid.data.input.FirehoseFactory; diff --git a/indexing-service/src/main/java/io/druid/indexing/firehose/IngestSegmentFirehoseFactory.java b/indexing-service/src/main/java/io/druid/indexing/firehose/IngestSegmentFirehoseFactory.java index b6516c9a47e6..61c245c57d91 100644 --- a/indexing-service/src/main/java/io/druid/indexing/firehose/IngestSegmentFirehoseFactory.java +++ b/indexing-service/src/main/java/io/druid/indexing/firehose/IngestSegmentFirehoseFactory.java @@ -30,7 +30,7 @@ import com.google.common.collect.HashBiMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.data.input.Firehose; import io.druid.data.input.FirehoseFactory; import io.druid.data.input.impl.InputRowParser; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/ForkingTaskRunner.java b/indexing-service/src/main/java/io/druid/indexing/overlord/ForkingTaskRunner.java index ee103420207d..7509c8cd8152 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/ForkingTaskRunner.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/ForkingTaskRunner.java @@ -41,7 +41,7 @@ import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import 
com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.concurrent.Execs; import io.druid.guice.annotations.Self; import io.druid.indexer.TaskLocation; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/MetadataTaskStorage.java b/indexing-service/src/main/java/io/druid/indexing/overlord/MetadataTaskStorage.java index 479b92e92f38..011aaa5d167e 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/MetadataTaskStorage.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/MetadataTaskStorage.java @@ -28,7 +28,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexing.common.TaskLock; import io.druid.indexing.common.TaskStatus; import io.druid.indexing.common.actions.TaskAction; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunner.java b/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunner.java index c3e26bd5fac4..53f5e227a5b1 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunner.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunner.java @@ -43,12 +43,12 @@ import com.google.common.util.concurrent.ListeningScheduledExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.SettableFuture; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.InputStreamResponseHandler; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.InputStreamResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.java.util.common.concurrent.Execs; import io.druid.concurrent.LifecycleLock; import io.druid.curator.CuratorUtils; @@ -1229,6 +1229,11 @@ private void taskComplete( @Override public Collection markWorkersLazy(Predicate isLazyWorker, int maxWorkers) { + // skip the lock and bail early if we should not mark any workers lazy (e.g. 
number + // of current workers is at or below the minNumWorkers of autoscaler config) + if (maxWorkers < 1) { + return Collections.emptyList(); + } // status lock is used to prevent any tasks being assigned to the worker while we mark it lazy synchronized (statusLock) { Iterator iterator = zkWorkers.keySet().iterator(); diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunnerFactory.java b/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunnerFactory.java index 5e0b165c0cab..44e7d5015763 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunnerFactory.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunnerFactory.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Supplier; import com.google.inject.Inject; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.curator.cache.PathChildrenCacheFactory; import io.druid.guice.annotations.EscalatedGlobal; import io.druid.indexing.overlord.autoscaling.NoopProvisioningStrategy; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskLockbox.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskLockbox.java index ef478256d578..5a6c2a297733 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskLockbox.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskLockbox.java @@ -31,7 +31,7 @@ import com.google.common.collect.Ordering; import com.google.common.collect.Sets; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexing.common.TaskLock; import io.druid.indexing.common.TaskLockType; import io.druid.indexing.common.task.Task; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskMaster.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskMaster.java index 4fa12b6decff..f1d14e10f2b0 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskMaster.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskMaster.java @@ -22,8 +22,8 @@ import com.google.common.base.Optional; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.indexing.IndexingService; import io.druid.curator.discovery.ServiceAnnouncer; import io.druid.discovery.DruidLeaderSelector; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskQueue.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskQueue.java index a360f5c677bf..56b5b2f3710c 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskQueue.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskQueue.java @@ -33,9 +33,9 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import 
io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.indexing.common.TaskStatus; import io.druid.indexing.common.actions.TaskActionClientFactory; import io.druid.indexing.common.task.Task; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskRunnerUtils.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskRunnerUtils.java index 2a6d917d4d73..8067c96edbd1 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskRunnerUtils.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskRunnerUtils.java @@ -19,7 +19,7 @@ package io.druid.indexing.overlord; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexer.TaskLocation; import io.druid.indexing.common.TaskStatus; import io.druid.java.util.common.Pair; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/ThreadPoolTaskRunner.java b/indexing-service/src/main/java/io/druid/indexing/overlord/ThreadPoolTaskRunner.java index b7d7008b0937..d24d7e4d8015 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/ThreadPoolTaskRunner.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/ThreadPoolTaskRunner.java @@ -29,9 +29,9 @@ import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.concurrent.Execs; import io.druid.concurrent.TaskThreadPriority; import io.druid.guice.annotations.Self; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/AbstractWorkerProvisioningStrategy.java b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/AbstractWorkerProvisioningStrategy.java index ce53188416b1..5ab74b5dd12a 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/AbstractWorkerProvisioningStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/AbstractWorkerProvisioningStrategy.java @@ -20,7 +20,7 @@ package io.druid.indexing.overlord.autoscaling; import com.google.common.base.Supplier; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexing.overlord.WorkerTaskRunner; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.granularity.PeriodGranularity; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/NoopAutoScaler.java b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/NoopAutoScaler.java index a83dc35f1757..a4330c50df5d 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/NoopAutoScaler.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/NoopAutoScaler.java @@ -19,7 +19,7 @@ package io.druid.indexing.overlord.autoscaling; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.UOE; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/PendingTaskBasedWorkerProvisioningStrategy.java 
b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/PendingTaskBasedWorkerProvisioningStrategy.java index 476f6fdcb9c2..916833b24aaa 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/PendingTaskBasedWorkerProvisioningStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/PendingTaskBasedWorkerProvisioningStrategy.java @@ -30,8 +30,8 @@ import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.inject.Inject; -import com.metamx.common.concurrent.ScheduledExecutors; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.common.concurrent.ScheduledExecutors; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexing.common.task.Task; import io.druid.indexing.overlord.ImmutableWorkerInfo; import io.druid.indexing.overlord.WorkerTaskRunner; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/SimpleWorkerProvisioningStrategy.java b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/SimpleWorkerProvisioningStrategy.java index 8ac414f5720f..c5d11ff80819 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/SimpleWorkerProvisioningStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/SimpleWorkerProvisioningStrategy.java @@ -29,8 +29,8 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.google.inject.Inject; -import com.metamx.common.concurrent.ScheduledExecutors; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.common.concurrent.ScheduledExecutors; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexing.overlord.ImmutableWorkerInfo; import io.druid.indexing.overlord.TaskRunnerWorkItem; import io.druid.indexing.overlord.WorkerTaskRunner; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/ec2/EC2AutoScaler.java b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/ec2/EC2AutoScaler.java index 60ef3f247341..479253a2b595 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/ec2/EC2AutoScaler.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/autoscaling/ec2/EC2AutoScaler.java @@ -36,7 +36,7 @@ import com.google.common.base.Function; import com.google.common.collect.FluentIterable; import com.google.common.collect.Lists; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexing.overlord.autoscaling.AutoScaler; import io.druid.indexing.overlord.autoscaling.AutoScalingData; import io.druid.indexing.overlord.autoscaling.SimpleWorkerProvisioningConfig; diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/supervisor/SupervisorManager.java b/indexing-service/src/main/java/io/druid/indexing/overlord/supervisor/SupervisorManager.java index 1dd4d76d7620..00f2c92c4b75 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/supervisor/SupervisorManager.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/supervisor/SupervisorManager.java @@ -23,7 +23,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexing.overlord.DataSourceMetadata; import io.druid.java.util.common.Pair; import 
io.druid.java.util.common.lifecycle.LifecycleStart; @@ -71,6 +71,7 @@ public boolean createOrUpdateAndStartSupervisor(SupervisorSpec spec) Preconditions.checkState(started, "SupervisorManager not started"); Preconditions.checkNotNull(spec, "spec"); Preconditions.checkNotNull(spec.getId(), "spec.getId()"); + Preconditions.checkNotNull(spec.getDataSources(), "spec.getDatasources()"); synchronized (lock) { Preconditions.checkState(started, "SupervisorManager not started"); @@ -197,7 +198,7 @@ private boolean possiblyStopAndRemoveSupervisorInternal(String id, boolean write } if (writeTombstone) { - metadataSupervisorManager.insert(id, new NoopSupervisorSpec()); // where NoopSupervisorSpec is a tombstone + metadataSupervisorManager.insert(id, new NoopSupervisorSpec(null, pair.rhs.getDataSources())); // where NoopSupervisorSpec is a tombstone } pair.lhs.stop(true); supervisors.remove(id); @@ -232,7 +233,7 @@ private boolean createAndStartSupervisorInternal(SupervisorSpec spec, boolean pe catch (Exception e) { // Supervisor creation or start failed write tombstone only when trying to start a new supervisor if (persistSpec) { - metadataSupervisorManager.insert(id, new NoopSupervisorSpec()); + metadataSupervisorManager.insert(id, new NoopSupervisorSpec(null, spec.getDataSources())); } Throwables.propagate(e); } diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/supervisor/SupervisorResource.java b/indexing-service/src/main/java/io/druid/indexing/overlord/supervisor/SupervisorResource.java index c23e028e6f10..09196df401c3 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/supervisor/SupervisorResource.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/supervisor/SupervisorResource.java @@ -22,10 +22,9 @@ import com.google.common.base.Function; import com.google.common.base.Optional; import com.google.common.base.Preconditions; -import com.google.common.base.Predicate; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; -import com.google.common.collect.Maps; +import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.google.inject.Inject; import com.sun.jersey.spi.container.ResourceFilters; @@ -49,6 +48,7 @@ import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; @@ -61,6 +61,20 @@ @Path("/druid/indexer/v1/supervisor") public class SupervisorResource { + private static final Function> SPEC_DATASOURCE_READ_RA_GENERATOR = + supervisorSpec -> { + if (supervisorSpec.getSpec() == null) { + return null; + } + if (supervisorSpec.getSpec().getDataSources() == null) { + return new ArrayList<>(); + } + return Iterables.transform( + supervisorSpec.getSpec().getDataSources(), + AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR + ); + }; + private final TaskMaster taskMaster; private final AuthConfig authConfig; private final AuthorizerMapper authorizerMapper; @@ -216,26 +230,14 @@ public Response specGetAllHistory(@Context final HttpServletRequest req) @Override public Response apply(final SupervisorManager manager) { - final Map> supervisorHistory = manager.getSupervisorHistory(); - - final Set authorizedSupervisorIds = filterAuthorizedSupervisorIds( - req, - manager, - supervisorHistory.keySet() - ); - - final Map> authorizedSupervisorHistory = Maps.filterKeys( - supervisorHistory, - new Predicate() - { - @Override - public boolean 
apply(String id) - { - return authorizedSupervisorIds.contains(id); - } - } - ); - return Response.ok(supervisorHistory).build(); + return Response.ok( + AuthorizationUtils.filterAuthorizedResources( + req, + manager.getSupervisorHistory(), + SPEC_DATASOURCE_READ_RA_GENERATOR, + authorizerMapper + ) + ).build(); } } ); @@ -244,8 +246,9 @@ public boolean apply(String id) @GET @Path("/{id}/history") @Produces(MediaType.APPLICATION_JSON) - @ResourceFilters(SupervisorResourceFilter.class) - public Response specGetHistory(@PathParam("id") final String id) + public Response specGetHistory( + @Context final HttpServletRequest req, + @PathParam("id") final String id) { return asLeaderWithSupervisorManager( new Function() @@ -253,23 +256,32 @@ public Response specGetHistory(@PathParam("id") final String id) @Override public Response apply(SupervisorManager manager) { - Map> history = manager.getSupervisorHistory(); - if (history.containsKey(id)) { - return Response.ok(history.get(id)).build(); - } else { - return Response.status(Response.Status.NOT_FOUND) - .entity( - ImmutableMap.of( - "error", - StringUtils.format( - "No history for [%s] (history available for %s)", - id, - history.keySet() - ) - ) - ) - .build(); + Map> supervisorHistory = manager.getSupervisorHistory(); + Iterable historyForId = supervisorHistory.get(id); + if (historyForId != null) { + final List authorizedHistoryForId = + Lists.newArrayList( + AuthorizationUtils.filterAuthorizedResources( + req, + historyForId, + SPEC_DATASOURCE_READ_RA_GENERATOR, + authorizerMapper + ) + ); + if (authorizedHistoryForId.size() > 0) { + return Response.ok(authorizedHistoryForId).build(); + } } + + return Response.status(Response.Status.NOT_FOUND) + .entity( + ImmutableMap.of( + "error", + StringUtils.format("No history for [%s].", id) + ) + ) + .build(); + } } ); diff --git a/indexing-service/src/main/java/io/druid/indexing/worker/WorkerTaskMonitor.java b/indexing-service/src/main/java/io/druid/indexing/worker/WorkerTaskMonitor.java index 5d470102f133..74f5c3357546 100644 --- a/indexing-service/src/main/java/io/druid/indexing/worker/WorkerTaskMonitor.java +++ b/indexing-service/src/main/java/io/druid/indexing/worker/WorkerTaskMonitor.java @@ -26,7 +26,7 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.indexer.TaskLocation; import io.druid.indexing.common.TaskStatus; import io.druid.indexing.common.task.Task; diff --git a/indexing-service/src/main/java/io/druid/indexing/worker/executor/ExecutorLifecycle.java b/indexing-service/src/main/java/io/druid/indexing/worker/executor/ExecutorLifecycle.java index 144c3512b90f..d06830eaf6d7 100644 --- a/indexing-service/src/main/java/io/druid/indexing/worker/executor/ExecutorLifecycle.java +++ b/indexing-service/src/main/java/io/druid/indexing/worker/executor/ExecutorLifecycle.java @@ -26,7 +26,7 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.concurrent.Execs; import io.druid.indexing.common.TaskStatus; import io.druid.indexing.common.actions.TaskActionClientFactory; diff --git a/indexing-service/src/test/java/io/druid/indexing/common/TaskToolboxTest.java 
b/indexing-service/src/test/java/io/druid/indexing/common/TaskToolboxTest.java index 1542b33aef6d..b52a3a351d09 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/TaskToolboxTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/TaskToolboxTest.java @@ -21,8 +21,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.MonitorScheduler; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.MonitorScheduler; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.indexing.common.actions.TaskActionClientFactory; diff --git a/indexing-service/src/test/java/io/druid/indexing/common/actions/RemoteTaskActionClientTest.java b/indexing-service/src/test/java/io/druid/indexing/common/actions/RemoteTaskActionClientTest.java index 7543b0def3dd..b5f79d678ac6 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/actions/RemoteTaskActionClientTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/actions/RemoteTaskActionClientTest.java @@ -20,8 +20,8 @@ package io.druid.indexing.common.actions; import com.fasterxml.jackson.databind.ObjectMapper; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.discovery.DruidLeaderClient; import io.druid.indexing.common.RetryPolicyConfig; import io.druid.indexing.common.RetryPolicyFactory; diff --git a/indexing-service/src/test/java/io/druid/indexing/common/actions/SegmentAllocateActionTest.java b/indexing-service/src/test/java/io/druid/indexing/common/actions/SegmentAllocateActionTest.java index b7e82324739c..cef4c48bfeea 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/actions/SegmentAllocateActionTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/actions/SegmentAllocateActionTest.java @@ -25,8 +25,8 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.indexing.common.TaskLock; import io.druid.indexing.common.task.NoopTask; import io.druid.indexing.common.task.Task; diff --git a/indexing-service/src/test/java/io/druid/indexing/common/task/CompactionTaskTest.java b/indexing-service/src/test/java/io/druid/indexing/common/task/CompactionTaskTest.java index 7bccdbab3d83..2db8255fa9c7 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/task/CompactionTaskTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/task/CompactionTaskTest.java @@ -26,6 +26,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import io.druid.data.input.FirehoseFactory; import io.druid.data.input.impl.DimensionSchema; @@ -69,8 +70,8 @@ import io.druid.segment.column.Column; import io.druid.segment.column.ColumnBuilder; import io.druid.segment.column.ValueType; -import 
io.druid.segment.data.CompressionStrategy; import io.druid.segment.data.CompressionFactory.LongEncodingStrategy; +import io.druid.segment.data.CompressionStrategy; import io.druid.segment.data.ListIndexed; import io.druid.segment.data.RoaringBitmapSerdeFactory; import io.druid.segment.incremental.IncrementalIndex; @@ -84,6 +85,7 @@ import org.hamcrest.CoreMatchers; import org.joda.time.Interval; import org.junit.Assert; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -125,10 +127,12 @@ public class CompactionTaskTest private static Map AGGREGATORS; private static List SEGMENTS; private static ObjectMapper objectMapper = setupInjectablesInObjectMapper(new DefaultObjectMapper()); - private static TaskToolbox toolbox; + private static Map segmentMap; + + private TaskToolbox toolbox; @BeforeClass - public static void setup() + public static void setupClass() { DIMENSIONS = new HashMap<>(); AGGREGATORS = new HashMap<>(); @@ -161,7 +165,7 @@ public static void setup() AGGREGATORS.put("agg_3", new FloatFirstAggregatorFactory("agg_3", "float_dim_3")); AGGREGATORS.put("agg_4", new DoubleLastAggregatorFactory("agg_4", "double_dim_4")); - final Map segmentMap = new HashMap<>(5); + segmentMap = new HashMap<>(5); for (int i = 0; i < 5; i++) { final Interval segmentInterval = Intervals.of(StringUtils.format("2017-0%d-01/2017-0%d-01", (i + 1), (i + 2))); segmentMap.put( @@ -180,7 +184,11 @@ public static void setup() ); } SEGMENTS = new ArrayList<>(segmentMap.keySet()); + } + @Before + public void setup() + { toolbox = new TestTaskToolbox( new TestTaskActionClient(new ArrayList<>(segmentMap.keySet())), new TestIndexIO(objectMapper, segmentMap), @@ -227,7 +235,7 @@ private static IndexTuningConfig createTuningConfig() return new IndexTuningConfig( 5000000, 500000, - 1000000, + 1000000L, null, null, new IndexSpec( @@ -241,6 +249,7 @@ private static IndexTuningConfig createTuningConfig() false, true, false, + null, 100L, null ); @@ -390,6 +399,24 @@ public void testCreateIngestionSchemaWithDifferentSegmentSet() throws IOExceptio ); } + @Test + public void testMissingMetadata() throws IOException, SegmentLoadingException + { + expectedException.expect(RuntimeException.class); + expectedException.expectMessage(CoreMatchers.startsWith("Index metadata doesn't exist for segment")); + + final TestIndexIO indexIO = (TestIndexIO) toolbox.getIndexIO(); + indexIO.removeMetadata(Iterables.getFirst(indexIO.getQueryableIndexMap().keySet(), null)); + final List segments = new ArrayList<>(SEGMENTS); + CompactionTask.createIngestionSchema( + toolbox, + new SegmentProvider(segments), + null, + TUNING_CONFIG, + objectMapper + ); + } + private static DimensionsSpec getExpectedDimensionsSpecForAutoGeneration() { return new DimensionsSpec( @@ -575,7 +602,7 @@ private static class TestIndexIO extends IndexIO } final Metadata metadata = new Metadata(); - metadata.setAggregators(aggregatorFactories.toArray(new AggregatorFactory[aggregatorFactories.size()])); + metadata.setAggregators(aggregatorFactories.toArray(new AggregatorFactory[0])); metadata.setRollup(false); queryableIndexMap.put( @@ -598,6 +625,31 @@ public QueryableIndex loadIndex(File file) throws IOException { return queryableIndexMap.get(file); } + + void removeMetadata(File file) + { + final SimpleQueryableIndex index = (SimpleQueryableIndex) queryableIndexMap.get(file); + if (index != null) { + queryableIndexMap.put( + file, + new SimpleQueryableIndex( + index.getDataInterval(), + index.getColumnNames(), + 
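// Rebuild the index with every component copied unchanged; only the Metadata argument (the null below) differs,
+ // simulating a segment whose index metadata is missing.
+ 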
index.getAvailableDimensions(), + index.getBitmapFactoryForDimensions(), + index.getColumns(), + index.getFileMapper(), + null, + index.getDimensionHandlers() + ) + ); + } + } + + Map getQueryableIndexMap() + { + return queryableIndexMap; + } } private static Column createColumn(DimensionSchema dimensionSchema) diff --git a/indexing-service/src/test/java/io/druid/indexing/common/task/IndexTaskTest.java b/indexing-service/src/test/java/io/druid/indexing/common/task/IndexTaskTest.java index 79310c9771de..d7022ad91517 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/task/IndexTaskTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/task/IndexTaskTest.java @@ -57,14 +57,15 @@ import io.druid.segment.IndexMergerV9; import io.druid.segment.IndexSpec; import io.druid.segment.indexing.DataSchema; -import io.druid.segment.transform.ExpressionTransform; -import io.druid.segment.transform.TransformSpec; import io.druid.segment.indexing.granularity.ArbitraryGranularitySpec; import io.druid.segment.indexing.granularity.GranularitySpec; import io.druid.segment.indexing.granularity.UniformGranularitySpec; +import io.druid.segment.loading.DataSegmentKiller; import io.druid.segment.loading.DataSegmentPusher; import io.druid.segment.realtime.appenderator.SegmentIdentifier; import io.druid.segment.realtime.firehose.LocalFirehoseFactory; +import io.druid.segment.transform.ExpressionTransform; +import io.druid.segment.transform.TransformSpec; import io.druid.timeline.DataSegment; import io.druid.timeline.partition.HashBasedNumberedShardSpec; import io.druid.timeline.partition.NoneShardSpec; @@ -191,7 +192,7 @@ public void testForceExtendableShardSpecs() throws Exception tmpDir, null, null, - createTuningConfig(2, null, true, false), + createTuningConfig(2, null, true, true), false ), null @@ -396,12 +397,12 @@ public void testAppendToExisting() throws Exception Assert.assertEquals("test", segments.get(0).getDataSource()); Assert.assertEquals(Intervals.of("2014/P1D"), segments.get(0).getInterval()); - Assert.assertTrue(segments.get(0).getShardSpec().getClass().equals(NumberedShardSpec.class)); + Assert.assertEquals(NumberedShardSpec.class, segments.get(0).getShardSpec().getClass()); Assert.assertEquals(0, segments.get(0).getShardSpec().getPartitionNum()); Assert.assertEquals("test", segments.get(1).getDataSource()); Assert.assertEquals(Intervals.of("2014/P1D"), segments.get(1).getInterval()); - Assert.assertTrue(segments.get(1).getShardSpec().getClass().equals(NumberedShardSpec.class)); + Assert.assertEquals(NumberedShardSpec.class, segments.get(1).getShardSpec().getClass()); Assert.assertEquals(1, segments.get(1).getShardSpec().getPartitionNum()); } @@ -581,7 +582,7 @@ public void testWithSmallMaxTotalRows() throws Exception Granularities.MINUTE, null ), - createTuningConfig(2, 2, 2, null, false, false, true), + createTuningConfig(2, 2, 2L, null, false, false, true), false ), null @@ -623,7 +624,7 @@ public void testPerfectRollup() throws Exception true, null ), - createTuningConfig(3, 2, 2, null, false, true, true), + createTuningConfig(3, 2, 2L, null, false, true, true), false ), null @@ -664,7 +665,7 @@ public void testBestEffortRollup() throws Exception true, null ), - createTuningConfig(3, 2, 2, null, false, false, true), + createTuningConfig(3, 2, 2L, null, false, false, true), false ), null @@ -1006,7 +1007,7 @@ public String getPathForHadoop() } @Override - public DataSegment push(File file, DataSegment segment) throws IOException + public DataSegment push(File 
file, DataSegment segment, boolean useUniquePath) { segments.add(segment); return segment; @@ -1019,12 +1020,27 @@ public Map makeLoadSpec(URI uri) } }; + final DataSegmentKiller killer = new DataSegmentKiller() + { + @Override + public void kill(DataSegment segment) + { + + } + + @Override + public void killAll() + { + + } + }; + final TaskToolbox box = new TaskToolbox( null, actionClient, null, pusher, - null, + killer, null, null, null, @@ -1128,7 +1144,7 @@ private static IndexTuningConfig createTuningConfig( private static IndexTuningConfig createTuningConfig( Integer targetPartitionSize, Integer maxRowsInMemory, - Integer maxTotalRows, + Long maxTotalRows, Integer numShards, boolean forceExtendableShardSpecs, boolean forceGuaranteedRollup, @@ -1148,6 +1164,7 @@ private static IndexTuningConfig createTuningConfig( forceGuaranteedRollup, reportParseException, null, + null, null ); } diff --git a/indexing-service/src/test/java/io/druid/indexing/common/task/RealtimeIndexTaskTest.java b/indexing-service/src/test/java/io/druid/indexing/common/task/RealtimeIndexTaskTest.java index 521bba77a360..bea1c613145d 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/task/RealtimeIndexTaskTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/task/RealtimeIndexTaskTest.java @@ -30,10 +30,10 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.core.NoopEmitter; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.MonitorScheduler; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.core.NoopEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.MonitorScheduler; import io.druid.client.cache.CacheConfig; import io.druid.client.cache.MapCache; import io.druid.data.input.Firehose; diff --git a/indexing-service/src/test/java/io/druid/indexing/common/task/SameIntervalMergeTaskTest.java b/indexing-service/src/test/java/io/druid/indexing/common/task/SameIntervalMergeTaskTest.java index 63389a804bae..50932dbb2311 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/task/SameIntervalMergeTaskTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/task/SameIntervalMergeTaskTest.java @@ -185,7 +185,8 @@ public RetType submit(TaskAction taskAction) throws IOExcepti return null; } }, - new NoopServiceEmitter(), new DataSegmentPusher() + new NoopServiceEmitter(), + new DataSegmentPusher() { @Deprecated @Override @@ -201,12 +202,13 @@ public String getPathForHadoop() } @Override - public DataSegment push(File file, DataSegment segment) throws IOException + public DataSegment push(File file, DataSegment segment, boolean useUniquePath) { // the merged segment is pushed to storage segments.add(segment); return segment; } + @Override public Map makeLoadSpec(URI finalIndexZipFilePath) { diff --git a/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java b/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java index b0484c882239..dd94f9508de1 100644 --- a/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java @@ -190,7 +190,7 @@ public void testIndexTaskSerde() throws Exception jsonMapper ), new 
IndexIOConfig(new LocalFirehoseFactory(new File("lol"), "rofl", null), true), - new IndexTuningConfig(10000, 10, null, 9999, null, indexSpec, 3, true, true, false, null, null, null) + new IndexTuningConfig(10000, 10, null, 9999, null, indexSpec, 3, true, true, false, null, null, null, null) ), null ); @@ -253,7 +253,7 @@ public void testIndexTaskwithResourceSerde() throws Exception jsonMapper ), new IndexIOConfig(new LocalFirehoseFactory(new File("lol"), "rofl", null), true), - new IndexTuningConfig(10000, 10, null, null, null, indexSpec, 3, true, true, false, null, null, null) + new IndexTuningConfig(10000, 10, null, null, null, indexSpec, 3, true, true, false, null, null, null, null) ), null ); diff --git a/indexing-service/src/test/java/io/druid/indexing/firehose/IngestSegmentFirehoseFactoryTest.java b/indexing-service/src/test/java/io/druid/indexing/firehose/IngestSegmentFirehoseFactoryTest.java index 624ddc4cc425..9767ebf513ac 100644 --- a/indexing-service/src/test/java/io/druid/indexing/firehose/IngestSegmentFirehoseFactoryTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/firehose/IngestSegmentFirehoseFactoryTest.java @@ -31,7 +31,6 @@ import com.google.common.io.Files; import com.google.inject.Binder; import com.google.inject.Module; -import com.metamx.emitter.service.ServiceEmitter; import io.druid.data.input.InputRow; import io.druid.data.input.impl.DimensionsSpec; import io.druid.data.input.impl.InputRowParser; @@ -61,6 +60,7 @@ import io.druid.java.util.common.JodaUtils; import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.math.expr.ExprMacroTable; import io.druid.metadata.IndexerSQLMetadataStorageCoordinator; import io.druid.query.aggregation.DoubleSumAggregatorFactory; @@ -184,7 +184,8 @@ public List getUsedSegmentsForInterval(String dataSource, Interval } @Override - public List getUsedSegmentsForIntervals(String dataSource, List interval) throws IOException + public List getUsedSegmentsForIntervals(String dataSource, List interval) + throws IOException { return ImmutableList.copyOf(segmentSet); } @@ -249,7 +250,7 @@ public String getPathForHadoop() } @Override - public DataSegment push(File file, DataSegment segment) throws IOException + public DataSegment push(File file, DataSegment segment, boolean useUniquePath) { return segment; } @@ -537,7 +538,11 @@ public void simpleFirehoseReadingTest() throws IOException Assert.assertArrayEquals(new String[]{DIM_NAME}, row.getDimensions().toArray()); Assert.assertArrayEquals(new String[]{DIM_VALUE}, row.getDimension(DIM_NAME).toArray()); Assert.assertEquals(METRIC_LONG_VALUE.longValue(), row.getMetric(METRIC_LONG_NAME)); - Assert.assertEquals(METRIC_FLOAT_VALUE, row.getMetric(METRIC_FLOAT_NAME).floatValue(), METRIC_FLOAT_VALUE * 0.0001); + Assert.assertEquals( + METRIC_FLOAT_VALUE, + row.getMetric(METRIC_FLOAT_NAME).floatValue(), + METRIC_FLOAT_VALUE * 0.0001 + ); ++rowcount; } } diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/RemoteTaskRunnerTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/RemoteTaskRunnerTest.java index c2a199f872d3..be311c3a39de 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/RemoteTaskRunnerTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/RemoteTaskRunnerTest.java @@ -28,8 +28,8 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; import 
com.google.common.util.concurrent.ListenableFuture; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.indexer.TaskState; import io.druid.indexing.common.IndexingServiceCondition; import io.druid.indexing.common.TaskStatus; @@ -550,6 +550,24 @@ public boolean apply(ImmutableWorkerInfo input) Assert.assertEquals(1, remoteTaskRunner.getLazyWorkers().size()); } + @Test + public void testFindLazyWorkerNotRunningAnyTaskButWithZeroMaxWorkers() throws Exception + { + doSetup(); + Collection lazyworkers = remoteTaskRunner.markWorkersLazy( + new Predicate() + { + @Override + public boolean apply(ImmutableWorkerInfo input) + { + return true; + } + }, 0 + ); + Assert.assertEquals(0, lazyworkers.size()); + Assert.assertEquals(0, remoteTaskRunner.getLazyWorkers().size()); + } + @Test public void testWorkerZKReconnect() throws Exception { diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/RemoteTaskRunnerTestUtils.java b/indexing-service/src/test/java/io/druid/indexing/overlord/RemoteTaskRunnerTestUtils.java index 7bcdff1530ad..cc4328814077 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/RemoteTaskRunnerTestUtils.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/RemoteTaskRunnerTestUtils.java @@ -24,7 +24,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Supplier; import com.google.common.base.Throwables; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.common.guava.DSuppliers; import io.druid.curator.PotentiallyGzippedCompressionProvider; import io.druid.curator.cache.PathChildrenCacheFactory; diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/TaskLifecycleTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/TaskLifecycleTest.java index e408bd28c2db..e583a85b2671 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/TaskLifecycleTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/TaskLifecycleTest.java @@ -34,10 +34,6 @@ import com.google.common.collect.Lists; import com.google.common.collect.Ordering; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.Monitor; -import com.metamx.metrics.MonitorScheduler; import io.druid.client.cache.MapCache; import io.druid.data.input.Firehose; import io.druid.data.input.FirehoseFactory; @@ -82,6 +78,10 @@ import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.granularity.Granularities; import io.druid.java.util.common.guava.Comparators; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.Monitor; +import io.druid.java.util.metrics.MonitorScheduler; import io.druid.metadata.DerbyMetadataStorageActionHandlerFactory; import io.druid.metadata.TestDerbyConnector; import io.druid.query.QueryRunnerFactoryConglomerate; @@ -485,7 +485,7 @@ public String getPathForHadoop(String dataSource) } @Override - public DataSegment push(File file, DataSegment segment) throws IOException + public DataSegment push(File file, DataSegment segment, boolean useUniquePath) { pushedSegments++; return segment; @@ -527,8 +527,11 @@ private TaskToolboxFactory 
setUpTaskToolboxFactory( Preconditions.checkNotNull(emitter); taskLockbox = new TaskLockbox(taskStorage); - tac = new LocalTaskActionClientFactory(taskStorage, new TaskActionToolbox(taskLockbox, mdc, emitter, EasyMock.createMock( - SupervisorManager.class))); + tac = new LocalTaskActionClientFactory( + taskStorage, + new TaskActionToolbox(taskLockbox, mdc, emitter, EasyMock.createMock( + SupervisorManager.class)) + ); File tmpDir = temporaryFolder.newFolder(); taskConfig = new TaskConfig(tmpDir.toString(), null, null, 50000, null, false, null, null); @@ -671,7 +674,7 @@ public void testIndexTask() throws Exception mapper ), new IndexIOConfig(new MockFirehoseFactory(false), false), - new IndexTuningConfig(10000, 10, null, null, null, indexSpec, 3, true, true, false, null, null, null) + new IndexTuningConfig(10000, 10, null, null, null, indexSpec, 3, true, true, false, null, null, null, null) ), null ); @@ -729,7 +732,7 @@ public void testIndexTaskFailure() throws Exception mapper ), new IndexIOConfig(new MockExceptionalFirehoseFactory(), false), - new IndexTuningConfig(10000, 10, null, null, null, indexSpec, 3, true, true, false, null, null, null) + new IndexTuningConfig(10000, 10, null, null, null, indexSpec, 3, true, true, false, null, null, null, null) ), null ); @@ -1034,7 +1037,7 @@ public String getPathForHadoop() } @Override - public DataSegment push(File file, DataSegment dataSegment) throws IOException + public DataSegment push(File file, DataSegment dataSegment, boolean useUniquePath) { throw new RuntimeException("FAILURE"); } @@ -1094,7 +1097,22 @@ public void testResumeTasks() throws Exception mapper ), new IndexIOConfig(new MockFirehoseFactory(false), false), - new IndexTuningConfig(10000, 10, null, null, null, indexSpec, null, false, null, null, null, null, null) + new IndexTuningConfig( + 10000, + 10, + null, + null, + null, + indexSpec, + null, + false, + null, + null, + null, + null, + null, + null + ) ), null ); diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/TaskLockboxTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/TaskLockboxTest.java index 11266b60def0..33dd21076e20 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/TaskLockboxTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/TaskLockboxTest.java @@ -21,8 +21,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Iterables; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.indexing.common.TaskLock; import io.druid.indexing.common.TaskLockType; import io.druid.indexing.common.TaskStatus; diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/autoscaling/PendingTaskBasedProvisioningStrategyTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/autoscaling/PendingTaskBasedProvisioningStrategyTest.java index e6afde258183..426eb89378de 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/autoscaling/PendingTaskBasedProvisioningStrategyTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/autoscaling/PendingTaskBasedProvisioningStrategyTest.java @@ -23,9 +23,9 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.metamx.emitter.EmittingLogger; -import 
com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceEventBuilder; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEventBuilder; import io.druid.common.guava.DSuppliers; import io.druid.java.util.common.concurrent.Execs; import io.druid.indexer.TaskLocation; diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/autoscaling/SimpleProvisioningStrategyTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/autoscaling/SimpleProvisioningStrategyTest.java index f917d359c64a..2e7294f52391 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/autoscaling/SimpleProvisioningStrategyTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/autoscaling/SimpleProvisioningStrategyTest.java @@ -24,9 +24,9 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceEventBuilder; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEventBuilder; import io.druid.common.guava.DSuppliers; import io.druid.java.util.common.concurrent.Execs; import io.druid.indexer.TaskLocation; diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/http/OverlordResourceTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/http/OverlordResourceTest.java index 78242e8f8603..22b9e512aaa6 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/http/OverlordResourceTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/http/OverlordResourceTest.java @@ -120,8 +120,8 @@ public Access authorize(AuthenticationResult authenticationResult, Resource reso public void expectAuthorizationTokenCheck() { - AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null); + AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null, null); EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).anyTimes(); EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)) .andReturn(authenticationResult) diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/http/OverlordTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/http/OverlordTest.java index b88556cf4fc3..9d95c65afd09 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/http/OverlordTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/http/OverlordTest.java @@ -26,8 +26,8 @@ import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.curator.PotentiallyGzippedCompressionProvider; import io.druid.curator.discovery.NoopServiceAnnouncer; import io.druid.discovery.DruidLeaderSelector; @@ -131,7 +131,7 @@ public void setUp() throws Exception req = EasyMock.createMock(HttpServletRequest.class); 
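// Note: AuthenticationResult grew a fourth constructor argument in this change; the two trailing nulls passed
// below are (presumably) the authenticated-by name and the authentication context, neither of which these
// tests inspect.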
EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).anyTimes(); EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).anyTimes(); req.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().anyTimes(); diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/supervisor/SupervisorManagerTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/supervisor/SupervisorManagerTest.java index c22dde9eb1f4..8415b919b721 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/supervisor/SupervisorManagerTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/supervisor/SupervisorManagerTest.java @@ -35,6 +35,7 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; +import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -295,7 +296,7 @@ public Supervisor createSupervisor() @Override public List getDataSources() { - return null; + return new ArrayList<>(); } } diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/supervisor/SupervisorResourceTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/supervisor/SupervisorResourceTest.java index 2c34ef83a28c..cb72a1922058 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/supervisor/SupervisorResourceTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/supervisor/SupervisorResourceTest.java @@ -19,6 +19,7 @@ package io.druid.indexing.overlord.supervisor; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Optional; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -28,9 +29,13 @@ import io.druid.indexing.overlord.DataSourceMetadata; import io.druid.indexing.overlord.TaskMaster; import io.druid.java.util.common.DateTimes; +import io.druid.server.security.Access; +import io.druid.server.security.Action; import io.druid.server.security.AuthConfig; -import io.druid.server.security.AuthTestUtils; import io.druid.server.security.AuthenticationResult; +import io.druid.server.security.Authorizer; +import io.druid.server.security.AuthorizerMapper; +import io.druid.server.security.Resource; import org.easymock.Capture; import org.easymock.EasyMock; import org.easymock.EasyMockRunner; @@ -43,6 +48,7 @@ import javax.servlet.http.HttpServletRequest; import javax.ws.rs.core.Response; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; @@ -64,13 +70,40 @@ public class SupervisorResourceTest extends EasyMockSupport @Before public void setUp() throws Exception { - supervisorResource = new SupervisorResource(taskMaster, new AuthConfig(), AuthTestUtils.TEST_AUTHORIZER_MAPPER); + supervisorResource = new SupervisorResource( + taskMaster, + new AuthConfig(), + new AuthorizerMapper(null) { + @Override + public Authorizer getAuthorizer(String name) + { + return new Authorizer() + { + @Override + public Access authorize( + AuthenticationResult authenticationResult, Resource resource, Action action + ) + { + if (authenticationResult.getIdentity().equals("druid")) { + return Access.OK; + } else { + if (resource.getName().equals("datasource2")) { + return new Access(false, "not authorized."); + } else { + return Access.OK; + } + } + } + }; + } + } + ); } @Test public void testSpecPost() 
throws Exception { - SupervisorSpec spec = new TestSupervisorSpec("my-id", null) { + SupervisorSpec spec = new TestSupervisorSpec("my-id", null, null) { @Override public List getDataSources() @@ -83,7 +116,7 @@ public List getDataSources() EasyMock.expect(supervisorManager.createOrUpdateAndStartSupervisor(spec)).andReturn(true); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).atLeastOnce(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().anyTimes(); @@ -109,7 +142,7 @@ public List getDataSources() public void testSpecGetAll() throws Exception { Set supervisorIds = ImmutableSet.of("id1", "id2"); - SupervisorSpec spec1 = new TestSupervisorSpec("id1", null) { + SupervisorSpec spec1 = new TestSupervisorSpec("id1", null, null) { @Override public List getDataSources() @@ -117,7 +150,7 @@ public List getDataSources() return Lists.newArrayList("datasource1"); } }; - SupervisorSpec spec2 = new TestSupervisorSpec("id2", null) { + SupervisorSpec spec2 = new TestSupervisorSpec("id2", null, null) { @Override public List getDataSources() @@ -132,7 +165,7 @@ public List getDataSources() EasyMock.expect(supervisorManager.getSupervisorSpec("id2")).andReturn(Optional.of(spec2)); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).atLeastOnce(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().anyTimes(); @@ -157,7 +190,7 @@ public List getDataSources() @Test public void testSpecGet() throws Exception { - SupervisorSpec spec = new TestSupervisorSpec("my-id", null); + SupervisorSpec spec = new TestSupervisorSpec("my-id", null, null); EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.of(supervisorManager)).times(2); EasyMock.expect(supervisorManager.getSupervisorSpec("my-id")).andReturn(Optional.of(spec)); @@ -255,33 +288,186 @@ public void testShutdown() throws Exception @Test public void testSpecGetAllHistory() throws Exception { + List versions1 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")), + "v1" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource1")), + "tombstone" + ) + ); + List versions2 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v1" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource2")), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v3" + ) + ); + List versions3 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource3")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), 
+ "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource3")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource3")), + "v3" + ) + ); Map> history = Maps.newHashMap(); - history.put("id1", null); - history.put("id2", null); + history.put("id1", versions1); + history.put("id2", versions2); + history.put("id3", versions3); EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.of(supervisorManager)).times(2); EasyMock.expect(supervisorManager.getSupervisorHistory()).andReturn(history); - SupervisorSpec spec1 = new TestSupervisorSpec("id1", null) { + SupervisorSpec spec1 = new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")); + SupervisorSpec spec2 = new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")); + EasyMock.expect(supervisorManager.getSupervisorSpec("id1")).andReturn(Optional.of(spec1)).atLeastOnce(); + EasyMock.expect(supervisorManager.getSupervisorSpec("id2")).andReturn(Optional.of(spec2)).atLeastOnce(); + EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).atLeastOnce(); + EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( + new AuthenticationResult("druid", "druid", null, null) + ).atLeastOnce(); + request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); + EasyMock.expectLastCall().anyTimes(); + replayAll(); - @Override - public List getDataSources() - { - return Lists.newArrayList("datasource1"); - } - }; - SupervisorSpec spec2 = new TestSupervisorSpec("id2", null) { + Response response = supervisorResource.specGetAllHistory(request); - @Override - public List getDataSources() - { - return Lists.newArrayList("datasource2"); - } - }; + Assert.assertEquals(200, response.getStatus()); + Assert.assertEquals(history, response.getEntity()); + + resetAll(); + + EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.absent()); + replayAll(); + + response = supervisorResource.specGetAllHistory(request); + verifyAll(); + + Assert.assertEquals(503, response.getStatus()); + } + + @Test + public void testSpecGetAllHistoryWithAuthFailureFiltering() throws Exception + { + List versions1 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")), + "v1" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource1")), + "tombstone" + ) + ); + List versions2 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v1" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource2")), + "tombstone" + ) + ); + List versions3 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v1" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource2")), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id3", null, 
Arrays.asList("datasource3")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource3")), + "tombstone" + ) + ); + List versions4 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v3" + ) + ); + + Map> history = Maps.newHashMap(); + history.put("id1", versions1); + history.put("id2", versions2); + history.put("id3", versions3); + history.put("id4", versions4); + + EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.of(supervisorManager)).times(2); + EasyMock.expect(supervisorManager.getSupervisorHistory()).andReturn(history); + SupervisorSpec spec1 = new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")); + SupervisorSpec spec2 = new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")); EasyMock.expect(supervisorManager.getSupervisorSpec("id1")).andReturn(Optional.of(spec1)).atLeastOnce(); EasyMock.expect(supervisorManager.getSupervisorSpec("id2")).andReturn(Optional.of(spec2)).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("wronguser", "druid", null, null) ).atLeastOnce(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().anyTimes(); @@ -289,8 +475,37 @@ public List getDataSources() Response response = supervisorResource.specGetAllHistory(request); + Map> filteredHistory = Maps.newHashMap(); + filteredHistory.put("id1", versions1); + filteredHistory.put( + "id3", + ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id3", null, Arrays.asList("datasource3")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource3")), + "tombstone" + ) + ) + ); + filteredHistory.put( + "id4", + ImmutableList.of( + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ) + ) + ); + Assert.assertEquals(200, response.getStatus()); - Assert.assertEquals(history, response.getEntity()); + Assert.assertEquals(filteredHistory, response.getEntity()); resetAll(); @@ -306,33 +521,193 @@ public List getDataSources() @Test public void testSpecGetHistory() throws Exception { - List versions = ImmutableList.of( - new VersionedSupervisorSpec(null, "v1"), - new VersionedSupervisorSpec(null, "v2") + List versions1 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource1")), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")), + "v2" + ) + ); + List versions2 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v1" + ), + 
new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource2")), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v2" + ) ); Map> history = Maps.newHashMap(); - history.put("id1", versions); - history.put("id2", null); + history.put("id1", versions1); + history.put("id2", versions2); - EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.of(supervisorManager)).times(2); - EasyMock.expect(supervisorManager.getSupervisorHistory()).andReturn(history).times(2); + EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.of(supervisorManager)).times(3); + EasyMock.expect(supervisorManager.getSupervisorHistory()).andReturn(history).times(3); + EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).atLeastOnce(); + EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( + new AuthenticationResult("druid", "druid", null, null) + ).atLeastOnce(); + request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); + EasyMock.expectLastCall().anyTimes(); + replayAll(); + + Response response = supervisorResource.specGetHistory(request, "id1"); + + Assert.assertEquals(200, response.getStatus()); + Assert.assertEquals(versions1, response.getEntity()); + + response = supervisorResource.specGetHistory(request, "id2"); + + Assert.assertEquals(200, response.getStatus()); + Assert.assertEquals(versions2, response.getEntity()); + + response = supervisorResource.specGetHistory(request, "id3"); + + Assert.assertEquals(404, response.getStatus()); + + resetAll(); + + EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.absent()); + replayAll(); + + response = supervisorResource.specGetHistory(request, "id1"); + verifyAll(); + + Assert.assertEquals(503, response.getStatus()); + } + + @Test + public void testSpecGetHistoryWithAuthFailure() throws Exception + { + List versions1 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource3")), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id1", null, Arrays.asList("datasource1")), + "v2" + ) + ); + List versions2 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource2")), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id2", null, Arrays.asList("datasource2")), + "v2" + ) + ); + List versions3 = ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id3", null, Arrays.asList("datasource3")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id3", null, Arrays.asList("datasource2")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id3", null, Arrays.asList("datasource3")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource3")), + "tombstone" + ) + ); + Map> history = Maps.newHashMap(); + history.put("id1", versions1); + history.put("id2", versions2); + history.put("id3", versions3); + + 
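The filtered results asserted further below imply that specGetAllHistory trims each user's view of the history map entry by entry. A minimal sketch of that per-entry rule, assuming VersionedSupervisorSpec exposes getSpec() and that some authorization predicate over datasource names is available (filterAuthorizedVersions and isAuthorizedDatasource are illustrative names, not the actual SupervisorResource code):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

class HistoryFilterSketch
{
  // Keep a history entry only if every datasource of its spec is readable by the
  // caller; a tombstone whose spec has null datasources has nothing to check, so
  // it is always retained (matching the expected id4 result below). Supervisor
  // ids whose filtered list ends up empty are dropped from the map entirely,
  // which is why id2 does not appear in the expected filtered history.
  static List<VersionedSupervisorSpec> filterAuthorizedVersions(
      List<VersionedSupervisorSpec> versions,
      Predicate<String> isAuthorizedDatasource
  )
  {
    List<VersionedSupervisorSpec> filtered = new ArrayList<>();
    for (VersionedSupervisorSpec version : versions) {
      List<String> dataSources =
          version.getSpec() == null ? null : version.getSpec().getDataSources();
      if (dataSources == null || dataSources.stream().allMatch(isAuthorizedDatasource)) {
        filtered.add(version);
      }
    }
    return filtered;
  }
}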
EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.of(supervisorManager)).times(4); + EasyMock.expect(supervisorManager.getSupervisorHistory()).andReturn(history).times(4); + EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).atLeastOnce(); + EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( + new AuthenticationResult("notdruid", "druid", null, null) + ).atLeastOnce(); + request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); + EasyMock.expectLastCall().anyTimes(); replayAll(); - Response response = supervisorResource.specGetHistory("id1"); + Response response = supervisorResource.specGetHistory(request, "id1"); Assert.assertEquals(200, response.getStatus()); - Assert.assertEquals(versions, response.getEntity()); + Assert.assertEquals(versions1, response.getEntity()); - response = supervisorResource.specGetHistory("id3"); + response = supervisorResource.specGetHistory(request, "id2"); + // user is not authorized to access datasource2 Assert.assertEquals(404, response.getStatus()); + response = supervisorResource.specGetHistory(request, "id3"); + Assert.assertEquals(200, response.getStatus()); + Assert.assertEquals( + ImmutableList.of( + new VersionedSupervisorSpec( + new TestSupervisorSpec("id3", null, Arrays.asList("datasource3")), + "v1" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, null), + "tombstone" + ), + new VersionedSupervisorSpec( + new TestSupervisorSpec("id3", null, Arrays.asList("datasource3")), + "v2" + ), + new VersionedSupervisorSpec( + new NoopSupervisorSpec(null, Arrays.asList("datasource3")), + "tombstone" + ) + ), + response.getEntity() + ); + + response = supervisorResource.specGetHistory(request, "id4"); + Assert.assertEquals(404, response.getStatus()); + + resetAll(); EasyMock.expect(taskMaster.getSupervisorManager()).andReturn(Optional.absent()); replayAll(); - response = supervisorResource.specGetHistory("id1"); + response = supervisorResource.specGetHistory(request, "id1"); verifyAll(); Assert.assertEquals(503, response.getStatus()); @@ -371,15 +746,34 @@ public void testReset() throws Exception verifyAll(); } + @Test + public void testNoopSupervisorSpecSerde() throws Exception + { + ObjectMapper mapper = new ObjectMapper(); + String oldSpec = "{\"type\":\"NoopSupervisorSpec\",\"id\":null,\"dataSources\":null}"; + NoopSupervisorSpec expectedSpec = new NoopSupervisorSpec(null, null); + NoopSupervisorSpec deserializedSpec = mapper.readValue(oldSpec, NoopSupervisorSpec.class); + Assert.assertEquals(expectedSpec, deserializedSpec); + + NoopSupervisorSpec spec1 = new NoopSupervisorSpec("abcd", Lists.newArrayList("defg")); + NoopSupervisorSpec spec2 = mapper.readValue( + mapper.writeValueAsBytes(spec1), + NoopSupervisorSpec.class + ); + Assert.assertEquals(spec1, spec2); + } + private static class TestSupervisorSpec implements SupervisorSpec { private final String id; private final Supervisor supervisor; + private final List datasources; - public TestSupervisorSpec(String id, Supervisor supervisor) + public TestSupervisorSpec(String id, Supervisor supervisor, List datasources) { this.id = id; this.supervisor = supervisor; + this.datasources = datasources; } @Override @@ -397,7 +791,38 @@ public Supervisor createSupervisor() @Override public List getDataSources() { - return null; + return datasources; + } + + @Override + public boolean equals(Object o) 
+ { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TestSupervisorSpec that = (TestSupervisorSpec) o; + + if (getId() != null ? !getId().equals(that.getId()) : that.getId() != null) { + return false; + } + if (supervisor != null ? !supervisor.equals(that.supervisor) : that.supervisor != null) { + return false; + } + return datasources != null ? datasources.equals(that.datasources) : that.datasources == null; + + } + + @Override + public int hashCode() + { + int result = getId() != null ? getId().hashCode() : 0; + result = 31 * result + (supervisor != null ? supervisor.hashCode() : 0); + result = 31 * result + (datasources != null ? datasources.hashCode() : 0); + return result; } } } diff --git a/indexing-service/src/test/java/io/druid/indexing/test/TestDataSegmentPusher.java b/indexing-service/src/test/java/io/druid/indexing/test/TestDataSegmentPusher.java index 923a80299605..b9b27f48d07a 100644 --- a/indexing-service/src/test/java/io/druid/indexing/test/TestDataSegmentPusher.java +++ b/indexing-service/src/test/java/io/druid/indexing/test/TestDataSegmentPusher.java @@ -25,7 +25,6 @@ import io.druid.timeline.DataSegment; import java.io.File; -import java.io.IOException; import java.net.URI; import java.util.Map; import java.util.Set; @@ -48,7 +47,7 @@ public String getPathForHadoop() } @Override - public DataSegment push(File file, DataSegment segment) throws IOException + public DataSegment push(File file, DataSegment segment, boolean useUniquePath) { pushedSegments.add(segment); return segment; diff --git a/integration-tests/docker/broker.conf b/integration-tests/docker/broker.conf index e7e744d5db8a..d791e82b70f5 100644 --- a/integration-tests/docker/broker.conf +++ b/integration-tests/docker/broker.conf @@ -34,6 +34,8 @@ command=java -Ddruid.escalator.authorizerName=basic -Ddruid.auth.authorizers="[\"basic\"]" -Ddruid.auth.authorizer.basic.type=basic + -Ddruid.sql.enable=true + -Ddruid.sql.avatica.enable=true -cp /shared/docker/lib/* io.druid.cli.Main server broker redirect_stderr=true diff --git a/integration-tests/docker/router.conf b/integration-tests/docker/router.conf index 3222c18db36b..06af3aab2656 100644 --- a/integration-tests/docker/router.conf +++ b/integration-tests/docker/router.conf @@ -23,6 +23,8 @@ command=java -Ddruid.escalator.authorizerName=basic -Ddruid.auth.authorizers="[\"basic\"]" -Ddruid.auth.authorizer.basic.type=basic + -Ddruid.sql.enable=true + -Ddruid.sql.avatica.enable=true -cp /shared/docker/lib/* io.druid.cli.Main server router redirect_stderr=true diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml index 0c17f2fa2038..80729cc3e5bc 100644 --- a/integration-tests/pom.xml +++ b/integration-tests/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT @@ -98,6 +98,16 @@ + + org.apache.calcite.avatica + avatica + 1.10.0 + + + org.apache.calcite.avatica + avatica-server + 1.10.0 + org.testng testng diff --git a/integration-tests/src/main/java/io/druid/testing/clients/ClientInfoResourceTestClient.java b/integration-tests/src/main/java/io/druid/testing/clients/ClientInfoResourceTestClient.java index 9c66444d2119..65bccb904f53 100644 --- a/integration-tests/src/main/java/io/druid/testing/clients/ClientInfoResourceTestClient.java +++ b/integration-tests/src/main/java/io/druid/testing/clients/ClientInfoResourceTestClient.java @@ -24,10 +24,10 @@ import com.google.common.base.Charsets; import com.google.common.base.Throwables; import com.google.inject.Inject; -import 
com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.java.util.common.ISE; import io.druid.java.util.common.StringUtils; import io.druid.testing.IntegrationTestingConfig; diff --git a/integration-tests/src/main/java/io/druid/testing/clients/CoordinatorResourceTestClient.java b/integration-tests/src/main/java/io/druid/testing/clients/CoordinatorResourceTestClient.java index 9ce846653433..367094cfdafa 100644 --- a/integration-tests/src/main/java/io/druid/testing/clients/CoordinatorResourceTestClient.java +++ b/integration-tests/src/main/java/io/druid/testing/clients/CoordinatorResourceTestClient.java @@ -24,10 +24,10 @@ import com.google.common.base.Charsets; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.java.util.common.ISE; import io.druid.java.util.common.RE; import io.druid.java.util.common.StringUtils; diff --git a/integration-tests/src/main/java/io/druid/testing/clients/EventReceiverFirehoseTestClient.java b/integration-tests/src/main/java/io/druid/testing/clients/EventReceiverFirehoseTestClient.java index d41729d592ea..83ebf5fe0138 100644 --- a/integration-tests/src/main/java/io/druid/testing/clients/EventReceiverFirehoseTestClient.java +++ b/integration-tests/src/main/java/io/druid/testing/clients/EventReceiverFirehoseTestClient.java @@ -24,10 +24,10 @@ import com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes; import com.google.common.base.Charsets; import com.google.common.base.Throwables; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.java.util.common.jackson.JacksonUtils; import io.druid.java.util.common.ISE; import io.druid.java.util.common.StringUtils; diff --git a/integration-tests/src/main/java/io/druid/testing/clients/OverlordResourceTestClient.java b/integration-tests/src/main/java/io/druid/testing/clients/OverlordResourceTestClient.java index f47d97e4f536..38c6e3d174f1 100644 --- a/integration-tests/src/main/java/io/druid/testing/clients/OverlordResourceTestClient.java +++ b/integration-tests/src/main/java/io/druid/testing/clients/OverlordResourceTestClient.java @@ -25,10 +25,10 @@ import com.google.common.base.Predicates; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import 
com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.indexer.TaskState; import io.druid.java.util.common.ISE; import io.druid.java.util.common.RetryUtils; diff --git a/integration-tests/src/main/java/io/druid/testing/clients/QueryResourceTestClient.java b/integration-tests/src/main/java/io/druid/testing/clients/QueryResourceTestClient.java index 8060871ba5db..b6a2c6bcce30 100644 --- a/integration-tests/src/main/java/io/druid/testing/clients/QueryResourceTestClient.java +++ b/integration-tests/src/main/java/io/druid/testing/clients/QueryResourceTestClient.java @@ -25,10 +25,10 @@ import com.google.common.base.Charsets; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.java.util.common.ISE; import io.druid.java.util.common.StringUtils; import io.druid.query.Query; diff --git a/integration-tests/src/main/java/io/druid/testing/guice/DruidTestModule.java b/integration-tests/src/main/java/io/druid/testing/guice/DruidTestModule.java index b2cb8b00c133..9383a032a39d 100644 --- a/integration-tests/src/main/java/io/druid/testing/guice/DruidTestModule.java +++ b/integration-tests/src/main/java/io/druid/testing/guice/DruidTestModule.java @@ -24,13 +24,13 @@ import com.google.inject.Binder; import com.google.inject.Module; import com.google.inject.Provides; -import com.metamx.common.lifecycle.Lifecycle; -import com.metamx.emitter.core.LoggingEmitter; -import com.metamx.emitter.core.LoggingEmitterConfig; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.http.client.CredentialedHttpClient; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.auth.BasicCredentials; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.core.LoggingEmitter; +import io.druid.java.util.emitter.core.LoggingEmitterConfig; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.http.client.CredentialedHttpClient; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.auth.BasicCredentials; import io.druid.curator.CuratorConfig; import io.druid.guice.JsonConfigProvider; import io.druid.guice.ManageLifecycle; diff --git a/integration-tests/src/main/java/org/testng/DruidTestRunnerFactory.java b/integration-tests/src/main/java/org/testng/DruidTestRunnerFactory.java index bece170369d8..ed809088e003 100644 --- a/integration-tests/src/main/java/org/testng/DruidTestRunnerFactory.java +++ b/integration-tests/src/main/java/org/testng/DruidTestRunnerFactory.java @@ -23,10 +23,10 @@ import com.google.common.base.Throwables; import com.google.inject.Injector; import com.google.inject.Key; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import 
com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.lifecycle.Lifecycle; import io.druid.java.util.common.logger.Logger; diff --git a/integration-tests/src/test/java/io/druid/tests/indexer/ITRealtimeIndexTaskTest.java b/integration-tests/src/test/java/io/druid/tests/indexer/ITRealtimeIndexTaskTest.java index 530250c14a6e..54d31540b204 100644 --- a/integration-tests/src/test/java/io/druid/tests/indexer/ITRealtimeIndexTaskTest.java +++ b/integration-tests/src/test/java/io/druid/tests/indexer/ITRealtimeIndexTaskTest.java @@ -21,7 +21,7 @@ import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.curator.discovery.ServerDiscoveryFactory; import io.druid.curator.discovery.ServerDiscoverySelector; import io.druid.java.util.common.DateTimes; diff --git a/integration-tests/src/test/java/io/druid/tests/indexer/ITUnionQueryTest.java b/integration-tests/src/test/java/io/druid/tests/indexer/ITUnionQueryTest.java index 645f66dddae1..392a39358746 100644 --- a/integration-tests/src/test/java/io/druid/tests/indexer/ITUnionQueryTest.java +++ b/integration-tests/src/test/java/io/druid/tests/indexer/ITUnionQueryTest.java @@ -22,7 +22,7 @@ import com.beust.jcommander.internal.Lists; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.curator.discovery.ServerDiscoveryFactory; import io.druid.curator.discovery.ServerDiscoverySelector; import io.druid.java.util.common.DateTimes; diff --git a/integration-tests/src/test/java/io/druid/tests/security/ITBasicAuthConfigurationTest.java b/integration-tests/src/test/java/io/druid/tests/security/ITBasicAuthConfigurationTest.java index d03ce64b346a..572b565fbb9b 100644 --- a/integration-tests/src/test/java/io/druid/tests/security/ITBasicAuthConfigurationTest.java +++ b/integration-tests/src/test/java/io/druid/tests/security/ITBasicAuthConfigurationTest.java @@ -24,12 +24,12 @@ import com.google.common.base.Charsets; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.http.client.CredentialedHttpClient; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.auth.BasicCredentials; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.CredentialedHttpClient; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.auth.BasicCredentials; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.guice.annotations.Client; import io.druid.java.util.common.ISE; import io.druid.java.util.common.StringUtils; @@ -39,8 +39,10 @@ import io.druid.server.security.Resource; import io.druid.server.security.ResourceAction; import io.druid.server.security.ResourceType; 
+import io.druid.sql.avatica.DruidAvaticaHandler; import io.druid.testing.IntegrationTestingConfig; import io.druid.testing.guice.DruidTestModuleFactory; +import org.apache.calcite.avatica.AvaticaSqlException; import org.jboss.netty.handler.codec.http.HttpMethod; import org.jboss.netty.handler.codec.http.HttpResponseStatus; import org.testng.Assert; @@ -49,9 +51,14 @@ import javax.ws.rs.core.MediaType; import java.net.URL; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.Statement; import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.Properties; @Guice(moduleFactory = DruidTestModuleFactory.class) public class ITBasicAuthConfigurationTest @@ -197,8 +204,80 @@ public void testAuthConfiguration() throws Exception LOG.info("Checking access for user druid99."); checkNodeAccess(newUser99Client); + + String brokerUrl = "jdbc:avatica:remote:url=" + config.getBrokerUrl() + DruidAvaticaHandler.AVATICA_PATH; + String routerUrl = "jdbc:avatica:remote:url=" + config.getRouterUrl() + DruidAvaticaHandler.AVATICA_PATH; + + LOG.info("Checking Avatica query on broker."); + testAvaticaQuery(brokerUrl); + + LOG.info("Checking Avatica query on router."); + testAvaticaQuery(routerUrl); + + LOG.info("Testing Avatica query on broker with incorrect credentials."); + testAvaticaAuthFailure(brokerUrl); + + LOG.info("Testing Avatica query on router with incorrect credentials."); + testAvaticaAuthFailure(routerUrl); + + LOG.info("Checking OPTIONS requests on services..."); + testOptionsRequests(adminClient); + } + + private void testOptionsRequests(HttpClient httpClient) + { + makeRequest(httpClient, HttpMethod.OPTIONS, config.getCoordinatorUrl() + "/status", null); + makeRequest(httpClient, HttpMethod.OPTIONS, config.getIndexerUrl() + "/status", null); + makeRequest(httpClient, HttpMethod.OPTIONS, config.getBrokerUrl() + "/status", null); + makeRequest(httpClient, HttpMethod.OPTIONS, config.getHistoricalUrl() + "/status", null); + makeRequest(httpClient, HttpMethod.OPTIONS, config.getRouterUrl() + "/status", null); } + private void testAvaticaQuery(String url) + { + LOG.info("URL: " + url); + try { + Properties connectionProperties = new Properties(); + connectionProperties.put("user", "admin"); + connectionProperties.put("password", "priest"); + Connection connection = DriverManager.getConnection(url, connectionProperties); + Statement statement = connection.createStatement(); + statement.setMaxRows(450); + String query = "SELECT * FROM INFORMATION_SCHEMA.COLUMNS"; + ResultSet resultSet = statement.executeQuery(query); + Assert.assertTrue(resultSet.next()); + statement.close(); + connection.close(); + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + + private void testAvaticaAuthFailure(String url) throws Exception + { + LOG.info("URL: " + url); + try { + Properties connectionProperties = new Properties(); + connectionProperties.put("user", "admin"); + connectionProperties.put("password", "wrongpassword"); + Connection connection = DriverManager.getConnection(url, connectionProperties); + Statement statement = connection.createStatement(); + statement.setMaxRows(450); + String query = "SELECT * FROM INFORMATION_SCHEMA.COLUMNS"; + statement.executeQuery(query); + } + catch (AvaticaSqlException ase) { + Assert.assertEquals( + ase.getErrorMessage(), + "Error while executing SQL \"SELECT * FROM INFORMATION_SCHEMA.COLUMNS\": Remote driver error: ForbiddenException: Authentication failed." 
+ ); + return; + } + Assert.fail("Test failed, did not get AvaticaSqlException."); + } + + private void checkNodeAccess(HttpClient httpClient) { makeRequest(httpClient, HttpMethod.GET, config.getCoordinatorUrl() + "/status", null); diff --git a/integration-tests/src/test/resources/indexer/union_select_query.json b/integration-tests/src/test/resources/indexer/union_select_query.json index 76e4d0be63ed..f35e682c313a 100644 --- a/integration-tests/src/test/resources/indexer/union_select_query.json +++ b/integration-tests/src/test/resources/indexer/union_select_query.json @@ -75,7 +75,7 @@ } }, { - "segmentId": "wikipedia_index_test0_2013-08-31T0com.metamx.common.ISE: one or more twitter queries failed0:00:00.000Z_2013-09-01T00:00:00.000Z_2014-05-01T15:27:43.993Z", + "segmentId": "wikipedia_index_test0_2013-08-31T0io.druid.java.util.common.ISE: one or more twitter queries failed0:00:00.000Z_2013-09-01T00:00:00.000Z_2014-05-01T15:27:43.993Z", "offset": 0, "event": { "timestamp": "2013-08-31T03:32:45.000Z", diff --git a/java-util/pom.xml b/java-util/pom.xml index 84f093753e44..bf348d92878a 100644 --- a/java-util/pom.xml +++ b/java-util/pom.xml @@ -24,7 +24,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT java-util @@ -38,6 +38,12 @@ + + UTF-8 + 1.6.5 + ${sigar.base.version}.132 + + org.slf4j @@ -85,6 +91,12 @@ + + net.thisptr + jackson-jq + + + com.google.code.findbugs jsr305 @@ -93,6 +105,37 @@ javax.validation validation-api + + org.asynchttpclient + async-http-client + + + + + org.hyperic + sigar + ${sigar.version} + true + + + org.hyperic + sigar-dist + ${sigar.version} + zip + + provided + + + org.gridkit.lab + jvm-attach-api + + + + + io.netty + netty + @@ -111,10 +154,46 @@ test true - - net.thisptr - jackson-jq - + + org.apache.logging.log4j + log4j-api + test + + + org.apache.logging.log4j + log4j-core + test + + + org.apache.logging.log4j + log4j-slf4j-impl + test + + + org.apache.logging.log4j + log4j-1.2-api + test + + + org.apache.logging.log4j + log4j-jul + test + + + org.eclipse.jetty + jetty-server + test + + + org.eclipse.jetty + jetty-servlet + test + + + it.unimi.dsi + fastutil + test + @@ -141,7 +220,56 @@ + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-sigar-lib-to-resources + generate-resources + + unpack-dependencies + + + org.hyperic + sigar-dist + **/sigar-bin/lib/* + **/sigar-bin/lib/*jar + + ${project.build.directory} + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + + java.library.path + ${project.build.directory}/hyperic-sigar-${sigar.base.version}/sigar-bin/lib/ + + + + + + + + + ${project.build.directory}/hyperic-sigar-${sigar.base.version}/sigar-bin/lib + + + - \ No newline at end of file + + + sigar + https://repository.jboss.org/nexus/content/repositories/thirdparty-uploads/ + + + + diff --git a/java-util/src/main/java/io/druid/concurrent/ConcurrentAwaitableCounter.java b/java-util/src/main/java/io/druid/concurrent/ConcurrentAwaitableCounter.java new file mode 100644 index 000000000000..e9ffa61ff726 --- /dev/null +++ b/java-util/src/main/java/io/druid/concurrent/ConcurrentAwaitableCounter.java @@ -0,0 +1,166 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
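The Avatica assertions above reduce to a plain JDBC round trip against the SQL endpoint. A self-contained sketch of that flow, with an assumed localhost broker address and made-up credentials (the real test builds the URL from IntegrationTestingConfig and DruidAvaticaHandler.AVATICA_PATH):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class AvaticaSmokeCheck
{
  public static void main(String[] args) throws Exception
  {
    // Assumed address; requires the Calcite Avatica driver on the classpath.
    String url = "jdbc:avatica:remote:url=http://localhost:8082/druid/v2/sql/avatica/";

    Properties connectionProperties = new Properties();
    connectionProperties.put("user", "admin");
    connectionProperties.put("password", "secret"); // wrong credentials should surface as AvaticaSqlException

    try (Connection connection = DriverManager.getConnection(url, connectionProperties);
         Statement statement = connection.createStatement()) {
      ResultSet resultSet = statement.executeQuery("SELECT * FROM INFORMATION_SCHEMA.COLUMNS");
      System.out.println("got first row: " + resultSet.next());
    }
  }
}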
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.concurrent; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.AbstractQueuedLongSynchronizer; + +/** + * This synchronization object allows one to {@link #increment} a counter without blocking, potentially from multiple + * threads (although in some use cases there is just one incrementer thread), and to block in other thread(s), waiting + * until the count reaches the provided value: see {@link #awaitCount}, or until the specified number of events have occurred since the + * call: see {@link #awaitNextIncrements}. + * + * This counter wraps around {@link Long#MAX_VALUE} and starts from 0 again, so the "next" count should generally be + * obtained by calling {@link #nextCount nextCount(currentCount)} rather than {@code currentCount + 1}. + * + * Memory consistency effects: actions in threads prior to calling {@link #increment} while the count was less than the + * awaited value happen-before actions following count awaiting methods such as {@link #awaitCount}. + */ +public final class ConcurrentAwaitableCounter +{ + private static final long MAX_COUNT = Long.MAX_VALUE; + + /** + * This method should be called to obtain the next total increment count to be passed to {@link #awaitCount} methods, + * instead of just adding 1 to the previous count, because the count must wrap around {@link Long#MAX_VALUE} and start + * from 0 again. + */ + public static long nextCount(long prevCount) + { + return (prevCount + 1) & MAX_COUNT; + } + + private static class Sync extends AbstractQueuedLongSynchronizer + { + @Override + protected long tryAcquireShared(long countWhenWaitStarted) + { + long currentCount = getState(); + return compareCounts(currentCount, countWhenWaitStarted) > 0 ? 1 : -1; + } + + @Override + protected boolean tryReleaseShared(long increment) + { + long count; + long nextCount; + do { + count = getState(); + nextCount = (count + increment) & MAX_COUNT; + } while (!compareAndSetState(count, nextCount)); + return true; + } + + long getCount() + { + return getState(); + } + } + + private final Sync sync = new Sync(); + + /** + * Increment the count. This method can be safely called from concurrent threads. + */ + public void increment() + { + sync.releaseShared(1); + } + + /** + * Await until {@link #increment} has been called on this counter object the specified number of times from the creation + * of this counter object. + */ + public void awaitCount(long totalCount) throws InterruptedException + { + checkTotalCount(totalCount); + long currentCount = sync.getCount(); + while (compareCounts(totalCount, currentCount) > 0) { + sync.acquireSharedInterruptibly(currentCount); + currentCount = sync.getCount(); + } + } + + private static void checkTotalCount(long totalCount) + { + if (totalCount < 0) { + throw new AssertionError( + "Total count must always be >= 0, even in the face of overflow. " + "The next count should always be obtained by calling ConcurrentAwaitableCounter.nextCount(prevCount), " + "not just +1" + ); + } + } + + /** + * Await until {@link #increment} has been called on this counter object the specified number of times from the creation + * of this counter object, for not longer than the specified period of time. If by this time the target increment + * count is not reached, {@link TimeoutException} is thrown. + */ + public void awaitCount(long totalCount, long timeout, TimeUnit unit) throws InterruptedException, TimeoutException + { + checkTotalCount(totalCount); + long nanos = unit.toNanos(timeout); + long currentCount = sync.getCount(); + while (compareCounts(totalCount, currentCount) > 0) { + if (!sync.tryAcquireSharedNanos(currentCount, nanos)) { + throw new TimeoutException(); + } + currentCount = sync.getCount(); + } + } + + private static int compareCounts(long count1, long count2) + { + long diff = (count1 - count2) & MAX_COUNT; + if (diff == 0) { + return 0; + } + return diff < MAX_COUNT / 2 ? 1 : -1; + } + + /** + * Somewhat loosely defined wait for "next N increments", because the starting point is not defined from the Java + * Memory Model perspective. + */ + public void awaitNextIncrements(long nextIncrements) throws InterruptedException + { + if (nextIncrements <= 0) { + throw new IllegalArgumentException("nextIncrements is not positive: " + nextIncrements); + } + if (nextIncrements > MAX_COUNT / 4) { + throw new UnsupportedOperationException("Couldn't wait for so many increments: " + nextIncrements); + } + awaitCount((sync.getCount() + nextIncrements) & MAX_COUNT); + } + + /** + * The difference between this method and {@link #awaitCount(long, long, TimeUnit)} with argument 1 is that {@code + * awaitFirstIncrement()} returns a boolean designating whether the count was awaited (while waiting for no longer than + * the specified period of time), while {@code awaitCount()} throws {@link TimeoutException} if the count was not + * awaited. + */ + public boolean awaitFirstIncrement(long timeout, TimeUnit unit) throws InterruptedException + { + return sync.tryAcquireSharedNanos(0, unit.toNanos(timeout)); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/common/CompressionUtils.java b/java-util/src/main/java/io/druid/java/util/common/CompressionUtils.java index 08614ff8d759..fd7bc1e6ec57 100644 --- a/java-util/src/main/java/io/druid/java/util/common/CompressionUtils.java +++ b/java-util/src/main/java/io/druid/java/util/common/CompressionUtils.java @@ -48,36 +48,59 @@ public class CompressionUtils { private static final Logger log = new Logger(CompressionUtils.class); private static final int DEFAULT_RETRY_COUNT = 3; - - public static final String GZ_SUFFIX = ".gz"; - public static final String ZIP_SUFFIX = ".zip"; + private static final String GZ_SUFFIX = ".gz"; + private static final String ZIP_SUFFIX = ".zip"; /** * Zip the contents of directory into the file indicated by outputZipFile. Sub directories are skipped * * @param directory The directory whose contents should be added to the zip in the output stream. * @param outputZipFile The output file to write the zipped data to + * @param fsync True if the output file should be fsynced to disk * * @return The number of bytes (uncompressed) read from the input directory.
* * @throws IOException */ - public static long zip(File directory, File outputZipFile) throws IOException + public static long zip(File directory, File outputZipFile, boolean fsync) throws IOException { if (!isZip(outputZipFile.getName())) { log.warn("No .zip suffix[%s], putting files from [%s] into it anyway.", outputZipFile, directory); } try (final FileOutputStream out = new FileOutputStream(outputZipFile)) { - return zip(directory, out); + long bytes = zip(directory, out); + + // For explanation of why fsyncing here is a good practice: + // https://github.com/druid-io/druid/pull/5187#pullrequestreview-85188984 + if (fsync) { + out.getChannel().force(true); + } + + return bytes; } } + /** + * Zip the contents of directory into the file indicated by outputZipFile. Sub directories are skipped + * + * @param directory The directory whose contents should be added to the zip in the output stream. + * @param outputZipFile The output file to write the zipped data to + * + * @return The number of bytes (uncompressed) read from the input directory. + * + * @throws IOException + */ + public static long zip(File directory, File outputZipFile) throws IOException + { + return zip(directory, outputZipFile, false); + } + /** * Zips the contents of the input directory to the output stream. Sub directories are skipped * * @param directory The directory whose contents should be added to the zip in the output stream. - * @param out The output stream to write the zip data to. It is closed in the process + * @param out The output stream to write the zip data to. Caller is responsible for closing this stream. * * @return The number of bytes (uncompressed) read from the input directory. * @@ -88,23 +111,23 @@ public static long zip(File directory, OutputStream out) throws IOException if (!directory.isDirectory()) { throw new IOE("directory[%s] is not a directory", directory); } - final File[] files = directory.listFiles(); + + final ZipOutputStream zipOut = new ZipOutputStream(out); long totalSize = 0; - try (final ZipOutputStream zipOut = new ZipOutputStream(out)) { - for (File file : files) { - log.info("Adding file[%s] with size[%,d]. Total size so far[%,d]", file, file.length(), totalSize); - if (file.length() >= Integer.MAX_VALUE) { - zipOut.finish(); - throw new IOE("file[%s] too large [%,d]", file, file.length()); - } - zipOut.putNextEntry(new ZipEntry(file.getName())); - totalSize += Files.asByteSource(file).copyTo(zipOut); + for (File file : directory.listFiles()) { + log.info("Adding file[%s] with size[%,d]. 
Total size so far[%,d]", file, file.length(), totalSize); + if (file.length() >= Integer.MAX_VALUE) { + zipOut.finish(); + throw new IOE("file[%s] too large [%,d]", file, file.length()); } - zipOut.closeEntry(); - // Workarround for http://hg.openjdk.java.net/jdk8/jdk8/jdk/rev/759aa847dcaf - zipOut.flush(); + zipOut.putNextEntry(new ZipEntry(file.getName())); + totalSize += Files.asByteSource(file).copyTo(zipOut); } + zipOut.closeEntry(); + // Workaround for http://hg.openjdk.java.net/jdk8/jdk8/jdk/rev/759aa847dcaf + zipOut.flush(); + zipOut.finish(); return totalSize; } diff --git a/java-util/src/main/java/io/druid/java/util/common/FileUtils.java b/java-util/src/main/java/io/druid/java/util/common/FileUtils.java index 72be3a57e18f..0a5d08063881 100644 --- a/java-util/src/main/java/io/druid/java/util/common/FileUtils.java +++ b/java-util/src/main/java/io/druid/java/util/common/FileUtils.java @@ -27,11 +27,16 @@ import java.io.File; import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FilterOutputStream; import java.io.IOException; +import java.io.OutputStream; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; +import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.Collection; +import java.util.UUID; public class FileUtils { @@ -46,6 +51,7 @@ public boolean apply(Throwable input) return input instanceof Exception; } }; + /** * Copy input byte source to outFile. If outFile exists, it is attempted to be deleted. * @@ -150,10 +156,11 @@ public void addFile(File file) * }} * * @param file the file to map + * * @return a {@link MappedByteBufferHandler}, wrapping a read-only buffer reflecting {@code file} - * @throws FileNotFoundException if the {@code file} does not exist - * @throws IOException if an I/O error occurs * + * @throws FileNotFoundException if the {@code file} does not exist + * @throws IOException if an I/O error occurs * @see FileChannel#map(FileChannel.MapMode, long, long) */ public static MappedByteBufferHandler map(File file) throws IOException @@ -161,4 +168,64 @@ public static MappedByteBufferHandler map(File file) throws IOException MappedByteBuffer mappedByteBuffer = Files.map(file); return new MappedByteBufferHandler(mappedByteBuffer); } + + /** + * Write to a file atomically, by first writing to a temporary file in the same directory and then moving it to + * the target location. This function attempts to clean up its temporary files when possible, but they may stick + * around (for example, if the JVM crashes partway through executing the function). In any case, the target file + * should be unharmed. + * + * The OutputStream passed to the consumer is uncloseable; calling close on it will do nothing. This is to ensure + * that the stream stays open so we can fsync it here before closing. Hopefully, this doesn't cause any problems + * for callers. + * + * This method is not just thread-safe, but is also safe to use from multiple processes on the same machine. + */ + public static void writeAtomically(final File file, OutputStreamConsumer f) throws IOException + { + writeAtomically(file, file.getParentFile(), f); + } + + private static void writeAtomically(final File file, final File tmpDir, OutputStreamConsumer f) throws IOException + { + final File tmpFile = new File(tmpDir, StringUtils.format(".%s.%s", file.getName(), UUID.randomUUID())); + + try { + try (final FileOutputStream out = new FileOutputStream(tmpFile)) { + // Pass f an uncloseable stream so we can fsync before closing. 
+ f.accept(uncloseable(out)); + + // fsync to avoid write-then-rename-then-crash causing empty files on some filesystems. + out.getChannel().force(true); + } + + // No exception thrown; do the move. + java.nio.file.Files.move( + tmpFile.toPath(), + file.toPath(), + StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING + ); + } + finally { + tmpFile.delete(); + } + } + + private static OutputStream uncloseable(final OutputStream out) throws IOException + { + return new FilterOutputStream(out) + { + @Override + public void close() throws IOException + { + // Do nothing. + } + }; + } + + public interface OutputStreamConsumer + { + void accept(OutputStream outputStream) throws IOException; + } } diff --git a/java-util/src/main/java/io/druid/java/util/common/Pair.java b/java-util/src/main/java/io/druid/java/util/common/Pair.java index 62142622c70c..8bdd62172bdf 100644 --- a/java-util/src/main/java/io/druid/java/util/common/Pair.java +++ b/java-util/src/main/java/io/druid/java/util/common/Pair.java @@ -21,6 +21,7 @@ import com.google.common.base.Function; +import javax.annotation.Nullable; import java.util.Comparator; /** @@ -28,18 +29,20 @@ public class Pair { - public static Pair of(T1 lhs, T2 rhs) + public static Pair of(@Nullable T1 lhs, @Nullable T2 rhs) { return new Pair<>(lhs, rhs); } + @Nullable public final T1 lhs; + @Nullable public final T2 rhs; public Pair( - T1 lhs, - T2 rhs + @Nullable T1 lhs, + @Nullable T2 rhs ) { this.lhs = lhs; diff --git a/java-util/src/main/java/io/druid/java/util/common/StreamUtils.java b/java-util/src/main/java/io/druid/java/util/common/StreamUtils.java index 079d6c9c810b..23ff9d621ae3 100644 --- a/java-util/src/main/java/io/druid/java/util/common/StreamUtils.java +++ b/java-util/src/main/java/io/druid/java/util/common/StreamUtils.java @@ -25,6 +25,9 @@ import com.google.common.io.ByteSource; import com.google.common.io.ByteStreams; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -34,6 +37,31 @@ */ public class StreamUtils { + /** + * Copy from an input stream to a file (and buffer it) and close the input stream. + *
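Both helpers above follow the same durability recipe: write, fsync, only then let the file become visible. A short caller-side sketch with illustrative paths (the directories are assumed to exist; CompressionUtils.zip here is the fsync overload added earlier in this patch):

import io.druid.java.util.common.CompressionUtils;
import io.druid.java.util.common.FileUtils;
import java.io.File;
import java.nio.charset.StandardCharsets;

public class DurableWriteExample
{
  public static void main(String[] args) throws Exception
  {
    // fsync=true forces the zip's bytes to disk before returning, so a crash
    // immediately afterwards cannot leave a silently truncated archive.
    long uncompressedBytes =
        CompressionUtils.zip(new File("/tmp/segment-work"), new File("/tmp/segment.zip"), true);
    System.out.println("zipped " + uncompressedBytes + " uncompressed bytes");

    // writeAtomically(): write a hidden temp file, fsync, then atomically rename
    // it over the target, so readers never observe a partially written file.
    FileUtils.writeAtomically(
        new File("/tmp/descriptor.json"),
        out -> out.write("{\"ok\":true}".getBytes(StandardCharsets.UTF_8))
    );
  }
}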

+ * It is highly recommended to use FileUtils.retryCopy whenever possible, and not use a raw `InputStream` + * + * @param is The input stream to copy bytes from. `is` is closed regardless of the copy result. + * @param file The file to copy bytes to. Any parent directories are automatically created. + * + * @return The count of bytes written to the file + * + * @throws IOException + */ + public static long copyToFileAndClose(InputStream is, File file) throws IOException + { + file.getParentFile().mkdirs(); + try (OutputStream os = new BufferedOutputStream(new FileOutputStream(file))) { + final long result = ByteStreams.copy(is, os); + // Workaround for http://hg.openjdk.java.net/jdk8/jdk8/jdk/rev/759aa847dcaf + os.flush(); + return result; + } + finally { + is.close(); + } + } /** * Copy from `is` to `os` and close the streams regardless of the result. diff --git a/java-util/src/main/java/io/druid/java/util/common/granularity/Granularities.java b/java-util/src/main/java/io/druid/java/util/common/granularity/Granularities.java index 599110550f9b..de8b6f688d2f 100644 --- a/java-util/src/main/java/io/druid/java/util/common/granularity/Granularities.java +++ b/java-util/src/main/java/io/druid/java/util/common/granularity/Granularities.java @@ -41,4 +41,8 @@ public class Granularities public static final Granularity ALL = GranularityType.ALL.getDefaultGranularity(); public static final Granularity NONE = GranularityType.NONE.getDefaultGranularity(); + public static Granularity nullToAll(Granularity granularity) + { + return granularity == null ? Granularities.ALL : granularity; + } } diff --git a/java-util/src/main/java/io/druid/java/util/common/guava/Sequence.java b/java-util/src/main/java/io/druid/java/util/common/guava/Sequence.java index 20fab62adc20..77d711b8a987 100644 --- a/java-util/src/main/java/io/druid/java/util/common/guava/Sequence.java +++ b/java-util/src/main/java/io/druid/java/util/common/guava/Sequence.java @@ -34,7 +34,7 @@ * methods get called and other resources get cleaned up whenever processing is complete. Without this inversion * it is very easy to unintentionally leak resources when iterating over something that is backed by a resource. *
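The new Granularities.nullToAll() above is a small null-defaulting helper; a trivial sketch of its behavior in both cases:

import io.druid.java.util.common.granularity.Granularities;
import io.druid.java.util.common.granularity.Granularity;

public class NullToAllExample
{
  public static void main(String[] args)
  {
    // An omitted (null) granularity defaults to ALL...
    Granularity effective = Granularities.nullToAll(null);
    System.out.println(effective == Granularities.ALL); // true

    // ...while any non-null granularity passes through unchanged.
    System.out.println(Granularities.nullToAll(Granularities.NONE) == Granularities.NONE); // true
  }
}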

    - * Sequences also expose {#see com.metamx.common.guava.Yielder} Yielder objects which allow you to implement a + * Sequences also expose {#see io.druid.java.util.common.guava.Yielder} Yielder objects which allow you to implement a * continuation over the Sequence. Yielder do not offer the same guarantees of automatic resource management * as the accumulate method, but they are Closeable and will do the proper cleanup when close() is called on them. */ diff --git a/java-util/src/main/java/io/druid/java/util/common/lifecycle/Lifecycle.java b/java-util/src/main/java/io/druid/java/util/common/lifecycle/Lifecycle.java index 05999be54566..eb8771adc48a 100644 --- a/java-util/src/main/java/io/druid/java/util/common/lifecycle/Lifecycle.java +++ b/java-util/src/main/java/io/druid/java/util/common/lifecycle/Lifecycle.java @@ -403,8 +403,7 @@ public void start() throws Exception for (Annotation annotation : method.getAnnotations()) { if (annotation.annotationType() .getCanonicalName() - .equals("io.druid.java.util.common.lifecycle.LifecycleStart") || - annotation.annotationType().getCanonicalName().equals("com.metamx.common.lifecycle.LifecycleStart")) { + .equals("io.druid.java.util.common.lifecycle.LifecycleStart")) { doStart = true; break; } @@ -424,8 +423,7 @@ public void stop() for (Annotation annotation : method.getAnnotations()) { if (annotation.annotationType() .getCanonicalName() - .equals("io.druid.java.util.common.lifecycle.LifecycleStop") || - annotation.annotationType().getCanonicalName().equals("com.metamx.common.lifecycle.LifecycleStop")) { + .equals("io.druid.java.util.common.lifecycle.LifecycleStop")) { doStop = true; break; } diff --git a/java-util/src/main/java/io/druid/java/util/common/logger/Logger.java b/java-util/src/main/java/io/druid/java/util/common/logger/Logger.java index 656de5f97c07..0d8189a0a08e 100644 --- a/java-util/src/main/java/io/druid/java/util/common/logger/Logger.java +++ b/java-util/src/main/java/io/druid/java/util/common/logger/Logger.java @@ -38,6 +38,12 @@ public Logger(Class clazz) log = LoggerFactory.getLogger(clazz); } + @Override + public String toString() + { + return StringUtils.format("Logger{name=[%s], class[%s]}", log.getName(), log.getClass()); + } + public void trace(String message, Object... formatArgs) { if (log.isTraceEnabled()) { diff --git a/java-util/src/main/java/io/druid/java/util/emitter/EmittingLogger.java b/java-util/src/main/java/io/druid/java/util/emitter/EmittingLogger.java new file mode 100644 index 000000000000..55c679d4991b --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/EmittingLogger.java @@ -0,0 +1,139 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter; + +import com.google.common.base.Preconditions; +import com.google.common.base.Predicates; +import com.google.common.collect.Maps; +import io.druid.java.util.common.ISE; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.service.AlertBuilder; +import io.druid.java.util.emitter.service.ServiceEmitter; + +import java.io.PrintWriter; +import java.io.StringWriter; + +/** + */ +public class EmittingLogger extends Logger +{ + private static volatile ServiceEmitter emitter = null; + + private final String className; + + public static void registerEmitter(ServiceEmitter emitter) + { + Preconditions.checkNotNull(emitter); + EmittingLogger.emitter = emitter; + } + + public EmittingLogger(String className) + { + super(className); + + this.className = className; + } + + public EmittingLogger(Class clazz) + { + super(clazz); + + this.className = clazz.getName(); + } + + public AlertBuilder makeAlert(String message, Object... objects) + { + return makeAlert(null, message, objects); + } + + public AlertBuilder makeAlert(Throwable t, String message, Object... objects) + { + if (emitter == null) { + final String errorMessage = StringUtils.format( + "Emitter not initialized! Cannot alert. Please make sure to call %s.registerEmitter()", this.getClass() + ); + + error(errorMessage); + throw new ISE(errorMessage); + } + + final AlertBuilder retVal = new EmittingAlertBuilder(t, StringUtils.format(message, objects), emitter) + .addData("class", className); + + if (t != null) { + final StringWriter trace = new StringWriter(); + final PrintWriter pw = new PrintWriter(trace); + t.printStackTrace(pw); + retVal.addData("exceptionType", t.getClass()); + retVal.addData("exceptionMessage", t.getMessage()); + retVal.addData("exceptionStackTrace", trace.toString()); + } + + return retVal; + } + + public class EmittingAlertBuilder extends AlertBuilder + { + private final Throwable t; + + private volatile boolean emitted = false; + + private EmittingAlertBuilder(Throwable t, String description, ServiceEmitter emitter) + { + super(description, emitter); + this.t = t; + } + + @Override + public void emit() + { + logIt("%s: %s"); + + emitted = true; + + super.emit(); + } + + @Override + protected void finalize() throws Throwable + { + if (!emitted) { + logIt("Alert not emitted, emitting. %s: %s"); + super.emit(); + } + } + + private void logIt(String format) + { + if (t == null) { + error(format, description, dataMap); + } else { + // Filter out the stack trace from the message, because it should be in the logline already if it's wanted. + error( + t, + format, + description, + Maps.filterKeys(dataMap, Predicates.not(Predicates.equalTo("exceptionStackTrace"))) + ); + } + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/BaseHttpEmittingConfig.java b/java-util/src/main/java/io/druid/java/util/emitter/core/BaseHttpEmittingConfig.java new file mode 100644 index 000000000000..9838bfb1903a --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/BaseHttpEmittingConfig.java @@ -0,0 +1,180 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.annotation.JsonProperty; +import io.druid.java.util.common.Pair; + +import javax.validation.constraints.Min; + +public class BaseHttpEmittingConfig +{ + public static final long DEFAULT_FLUSH_MILLIS = 60 * 1000; + public static final int DEFAULT_FLUSH_COUNTS = 500; + + /** Ensure the event buffers don't use more than 10% of memory by default */ + public static final int DEFAULT_MAX_BATCH_SIZE; + public static final int DEFAULT_BATCH_QUEUE_SIZE_LIMIT; + static { + Pair<Integer, Integer> batchConfigPair = getDefaultBatchSizeAndLimit(Runtime.getRuntime().maxMemory()); + DEFAULT_MAX_BATCH_SIZE = batchConfigPair.lhs; + DEFAULT_BATCH_QUEUE_SIZE_LIMIT = batchConfigPair.rhs; + } + + /** + * Do not time out in case flushTimeOut is not set + */ + public static final long DEFAULT_FLUSH_TIME_OUT = Long.MAX_VALUE; + public static final String DEFAULT_BASIC_AUTHENTICATION = null; + public static final BatchingStrategy DEFAULT_BATCHING_STRATEGY = BatchingStrategy.ARRAY; + public static final ContentEncoding DEFAULT_CONTENT_ENCODING = null; + public static final float DEFAULT_HTTP_TIMEOUT_ALLOWANCE_FACTOR = 2.0f; + /** + * The default value effectively doesn't set the min timeout + */ + public static final int DEFAULT_MIN_HTTP_TIMEOUT_MILLIS = 0; + + public static Pair<Integer, Integer> getDefaultBatchSizeAndLimit(long maxMemory) + { + // E.g. with maxMemory = 1 GiB: memoryLimit = 107374182 bytes, and 5 MiB * 50 exceeds it, + // so queueLimit becomes 107374182 / 5242880 = 20. + long memoryLimit = maxMemory / 10; + long batchSize = 5 * 1024 * 1024; + long queueLimit = 50; + + if (batchSize * queueLimit > memoryLimit) { + queueLimit = memoryLimit / batchSize; + } + + // make room for at least two queue items + if (queueLimit < 2) { + queueLimit = 2; + batchSize = memoryLimit / queueLimit; + } + + return new Pair<>((int) batchSize, (int) queueLimit); + } + + @Min(1) + @JsonProperty + long flushMillis = DEFAULT_FLUSH_MILLIS; + + @Min(0) + @JsonProperty + int flushCount = DEFAULT_FLUSH_COUNTS; + + @Min(0) + @JsonProperty + long flushTimeOut = DEFAULT_FLUSH_TIME_OUT; + + @JsonProperty + String basicAuthentication = DEFAULT_BASIC_AUTHENTICATION; + + @JsonProperty + BatchingStrategy batchingStrategy = DEFAULT_BATCHING_STRATEGY; + + @Min(0) + @JsonProperty + int maxBatchSize = DEFAULT_MAX_BATCH_SIZE; + + @JsonProperty + ContentEncoding contentEncoding = DEFAULT_CONTENT_ENCODING; + + @Min(0) + @JsonProperty + int batchQueueSizeLimit = DEFAULT_BATCH_QUEUE_SIZE_LIMIT; + + @Min(1) + @JsonProperty + float httpTimeoutAllowanceFactor = DEFAULT_HTTP_TIMEOUT_ALLOWANCE_FACTOR; + + @Min(0) + @JsonProperty + int minHttpTimeoutMillis = DEFAULT_MIN_HTTP_TIMEOUT_MILLIS; + + public long getFlushMillis() + { + return flushMillis; + } + + public int getFlushCount() + { + return flushCount; + } + + public long getFlushTimeOut() + { + return flushTimeOut; + } + + public String getBasicAuthentication() + { + return basicAuthentication; + } + + public BatchingStrategy getBatchingStrategy() + { + return batchingStrategy; + } + + public int
getMaxBatchSize() + { + return maxBatchSize; + } + + public ContentEncoding getContentEncoding() + { + return contentEncoding; + } + + public int getBatchQueueSizeLimit() + { + return batchQueueSizeLimit; + } + + public float getHttpTimeoutAllowanceFactor() + { + return httpTimeoutAllowanceFactor; + } + + public int getMinHttpTimeoutMillis() + { + return minHttpTimeoutMillis; + } + + @Override + public String toString() + { + return "BaseHttpEmittingConfig{" + toStringBase() + '}'; + } + + protected String toStringBase() + { + return + "flushMillis=" + flushMillis + + ", flushCount=" + flushCount + + ", flushTimeOut=" + flushTimeOut + + ", basicAuthentication='" + basicAuthentication + '\'' + + ", batchingStrategy=" + batchingStrategy + + ", maxBatchSize=" + maxBatchSize + + ", contentEncoding=" + contentEncoding + + ", batchQueueSizeLimit=" + batchQueueSizeLimit + + ", httpTimeoutAllowanceFactor=" + httpTimeoutAllowanceFactor + + ", minHttpTimeoutMillis=" + minHttpTimeoutMillis; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/Batch.java b/java-util/src/main/java/io/druid/java/util/emitter/core/Batch.java new file mode 100644 index 000000000000..cb0bc88042b1 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/Batch.java @@ -0,0 +1,359 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.google.common.base.Preconditions; +import io.druid.java.util.common.logger.Logger; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.AbstractQueuedLongSynchronizer; + +/** + * Buffer for batched data + synchronization state. + *

    + * The state structure ({@link AbstractQueuedLongSynchronizer#state}): + * Bits 0-30 - bufferWatermark + * Bit 31 - always 0 + * Bits 32-62 - "parties" (the number of concurrent writers) + * Bit 63 - sealed flag + *

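(For illustration, not part of the patch: the packed state long described above decodes with plain bit operations, mirroring the bufferWatermark()/parties()/isSealed() helpers defined below.)

    int bufferWatermark = (int) state;                        // bits 0-30 (bit 31 is always 0)
    int parties = ((int) (state >>> 32)) & Integer.MAX_VALUE; // bits 32-62
    boolean sealed = state < 0;                               // bit 63 is the sign bit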
+ * Writer threads (callers of {@link HttpPostEmitter#emit(Event)}) may increment the bufferWatermark and write data + * into the buffer as long as the sealed flag is false. + *

+ * {@link HttpPostEmitter#emittingThread} may emit the buffer once the sealed flag is set and parties is 0 (i.e. all + * writes are completed). See {@link #isEmittingAllowed(long)}. + *

+ * In this class, "lock" means "increment the number of parties by 1", i.e. lock the emitter thread from emitting this + * batch. "Unlock" means "decrement the number of parties by 1". + */ +class Batch extends AbstractQueuedLongSynchronizer +{ + private static final Logger log = new Logger(Batch.class); + + private static final long PARTY = 1L << 32; + private static final long SEAL_BIT = 1L << 63; + + private static int bufferWatermark(long state) + { + return (int) state; + } + + private static int parties(long state) + { + return ((int) (state >>> 32)) & Integer.MAX_VALUE; + } + + private static boolean isSealed(long state) + { + // The highest bit is 1. + return state < 0; + } + + private static boolean isEmittingAllowed(long state) + { + return isSealed(state) && parties(state) == 0; + } + + /** + * Tags (arg values) to transmit a request from {@link #releaseShared(long)} to {@link #tryReleaseShared(long)}. + */ + private static final long UNLOCK_TAG = 0; + private static final long UNLOCK_AND_SEAL_TAG = 1; + private static final long SEAL_TAG = 2; + + /** + * The emitter this batch belongs to. + */ + private final HttpPostEmitter emitter; + + /** + * The data buffer of the batch. + */ + final byte[] buffer; + + /** + * Ordering number of this batch; batches are filled and emitted serially in {@link HttpPostEmitter}, starting from 0. + * It's a boxed Long rather than a primitive long, because we want to minimize the number of allocations done in + * {@link HttpPostEmitter#onSealExclusive} and hence the probability of {@link OutOfMemoryError}. + * @see HttpPostEmitter#onSealExclusive + * @see HttpPostEmitter#concurrentBatch + */ + final Long batchNumber; + + /** + * The number of events in this batch, needed for event count-based batch emitting. + */ + final AtomicInteger eventCount = new AtomicInteger(0); + + /** + * The time when the first event was written into this batch, needed for timeout-based batch emitting. + */ + private long firstEventTimestamp = -1; + + Batch(HttpPostEmitter emitter, byte[] buffer, long batchNumber) + { + this.emitter = emitter; + this.buffer = buffer; + this.batchNumber = batchNumber; + } + + int getSealedBufferWatermark() + { + long state = getState(); + Preconditions.checkState(isSealed(state)); + return bufferWatermark(state); + } + + /** + * Tries to add (write) an event to the batch; returns true if successful. If it fails, no subsequent attempts to add + * an event to this batch will succeed; the next batch should be taken.
+ */ + boolean tryAddEvent(byte[] event) + { + while (true) { + long state = getState(); + if (isSealed(state)) { + return false; + } + int bufferWatermark = bufferWatermark(state); + if (bufferWatermark == 0) { + if (tryAddFirstEvent(event)) { + return true; + } + } else if (newBufferWatermark(bufferWatermark, event) <= emitter.maxBufferWatermark) { + if (tryAddNonFirstEvent(state, event)) { + return true; + } + } else { + seal(); + return false; + } + } + } + + private boolean tryAddFirstEvent(byte[] event) + { + if (!tryReserveFirstEventSizeAndLock(event)) { + return false; + } + try { + int bufferOffset = emitter.batchingStrategy.writeBatchStart(buffer); + writeEvent(event, bufferOffset); + eventCount.incrementAndGet(); + firstEventTimestamp = System.currentTimeMillis(); + return true; + } + finally { + unlock(); + } + } + + private boolean tryReserveFirstEventSizeAndLock(byte[] event) + { + return compareAndSetState(0, emitter.batchingStrategy.batchStartLength() + event.length + PARTY); + } + + private int newBufferWatermark(int bufferWatermark, byte[] eventBytes) + { + return bufferWatermark + emitter.batchingStrategy.separatorLength() + eventBytes.length; + } + + private boolean tryAddNonFirstEvent(long state, byte[] event) + { + int bufferOffset = tryReserveEventSizeAndLock(state, emitter.batchingStrategy.separatorLength() + event.length); + if (bufferOffset < 0) { + return false; + } + try { + bufferOffset = emitter.batchingStrategy.writeMessageSeparator(buffer, bufferOffset); + writeEvent(event, bufferOffset); + return true; + } + finally { + unlockAndSealIfNeeded(); + } + } + + /** + * Returns the buffer offset at which the caller has reserved the ability to write `size` bytes exclusively, + * or a negative number if the reservation attempt failed.
+ */ + private int tryReserveEventSizeAndLock(long state, int size) + { + Preconditions.checkArgument(size > 0); + int bufferWatermark = bufferWatermark(state); + while (true) { + if (compareAndSetState(state, state + size + PARTY)) { + return bufferWatermark; + } + state = getState(); + if (isSealed(state)) { + return -1; + } + bufferWatermark = bufferWatermark(state); + int newBufferWatermark = bufferWatermark + size; + Preconditions.checkState(newBufferWatermark > 0); + if (newBufferWatermark > emitter.maxBufferWatermark) { + return -1; + } + } + } + + private void unlockAndSealIfNeeded() + { + if (eventCount.incrementAndGet() >= emitter.config.getFlushCount()) { + unlockAndSeal(); + } else { + long timeSinceFirstEvent = System.currentTimeMillis() - firstEventTimestamp; + if (firstEventTimestamp > 0 && timeSinceFirstEvent > emitter.config.getFlushMillis()) { + unlockAndSeal(); + } else { + unlock(); + } + } + } + + void sealIfFlushNeeded() + { + long timeSinceFirstEvent = System.currentTimeMillis() - firstEventTimestamp; + if (firstEventTimestamp > 0 && timeSinceFirstEvent > emitter.config.getFlushMillis()) { + seal(); + } + } + + private void writeEvent(byte[] event, int bufferOffset) + { + System.arraycopy(event, 0, buffer, bufferOffset, event.length); + } + + + private void unlock() + { + releaseShared(UNLOCK_TAG); + } + + private void unlockAndSeal() + { + releaseShared(UNLOCK_AND_SEAL_TAG); + } + + void seal() + { + releaseShared(SEAL_TAG); + } + + @Override + protected boolean tryReleaseShared(long tag) + { + if (tag == UNLOCK_TAG) { + while (true) { + long state = getState(); + int parties = parties(state); + if (parties == 0) { + throw new IllegalMonitorStateException(); + } + long newState = state - PARTY; + if (compareAndSetState(state, newState)) { + return isEmittingAllowed(newState); + } + } + } else if (tag == UNLOCK_AND_SEAL_TAG) { + while (true) { + long state = getState(); + int parties = parties(state); + if (parties == 0) { + throw new IllegalMonitorStateException(); + } + long newState = (state - PARTY) | SEAL_BIT; + if (compareAndSetState(state, newState)) { + // Ensures only one thread calls emitter.onSealExclusive() for each batch. + if (!isSealed(state)) { + log.debug("Unlocked and sealed batch [%d]", batchNumber); + debugLogState("old state", state); + debugLogState("new state", newState); + emitter.onSealExclusive( + this, + firstEventTimestamp > 0 ? System.currentTimeMillis() - firstEventTimestamp : -1 + ); + } + return isEmittingAllowed(newState); + } + } + } else if (tag == SEAL_TAG) { + while (true) { + long state = getState(); + if (isSealed(state)) { + // Returning false even though acquisition may be possible now, because this thread didn't actually update + // the state, i.e. didn't "release" in AbstractQueuedLongSynchronizer's terms. + return false; + } + long newState = state | SEAL_BIT; + if (compareAndSetState(state, newState)) { + log.debug("Sealed batch [%d]", batchNumber); + debugLogState("old state", state); + debugLogState("new state", newState); + emitter.onSealExclusive( + this, + firstEventTimestamp > 0 ? System.currentTimeMillis() - firstEventTimestamp : -1 + ); + return isEmittingAllowed(newState); + } + } + } else { + throw new IllegalStateException("Unknown tag: " + tag); + } + } + + void awaitEmittingAllowed() + { + acquireShared(1); + } + + @Override + protected long tryAcquireShared(long ignored) + { + return isEmittingAllowed(getState()) ?
1 : -1; + } + + @Override + public String toString() + { + long state = getState(); + return "Batch{" + + "batchNumber=" + batchNumber + + ", bufferWatermark=" + bufferWatermark(state) + + ", parties=" + parties(state) + + ", isSealed=" + isSealed(state) + + "}"; + } + + private static void debugLogState(String name, long state) + { + if (log.isDebugEnabled()) { + log.debug( + "%s[bufferWatermark=%d, parties=%d, isSealed=%s]", + name, + bufferWatermark(state), + parties(state), + isSealed(state) + ); + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/BatchingStrategy.java b/java-util/src/main/java/io/druid/java/util/emitter/core/BatchingStrategy.java new file mode 100644 index 000000000000..b94f894b42c6 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/BatchingStrategy.java @@ -0,0 +1,155 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +public enum BatchingStrategy +{ + ARRAY { + + @Override + public int batchStartLength() + { + return 1; + } + + @Override + public int separatorLength() + { + return 1; + } + + @Override + public int batchEndLength() + { + return 2; + } + + @Override + public int writeBatchStart(byte[] buffer) + { + buffer[0] = '['; + return batchStartLength(); + } + + @Override + public int writeMessageSeparator(byte[] buffer, int bufferOffset) + { + buffer[bufferOffset] = ','; + return bufferOffset + separatorLength(); + } + + @Override + public int writeBatchEnd(byte[] buffer, int bufferOffset) + { + buffer[bufferOffset] = ']'; + buffer[bufferOffset + 1] = '\n'; + return bufferOffset + batchEndLength(); + } + }, + NEWLINES { + + @Override + public int batchStartLength() + { + return 0; + } + + @Override + public int separatorLength() + { + return 1; + } + + @Override + public int batchEndLength() + { + return 1; + } + + @Override + public int writeBatchStart(byte[] buffer) + { + // Write nothing + return batchStartLength(); + } + + @Override + public int writeMessageSeparator(byte[] buffer, int bufferOffset) + { + buffer[bufferOffset] = '\n'; + return bufferOffset + separatorLength(); + } + + @Override + public int writeBatchEnd(byte[] buffer, int bufferOffset) + { + return writeMessageSeparator(buffer, bufferOffset); + } + }, + ONLY_EVENTS { + @Override + public int batchStartLength() + { + return 0; + } + + @Override + public int separatorLength() + { + return 0; + } + + @Override + public int batchEndLength() + { + return 0; + } + + @Override + public int writeBatchStart(byte[] buffer) + { + return 0; + } + + @Override + public int writeMessageSeparator(byte[] buffer, int bufferOffset) + { + return bufferOffset; + } + + @Override + public int writeBatchEnd(byte[] buffer, int bufferOffset) + { + return 
bufferOffset; + } + }; + + public abstract int batchStartLength(); + + public abstract int separatorLength(); + + public abstract int batchEndLength(); + + public abstract int writeBatchStart(byte[] buffer); + + public abstract int writeMessageSeparator(byte[] buffer, int bufferOffset); + + public abstract int writeBatchEnd(byte[] buffer, int bufferOffset); +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/ComposingEmitter.java b/java-util/src/main/java/io/druid/java/util/emitter/core/ComposingEmitter.java new file mode 100644 index 000000000000..b2d5b9fc8afb --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/ComposingEmitter.java @@ -0,0 +1,113 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.google.common.base.Preconditions; +import io.druid.java.util.common.lifecycle.LifecycleStart; +import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.common.logger.Logger; + +import java.io.IOException; +import java.util.List; + +public class ComposingEmitter implements Emitter +{ + private static final Logger log = new Logger(ComposingEmitter.class); + + private final List<Emitter> emitters; + + public ComposingEmitter(List<Emitter> emitters) + { + this.emitters = Preconditions.checkNotNull(emitters, "null emitters"); + } + + @Override + @LifecycleStart + public void start() + { + log.info("Starting Composing Emitter."); + + for (Emitter e : emitters) { + log.info("Starting emitter %s.", e.getClass().getName()); + e.start(); + } + } + + @Override + public void emit(Event event) + { + for (Emitter e : emitters) { + e.emit(event); + } + } + + @Override + public void flush() throws IOException + { + boolean fail = false; + log.info("Flushing Composing Emitter."); + + for (Emitter e : emitters) { + try { + log.info("Flushing emitter %s.", e.getClass().getName()); + e.flush(); + } + catch (IOException ex) { + log.error(ex, "Failed to flush emitter [%s]", e.getClass().getName()); + fail = true; + } + } + + if (fail) { + throw new IOException("failed to flush one or more emitters"); + } + } + + @Override + @LifecycleStop + public void close() throws IOException + { + boolean fail = false; + log.info("Closing Composing Emitter."); + + for (Emitter e : emitters) { + try { + log.info("Closing emitter %s.", e.getClass().getName()); + e.close(); + } + catch (IOException ex) { + log.error(ex, "Failed to close emitter [%s]", e.getClass().getName()); + fail = true; + } + } + + if (fail) { + throw new IOException("failed to close one or more emitters"); + } + } + + @Override + public String toString() + { + return "ComposingEmitter{" + + "emitters=" + emitters + + '}'; + } +}
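As a small illustration of how the BatchingStrategy variants frame a payload (the event bytes are invented for the example), the ARRAY strategy wraps events into a JSON array:

    byte[] buffer = new byte[64];
    byte[] e1 = "{\"a\":1}".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    byte[] e2 = "{\"b\":2}".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    int offset = BatchingStrategy.ARRAY.writeBatchStart(buffer);                       // writes '['
    System.arraycopy(e1, 0, buffer, offset, e1.length);
    offset = BatchingStrategy.ARRAY.writeMessageSeparator(buffer, offset + e1.length); // writes ','
    System.arraycopy(e2, 0, buffer, offset, e2.length);
    offset = BatchingStrategy.ARRAY.writeBatchEnd(buffer, offset + e2.length);         // writes ']' and '\n'
    // buffer[0..offset) now holds: [{"a":1},{"b":2}]\n

diff --git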
a/java-util/src/main/java/io/druid/java/util/emitter/core/ConcurrentTimeCounter.java b/java-util/src/main/java/io/druid/java/util/emitter/core/ConcurrentTimeCounter.java new file mode 100644 index 000000000000..4b836789f7ae --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/ConcurrentTimeCounter.java @@ -0,0 +1,96 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.google.common.primitives.UnsignedInts; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * A class to accumulate simple stats of some time points. All methods are safe to use from multiple threads. + */ +public class ConcurrentTimeCounter +{ + /** Lower 32 bits for the sum of {@link #add}ed times, higher 32 bits for the count */ + private final AtomicLong timeSumAndCount = new AtomicLong(0L); + /** Lower 32 bits for the max {@link #add}ed time; bit 63 (the sign bit) indicates whether any value has been added. */ + private final AtomicLong max = new AtomicLong(-1); + /** Similar to {@link #max} */ + private final AtomicLong min = new AtomicLong(-1); + + public void add(int time) + { + long x = (1L << 32) | time; + timeSumAndCount.addAndGet(x); + updateMax(time); + updateMin(time); + } + + private void updateMax(int time) + { + long max; + do { + max = this.max.get(); + if (max >= 0 && ((int) max) >= time) { + return; + } + } while (!this.max.compareAndSet(max, UnsignedInts.toLong(time))); + } + + private void updateMin(int time) + { + long min; + do { + min = this.min.get(); + if (min >= 0 && ((int) min) <= time) { + return; + } + } while (!this.min.compareAndSet(min, UnsignedInts.toLong(time))); + } + + public long getTimeSumAndCountAndReset() + { + return timeSumAndCount.getAndSet(0L); + } + + public int getAndResetMaxTime() + { + long max = this.max.getAndSet(-1); + // If max < 0, no time has been added yet, so return 0 + return max >= 0 ? (int) max : 0; + } + + public int getAndResetMinTime() + { + long min = this.min.getAndSet(-1); + // If min < 0, no time has been added yet, so return 0 + return min >= 0 ? (int) min : 0; + } + + public static int timeSum(long timeSumAndCount) + { + return (int) timeSumAndCount; + } + + public static int count(long timeSumAndCount) + { + return (int) (timeSumAndCount >> 32); + } +}
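A small sketch of the packed sum/count accessors above:

    ConcurrentTimeCounter counter = new ConcurrentTimeCounter();
    counter.add(10);
    counter.add(30);
    long sumAndCount = counter.getTimeSumAndCountAndReset();
    int sum = ConcurrentTimeCounter.timeSum(sumAndCount); // 40 (lower 32 bits)
    int n = ConcurrentTimeCounter.count(sumAndCount);     // 2 (upper 32 bits)

diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/ContentEncoding.java b/java-util/src/main/java/io/druid/java/util/emitter/core/ContentEncoding.java new file mode 100644 index 000000000000..ad45c24de2be --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/ContentEncoding.java @@ -0,0 +1,25 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements.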
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +public enum ContentEncoding +{ + GZIP +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/Emitter.java b/java-util/src/main/java/io/druid/java/util/emitter/core/Emitter.java new file mode 100644 index 000000000000..a3fdf88b92e2 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/Emitter.java @@ -0,0 +1,38 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import java.io.Closeable; +import java.io.Flushable; +import java.io.IOException; + +/** + */ +public interface Emitter extends Closeable, Flushable +{ + void start(); + void emit(Event event); + + @Override + void flush() throws IOException; + + @Override + void close() throws IOException; +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/Emitters.java b/java-util/src/main/java/io/druid/java/util/emitter/core/Emitters.java new file mode 100644 index 000000000000..537233c9c4f4 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/Emitters.java @@ -0,0 +1,177 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.Maps; +import io.druid.java.util.common.IAE; +import io.druid.java.util.common.ISE; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.factory.EmitterFactory; +import org.asynchttpclient.AsyncHttpClient; + +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +public class Emitters +{ + private static final Logger log = new Logger(Emitters.class); + + private static final String LOG_EMITTER_PROP = "io.druid.java.util.emitter.logging"; + private static final String HTTP_EMITTER_PROP = "io.druid.java.util.emitter.http"; + private static final String CUSTOM_EMITTER_TYPE_PROP = "io.druid.java.util.emitter.type"; + + public static Emitter create(Properties props, AsyncHttpClient httpClient, Lifecycle lifecycle) + { + return create(props, httpClient, new ObjectMapper(), lifecycle); + } + + public static Emitter create( + Properties props, + AsyncHttpClient httpClient, + ObjectMapper jsonMapper, + Lifecycle lifecycle + ) + { + Map<String, Object> jsonified = Maps.newHashMap(); + if (props.getProperty(LOG_EMITTER_PROP) != null) { + jsonified = makeLoggingMap(props); + jsonified.put("type", "logging"); + } else if (props.getProperty(HTTP_EMITTER_PROP) != null) { + jsonified = makeHttpMap(props); + jsonified.put("type", "http"); + } else if (props.getProperty(CUSTOM_EMITTER_TYPE_PROP) != null) { + jsonified = makeCustomFactoryMap(props); + } else { + throw new ISE( + "Unknown type of emitter. Please set [%s], [%s] or provide a registered subtype of io.druid.java.util.emitter.factory.EmitterFactory via [%s]", + LOG_EMITTER_PROP, + HTTP_EMITTER_PROP, + CUSTOM_EMITTER_TYPE_PROP + ); + } + return jsonMapper.convertValue(jsonified, EmitterFactory.class).makeEmitter(jsonMapper, httpClient, lifecycle); + } + + // Package-visible for unit tests + + static Map<String, Object> makeHttpMap(Properties props) + { + Map<String, Object> httpMap = Maps.newHashMap(); + + final String urlProperty = "io.druid.java.util.emitter.http.url"; + + final String baseUrl = props.getProperty(urlProperty); + if (baseUrl == null) { + throw new IAE("Property[%s] must be set", urlProperty); + } + + httpMap.put("recipientBaseUrl", baseUrl); + httpMap.put("flushMillis", Long.parseLong(props.getProperty("io.druid.java.util.emitter.flushMillis", "60000"))); + httpMap.put("flushCount", Integer.parseInt(props.getProperty("io.druid.java.util.emitter.flushCount", "300"))); + /** + * The default value for "io.druid.java.util.emitter.http.flushTimeOut" must be the same as {@link HttpEmitterConfig#DEFAULT_FLUSH_TIME_OUT} + */ + httpMap.put( + "flushTimeOut", + Long.parseLong(props.getProperty( + "io.druid.java.util.emitter.http.flushTimeOut", + String.valueOf(Long.MAX_VALUE) + )) + ); + if (props.containsKey("io.druid.java.util.emitter.http.basicAuthentication")) { + httpMap.put("basicAuthentication", props.getProperty("io.druid.java.util.emitter.http.basicAuthentication")); + } + if (props.containsKey("io.druid.java.util.emitter.http.batchingStrategy")) { + httpMap.put( + "batchingStrategy", + StringUtils.toUpperCase(props.getProperty("io.druid.java.util.emitter.http.batchingStrategy")) + ); + } + if (props.containsKey("io.druid.java.util.emitter.http.maxBatchSize")) { + httpMap.put("maxBatchSize", Integer.parseInt(props.getProperty("io.druid.java.util.emitter.http.maxBatchSize"))); + } + if
(props.containsKey("io.druid.java.util.emitter.http.batchQueueSizeLimit")) { + httpMap.put( + "batchQueueSizeLimit", + Integer.parseInt(props.getProperty("io.druid.java.util.emitter.http.batchQueueSizeLimit")) + ); + } + if (props.containsKey("io.druid.java.util.emitter.http.httpTimeoutAllowanceFactor")) { + httpMap.put( + "httpTimeoutAllowanceFactor", + Float.parseFloat(props.getProperty("io.druid.java.util.emitter.http.httpTimeoutAllowanceFactor")) + ); + } + if (props.containsKey("io.druid.java.util.emitter.http.minHttpTimeoutMillis")) { + httpMap.put( + "minHttpTimeoutMillis", + Float.parseFloat(props.getProperty("io.druid.java.util.emitter.http.minHttpTimeoutMillis")) + ); + } + return httpMap; + } + + // Package-visible for unit tests + static Map makeLoggingMap(Properties props) + { + Map loggingMap = Maps.newHashMap(); + + loggingMap.put( + "loggerClass", props.getProperty("io.druid.java.util.emitter.logging.class", LoggingEmitter.class.getName()) + ); + loggingMap.put( + "logLevel", props.getProperty("io.druid.java.util.emitter.logging.level", "debug") + ); + return loggingMap; + } + + static Map makeCustomFactoryMap(Properties props) + { + Map factoryMap = Maps.newHashMap(); + String prefix = "io.druid.java.util.emitter."; + + for (Map.Entry entry : props.entrySet()) { + String key = entry.getKey().toString(); + if (key.startsWith(prefix)) { + String combinedKey = key.substring(prefix.length()); + Map currentLevelJson = factoryMap; + String currentKey = null; + String[] keyPath = combinedKey.split("\\."); + + for (int i = 0; i < keyPath.length - 1; i++) { + String keyPart = keyPath[i]; + Object nextLevelJson = currentLevelJson.get(keyPart); + if (nextLevelJson == null) { + nextLevelJson = new HashMap(); + currentLevelJson.put(keyPart, nextLevelJson); + } + currentLevelJson = (Map) nextLevelJson; + } + + currentLevelJson.put(keyPath[keyPath.length - 1], entry.getValue()); + } + } + return factoryMap; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/Event.java b/java-util/src/main/java/io/druid/java/util/emitter/core/Event.java new file mode 100644 index 000000000000..b9588ab40a0d --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/Event.java @@ -0,0 +1,37 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import org.joda.time.DateTime; + +import java.util.Map; + +/** + */ +public interface Event +{ + Map<String, Object> toMap(); + + String getFeed(); + + DateTime getCreatedTime(); + + boolean isSafeToBuffer(); +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/FeedUriExtractor.java b/java-util/src/main/java/io/druid/java/util/emitter/core/FeedUriExtractor.java new file mode 100644 index 000000000000..dbac2aa7408b --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/FeedUriExtractor.java @@ -0,0 +1,49 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import io.druid.java.util.common.StringUtils; + +import java.net.URI; +import java.net.URISyntaxException; + +public class FeedUriExtractor implements UriExtractor +{ + private final String uriPattern; + + public FeedUriExtractor(String uriPattern) + { + this.uriPattern = uriPattern; + } + + @Override + public URI apply(Event event) throws URISyntaxException + { + return new URI(StringUtils.format(uriPattern, event.getFeed())); + } + + @Override + public String toString() + { + return "FeedUriExtractor{" + + "uriPattern='" + uriPattern + '\'' + + '}'; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/HttpEmitterConfig.java b/java-util/src/main/java/io/druid/java/util/emitter/core/HttpEmitterConfig.java new file mode 100644 index 000000000000..1db1b840ff73 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/HttpEmitterConfig.java @@ -0,0 +1,140 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import javax.validation.constraints.NotNull; + +/** + */ +public class HttpEmitterConfig extends BaseHttpEmittingConfig +{ + @NotNull + @JsonProperty + String recipientBaseUrl = null; + + /** + * For JSON deserialization only.
In other cases use {@link Builder} + */ + public HttpEmitterConfig() {} + + public HttpEmitterConfig(BaseHttpEmittingConfig base, String recipientBaseUrl) + { + this.flushMillis = base.flushMillis; + this.flushCount = base.flushCount; + this.flushTimeOut = base.flushTimeOut; + this.recipientBaseUrl = recipientBaseUrl; + this.basicAuthentication = base.basicAuthentication; + this.batchingStrategy = base.batchingStrategy; + this.maxBatchSize = base.maxBatchSize; + this.contentEncoding = base.contentEncoding; + this.batchQueueSizeLimit = base.batchQueueSizeLimit; + this.httpTimeoutAllowanceFactor = base.httpTimeoutAllowanceFactor; + this.minHttpTimeoutMillis = base.minHttpTimeoutMillis; + } + + public String getRecipientBaseUrl() + { + return recipientBaseUrl; + } + + @Override + public String toString() + { + return "HttpEmitterConfig{" + + toStringBase() + + ", recipientBaseUrl='" + recipientBaseUrl + '\'' + + '}'; + } + + public static class Builder extends HttpEmitterConfig + { + public Builder(String recipientBaseUrl) + { + this.recipientBaseUrl = recipientBaseUrl; + } + + public Builder setFlushMillis(long flushMillis) + { + this.flushMillis = flushMillis; + return this; + } + + public Builder setFlushCount(int flushCount) + { + this.flushCount = flushCount; + return this; + } + + public Builder setFlushTimeOut(long flushTimeOut) + { + this.flushTimeOut = flushTimeOut; + return this; + } + + public Builder setBasicAuthentication(String basicAuthentication) + { + this.basicAuthentication = basicAuthentication; + return this; + } + + public Builder setBatchingStrategy(BatchingStrategy batchingStrategy) + { + this.batchingStrategy = batchingStrategy; + return this; + } + + public Builder setMaxBatchSize(int maxBatchSize) + { + this.maxBatchSize = maxBatchSize; + return this; + } + + public Builder setContentEncoding(ContentEncoding contentEncoding) + { + this.contentEncoding = contentEncoding; + return this; + } + + public Builder setBatchQueueSizeLimit(int batchQueueSizeLimit) + { + this.batchQueueSizeLimit = batchQueueSizeLimit; + return this; + } + + public Builder setHttpTimeoutAllowanceFactor(float httpTimeoutAllowanceFactor) + { + this.httpTimeoutAllowanceFactor = httpTimeoutAllowanceFactor; + return this; + } + + public Builder setMinHttpTimeoutMillis(int minHttpTimeoutMillis) + { + this.minHttpTimeoutMillis = minHttpTimeoutMillis; + return this; + } + + public HttpEmitterConfig build() + { + return new HttpEmitterConfig(this, recipientBaseUrl); + } + } +}
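A construction sketch for the config above (the URL is a placeholder, not from the patch):

    HttpEmitterConfig config = new HttpEmitterConfig.Builder("http://example.invalid/events")
        .setFlushMillis(30_000)
        .setFlushCount(200)
        .setBatchingStrategy(BatchingStrategy.ARRAY)
        .setContentEncoding(ContentEncoding.GZIP)
        .build();

diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/HttpPostEmitter.java b/java-util/src/main/java/io/druid/java/util/emitter/core/HttpPostEmitter.java new file mode 100644 index 000000000000..c8ea0ab6cc68 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/HttpPostEmitter.java @@ -0,0 +1,937 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.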
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Predicate; +import com.google.common.base.Throwables; +import com.google.common.primitives.Ints; +import io.druid.concurrent.ConcurrentAwaitableCounter; +import io.druid.java.util.common.ISE; +import io.druid.java.util.common.RetryUtils; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.lifecycle.LifecycleStart; +import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.common.logger.Logger; +import io.netty.handler.codec.http.HttpHeaders; +import org.asynchttpclient.AsyncHttpClient; +import org.asynchttpclient.ListenableFuture; +import org.asynchttpclient.RequestBuilder; +import org.asynchttpclient.Response; + +import javax.annotation.Nullable; +import java.io.Closeable; +import java.io.Flushable; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayDeque; +import java.util.Base64; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.LockSupport; +import java.util.zip.GZIPOutputStream; + +public class HttpPostEmitter implements Flushable, Closeable, Emitter +{ + private static final int MAX_EVENT_SIZE = 1023 * 1024; // Set max size slightly less than 1M to allow for metadata + + private static final int MAX_SEND_RETRIES = 3; + + /** + * Threshold of the size of {@link #buffersToEmit} when switching from using {@link + * BaseHttpEmittingConfig#getHttpTimeoutAllowanceFactor()} to {@link #EQUILIBRIUM_ALLOWANCE_FACTOR}. + */ + private static final int EMIT_QUEUE_THRESHOLD_1 = 5; + + /** + * Threshold of the size of {@link #buffersToEmit} when switching from using {@link #EQUILIBRIUM_ALLOWANCE_FACTOR} + * to {@link #TIGHT_ALLOWANCE_FACTOR}. + */ + private static final int EMIT_QUEUE_THRESHOLD_2 = 10; + + /** + * 0.9 gives room for unexpected latency or the timeout not being respected rigorously. + */ + private static final double EQUILIBRIUM_ALLOWANCE_FACTOR = 0.9; + + private static final double TIGHT_ALLOWANCE_FACTOR = 0.5; + + /** + * Used in {@link EmittingThread#emitLargeEvents()} to ensure fair emitting of both large events and batched events.
+ */ + private static final byte[] LARGE_EVENTS_STOP = new byte[]{}; + + private static final Logger log = new Logger(HttpPostEmitter.class); + private static final AtomicInteger instanceCounter = new AtomicInteger(); + + final BatchingStrategy batchingStrategy; + final HttpEmitterConfig config; + private final int bufferSize; + final int maxBufferWatermark; + private final int largeEventThreshold; + private final AsyncHttpClient client; + private final ObjectMapper jsonMapper; + private final String url; + + private final ConcurrentLinkedQueue<byte[]> buffersToReuse = new ConcurrentLinkedQueue<>(); + /** + * "Approximate" because not exactly atomically synchronized with {@link #buffersToReuse} updates. {@link + * ConcurrentLinkedQueue#size()} is not used, because it's O(n). + */ + private final AtomicInteger approximateBuffersToReuseCount = new AtomicInteger(); + + /** + * concurrentBatch.get() == null means the service is closed. If concurrentBatch.get() is an instance of Integer, + * some thread failed with a serious error during {@link #onSealExclusive} (the Integer is the batch number of the + * failed batch) and {@link #tryRecoverCurrentBatch} needs to be called. Otherwise (i.e. normally), an instance of + * {@link Batch} is stored in this atomic reference. + */ + private final AtomicReference<Object> concurrentBatch = new AtomicReference<>(); + + private final ConcurrentLinkedDeque<Batch> buffersToEmit = new ConcurrentLinkedDeque<>(); + /** + * See {@link #approximateBuffersToReuseCount} + */ + private final AtomicInteger approximateBuffersToEmitCount = new AtomicInteger(); + /** + * See {@link #approximateBuffersToReuseCount} + */ + private final AtomicLong approximateEventsToEmitCount = new AtomicLong(); + + private final ConcurrentLinkedQueue<byte[]> largeEventsToEmit = new ConcurrentLinkedQueue<>(); + /** + * See {@link #approximateBuffersToReuseCount} + */ + private final AtomicInteger approximateLargeEventsToEmitCount = new AtomicInteger(); + + private final ConcurrentAwaitableCounter emittedBatchCounter = new ConcurrentAwaitableCounter(); + private final EmittingThread emittingThread; + private final AtomicLong totalEmittedEvents = new AtomicLong(); + private final AtomicInteger allocatedBuffers = new AtomicInteger(); + private final AtomicInteger droppedBuffers = new AtomicInteger(); + + private volatile long lastFillTimeMillis; + private final ConcurrentTimeCounter batchFillingTimeCounter = new ConcurrentTimeCounter(); + + private final Object startLock = new Object(); + private final CountDownLatch startLatch = new CountDownLatch(1); + private boolean running = false; + + public HttpPostEmitter(HttpEmitterConfig config, AsyncHttpClient client) + { + this(config, client, new ObjectMapper()); + } + + public HttpPostEmitter(HttpEmitterConfig config, AsyncHttpClient client, ObjectMapper jsonMapper) + { + batchingStrategy = config.getBatchingStrategy(); + final int batchOverhead = batchingStrategy.batchStartLength() + batchingStrategy.batchEndLength(); + Preconditions.checkArgument( + config.getMaxBatchSize() >= MAX_EVENT_SIZE + batchOverhead, + StringUtils.format( + "maxBatchSize must be at least MAX_EVENT_SIZE[%,d] + overhead[%,d].", + MAX_EVENT_SIZE, + batchOverhead + ) + ); + this.config = config; + this.bufferSize = config.getMaxBatchSize(); + this.maxBufferWatermark = bufferSize - batchingStrategy.batchEndLength(); + // Chosen so that if event size < largeEventThreshold, at least 2 events could fit the standard buffer.
+ this.largeEventThreshold = (bufferSize - batchOverhead - batchingStrategy.separatorLength()) / 2; + this.client = client; + this.jsonMapper = jsonMapper; + try { + this.url = new URL(config.getRecipientBaseUrl()).toString(); + } + catch (MalformedURLException e) { + throw new ISE(e, "Bad URL: %s", config.getRecipientBaseUrl()); + } + emittingThread = new EmittingThread(config); + long firstBatchNumber = 1; + concurrentBatch.set(new Batch(this, acquireBuffer(), firstBatchNumber)); + // lastFillTimeMillis must not be 0, though minHttpTimeoutMillis may be. + lastFillTimeMillis = Math.max(config.minHttpTimeoutMillis, 1); + } + + @Override + @LifecycleStart + public void start() + { + synchronized (startLock) { + if (!running) { + if (startLatch.getCount() == 0) { + throw new IllegalStateException("Already started."); + } + running = true; + startLatch.countDown(); + emittingThread.start(); + } + } + } + + private void awaitStarted() + { + try { + if (!startLatch.await(1, TimeUnit.SECONDS)) { + throw new RejectedExecutionException("Service is not started."); + } + if (isTerminated()) { + throw new RejectedExecutionException("Service is closed."); + } + } + catch (InterruptedException e) { + log.debug("Interrupted waiting for start"); + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + + private boolean isTerminated() + { + return concurrentBatch.get() == null; + } + + @Override + public void emit(Event event) + { + emitAndReturnBatch(event); + } + + @VisibleForTesting + @Nullable + Batch emitAndReturnBatch(Event event) + { + awaitStarted(); + + final byte[] eventBytes = eventToBytes(event); + + if (eventBytes.length > MAX_EVENT_SIZE) { + log.error( + "Event too large to emit (%,d > %,d): %s ...", + eventBytes.length, + MAX_EVENT_SIZE, + StringUtils.fromUtf8(ByteBuffer.wrap(eventBytes), 1024) + ); + return null; + } + + if (eventBytes.length > largeEventThreshold) { + writeLargeEvent(eventBytes); + return null; + } + + while (true) { + Object batchObj = concurrentBatch.get(); + if (batchObj instanceof Integer) { + tryRecoverCurrentBatch((Integer) batchObj); + continue; + } + if (batchObj == null) { + throw new RejectedExecutionException("Service is closed."); + } + Batch batch = (Batch) batchObj; + if (batch.tryAddEvent(eventBytes)) { + return batch; + } else { + log.debug("Failed to emit an event in batch [%s]", batch); + } + // Spin loop, until the thread calling onSealExclusive() updates the concurrentBatch. This update becomes visible + // eventually, because concurrentBatch.get() is a volatile read. + } + } + + private byte[] eventToBytes(Event event) + { + try { + return jsonMapper.writeValueAsBytes(event); + } + catch (IOException e) { + throw Throwables.propagate(e); + } + } + + private void writeLargeEvent(byte[] eventBytes) + { + // It would be better to drop the oldest event rather than the latest, but dropping the oldest is not easy to + // implement, because LARGE_EVENTS_STOP could be added into the queue concurrently. So we simply don't add the + // latest event.
+ // >, not >=, because largeEventsToEmit could contain LARGE_EVENTS_STOP + if (approximateBuffersToEmitCount.get() > config.getBatchQueueSizeLimit()) { + log.error( + "largeEventsToEmit queue size reached the limit [%d], dropping the latest large event", + config.getBatchQueueSizeLimit() + ); + } else { + largeEventsToEmit.add(eventBytes); + approximateBuffersToEmitCount.incrementAndGet(); + approximateLargeEventsToEmitCount.incrementAndGet(); + approximateEventsToEmitCount.incrementAndGet(); + } + wakeUpEmittingThread(); + } + + /** + * Called from {@link Batch} only once for each Batch in existence. + */ + void onSealExclusive(Batch batch, long elapsedTimeMillis) + { + try { + doOnSealExclusive(batch, elapsedTimeMillis); + } + catch (Throwable t) { + try { + if (!concurrentBatch.compareAndSet(batch, batch.batchNumber)) { + log.error("Unexpected failure to set currentBatch to the failed Batch.batchNumber"); + } + log.error(t, "Serious error during onSealExclusive(), set currentBatch to the failed Batch.batchNumber"); + } + catch (Throwable t2) { + t.addSuppressed(t2); + } + throw t; + } + } + + private void doOnSealExclusive(Batch batch, long elapsedTimeMillis) + { + batchFillingTimeCounter.add((int) Math.max(elapsedTimeMillis, 0)); + if (elapsedTimeMillis > 0) { + // If elapsedTimeMillis is 0 or negative, it's likely because System.currentTimeMillis() is not monotonic, so we + // don't account for this time when determining the batch sending timeout. + lastFillTimeMillis = elapsedTimeMillis; + } + addBatchToEmitQueue(batch); + wakeUpEmittingThread(); + if (!isTerminated()) { + long nextBatchNumber = ConcurrentAwaitableCounter.nextCount(batch.batchNumber); + byte[] newBuffer = acquireBuffer(); + if (!concurrentBatch.compareAndSet(batch, new Batch(this, newBuffer, nextBatchNumber))) { + buffersToReuse.add(newBuffer); + // If compareAndSet failed, the service must have been closed concurrently, i.e. we expect isTerminated() to be + // true. If we don't see this, there is likely a bug in HttpPostEmitter. + Preconditions.checkState(isTerminated()); + } + } + } + + private void tryRecoverCurrentBatch(Integer failedBatchNumber) + { + log.info("Trying to recover currentBatch"); + long nextBatchNumber = ConcurrentAwaitableCounter.nextCount(failedBatchNumber); + byte[] newBuffer = acquireBuffer(); + if (concurrentBatch.compareAndSet(failedBatchNumber, new Batch(this, newBuffer, nextBatchNumber))) { + log.info("Successfully recovered currentBatch"); + } else { + // This is normal; a concurrent thread may have recovered it first. + buffersToReuse.add(newBuffer); + } + } + + private void addBatchToEmitQueue(Batch batch) + { + limitBuffersToEmitSize(); + buffersToEmit.addLast(batch); + approximateBuffersToEmitCount.incrementAndGet(); + approximateEventsToEmitCount.addAndGet(batch.eventCount.get()); + } + + private void limitBuffersToEmitSize() + { + if (approximateBuffersToEmitCount.get() >= config.getBatchQueueSizeLimit()) { + Batch droppedBatch = buffersToEmit.pollFirst(); + if (droppedBatch != null) { + batchFinalized(); + approximateBuffersToEmitCount.decrementAndGet(); + approximateEventsToEmitCount.addAndGet(-droppedBatch.eventCount.get()); + droppedBuffers.incrementAndGet(); + log.error( + "buffersToEmit queue size reached the limit [%d], dropping the oldest buffer to emit", + config.getBatchQueueSizeLimit() + ); + } + } + } + + private void batchFinalized() + { + // Notify HttpPostEmitter.flush() that the batch was emitted, failed, or dropped.
+ emittedBatchCounter.increment(); + } + + private Batch pollBatchFromEmitQueue() + { + Batch result = buffersToEmit.pollFirst(); + if (result == null) { + return null; + } + approximateBuffersToEmitCount.decrementAndGet(); + approximateEventsToEmitCount.addAndGet(-result.eventCount.get()); + return result; + } + + private void wakeUpEmittingThread() + { + LockSupport.unpark(emittingThread); + } + + @Override + public void flush() throws IOException + { + awaitStarted(); + Object batchObj = concurrentBatch.get(); + if (batchObj instanceof Batch) { + flush((Batch) batchObj); + } + } + + private void flush(Batch batch) throws IOException + { + if (batch == null) { + return; + } + batch.seal(); + try { + // This doesn't always wait for this exact batch to be emitted, because another batch could be dropped from the + // queue ahead of this one, in limitBuffersToEmitSize(). But currently there is no better way to wait for the + // exact batch, and it's not that important. + emittedBatchCounter.awaitCount(batch.batchNumber, config.getFlushTimeOut(), TimeUnit.MILLISECONDS); + } + catch (TimeoutException e) { + String message = StringUtils.format("Timed out after [%d] millis during flushing", config.getFlushTimeOut()); + throw new IOException(message, e); + } + catch (InterruptedException e) { + log.debug("Thread interrupted"); + Thread.currentThread().interrupt(); + throw new IOException("Thread interrupted while flushing", e); + } + } + + @Override + @LifecycleStop + public void close() throws IOException + { + synchronized (startLock) { + if (running) { + running = false; + Object lastBatch = concurrentBatch.getAndSet(null); + if (lastBatch instanceof Batch) { + flush((Batch) lastBatch); + } + emittingThread.shuttingDown = true; + // EmittingThread is interrupted after the last batch is flushed. + emittingThread.interrupt(); + } + } + } + + @Override + public String toString() + { + return "HttpPostEmitter{" + + "config=" + config + + '}'; + } + + private class EmittingThread extends Thread + { + private final ArrayDeque<FailedBuffer> failedBuffers = new ArrayDeque<>(); + /** + * "Approximate", because not exactly synchronized with {@link #failedBuffers} updates. Not using size() on + * {@link #failedBuffers}, because access to it is not synchronized, while approximateFailedBuffersCount is queried + * from outside EmittingThread. + */ + private final AtomicInteger approximateFailedBuffersCount = new AtomicInteger(); + + private final ConcurrentTimeCounter successfulSendingTimeCounter = new ConcurrentTimeCounter(); + private final ConcurrentTimeCounter failedSendingTimeCounter = new ConcurrentTimeCounter(); + + /** + * Cache the exception. An exception is needed because {@link RetryUtils} operates only via exceptions. + */ + private final TimeoutException timeoutLessThanMinimumException; + + private boolean shuttingDown = false; + private ZeroCopyByteArrayOutputStream gzipBaos; + + EmittingThread(HttpEmitterConfig config) + { + super("HttpPostEmitter-" + instanceCounter.incrementAndGet()); + setDaemon(true); + timeoutLessThanMinimumException = new TimeoutException( + "Timeout less than minimum [" + config.getMinHttpTimeoutMillis() + "] ms." + ); + // Avoid showing and writing a nonsensical and misleading stack trace in logs.
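+ // A minimal sketch of this pre-allocated-exception pattern (hypothetical names, shown only for illustration):
+ //   private static final TimeoutException SIGNAL = new TimeoutException("timeout below minimum");
+ //   static { SIGNAL.setStackTrace(new StackTraceElement[]{}); }
+ // Callers then compare caught exceptions by reference, as sendWithRetries() does with
+ // timeoutLessThanMinimumException below.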
+ timeoutLessThanMinimumException.setStackTrace(new StackTraceElement[]{}); + } + + @Override + public void run() + { + while (true) { + boolean needsToShutdown = needsToShutdown(); + try { + emitLargeEvents(); + emitBatches(); + tryEmitOneFailedBuffer(); + + if (needsToShutdown) { + tryEmitAndDrainAllFailedBuffers(); + // Make GC life easier + drainBuffersToReuse(); + return; + } + } + catch (Throwable t) { + log.error(t, "Uncaught exception in EmittingThread.run()"); + } + if (failedBuffers.isEmpty()) { + // Waiting for 1/2 of config.getFlushMillis() in order to flush events not more than 50% later than specified. + // If nanos == 0, parkNanos() doesn't wait at all, which we don't want. + long waitNanos = Math.max(TimeUnit.MILLISECONDS.toNanos(config.getFlushMillis()) / 2, 1); + LockSupport.parkNanos(HttpPostEmitter.this, waitNanos); + } + } + } + + private boolean needsToShutdown() + { + boolean needsToShutdown = Thread.interrupted() || shuttingDown; + if (needsToShutdown) { + Object lastBatch = concurrentBatch.getAndSet(null); + if (lastBatch instanceof Batch) { + ((Batch) lastBatch).seal(); + } + } else { + Object batch = concurrentBatch.get(); + if (batch instanceof Batch) { + ((Batch) batch).sealIfFlushNeeded(); + } else { + // batch == null means that HttpPostEmitter is terminated. The batch object could also be an Integer, if some + // thread just failed with a serious error in onSealExclusive(); in that case we don't want to shut down the + // emitter thread. + needsToShutdown = batch == null; + } + } + return needsToShutdown; + } + + private void emitBatches() + { + for (Batch batch; (batch = pollBatchFromEmitQueue()) != null; ) { + emit(batch); + } + } + + private void emit(final Batch batch) + { + // Awaits until all concurrent event writers finish copying their event bytes to the buffer. This call provides + // memory visibility guarantees. + batch.awaitEmittingAllowed(); + try { + final int bufferWatermark = batch.getSealedBufferWatermark(); + if (bufferWatermark == 0) { // sealed while empty + return; + } + int eventCount = batch.eventCount.get(); + log.debug( + "Sending batch #%d to url[%s], event count[%d], bytes[%d]", + batch.batchNumber, + url, + eventCount, + bufferWatermark + ); + int bufferEndOffset = batchingStrategy.writeBatchEnd(batch.buffer, bufferWatermark); + + if (sendWithRetries(batch.buffer, bufferEndOffset, eventCount, true)) { + buffersToReuse.add(batch.buffer); + approximateBuffersToReuseCount.incrementAndGet(); + } else { + limitFailedBuffersSize(); + failedBuffers.addLast(new FailedBuffer(batch.buffer, bufferEndOffset, eventCount)); + approximateFailedBuffersCount.incrementAndGet(); + } + } + finally { + batchFinalized(); + } + } + + private void limitFailedBuffersSize() + { + if (failedBuffers.size() >= config.getBatchQueueSizeLimit()) { + failedBuffers.removeFirst(); + approximateFailedBuffersCount.decrementAndGet(); + droppedBuffers.incrementAndGet(); + log.error( + "failedBuffers queue size reached the limit [%d], dropping the oldest failed buffer", + config.getBatchQueueSizeLimit() + ); + } + } + + @SuppressWarnings("ArrayEquality") + private void emitLargeEvents() + { + if (largeEventsToEmit.isEmpty()) { + return; + } + // Don't try to emit large events until exhaustion, to avoid starving "normal" batches if the large-event + // posting rate is too high, though that should never happen in practice.
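+ // The STOP sentinel bounds this pass: only events already queued before add(LARGE_EVENTS_STOP) below are drained;
+ // events enqueued concurrently land behind the sentinel and are picked up on the next pass.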
+ largeEventsToEmit.add(LARGE_EVENTS_STOP); + for (byte[] largeEvent; (largeEvent = largeEventsToEmit.poll()) != LARGE_EVENTS_STOP; ) { + emitLargeEvent(largeEvent); + approximateBuffersToEmitCount.decrementAndGet(); + approximateLargeEventsToEmitCount.decrementAndGet(); + approximateEventsToEmitCount.decrementAndGet(); + } + } + + private void emitLargeEvent(byte[] eventBytes) + { + byte[] buffer = acquireBuffer(); + int bufferOffset = batchingStrategy.writeBatchStart(buffer); + System.arraycopy(eventBytes, 0, buffer, bufferOffset, eventBytes.length); + bufferOffset += eventBytes.length; + bufferOffset = batchingStrategy.writeBatchEnd(buffer, bufferOffset); + if (sendWithRetries(buffer, bufferOffset, 1, true)) { + buffersToReuse.add(buffer); + approximateBuffersToReuseCount.incrementAndGet(); + } else { + limitFailedBuffersSize(); + failedBuffers.addLast(new FailedBuffer(buffer, bufferOffset, 1)); + approximateFailedBuffersCount.incrementAndGet(); + } + } + + private void tryEmitOneFailedBuffer() + { + FailedBuffer failedBuffer = failedBuffers.peekFirst(); + if (failedBuffer != null) { + if (sendWithRetries(failedBuffer.buffer, failedBuffer.length, failedBuffer.eventCount, false)) { + // Remove it from the queue of failed buffers. + failedBuffers.pollFirst(); + approximateFailedBuffersCount.decrementAndGet(); + // Don't add the failed buffer back to the buffersToReuse queue here: if we were unable to emit events for a + // while, we would have no way to discard the buffers that were used to accumulate events during that period + // once they are added back to buffersToReuse. For instance, it could result in having 100 buffers in rotation + // even if we need just 2. + } + } + } + + private void tryEmitAndDrainAllFailedBuffers() + { + for (FailedBuffer failedBuffer; (failedBuffer = failedBuffers.pollFirst()) != null; ) { + sendWithRetries(failedBuffer.buffer, failedBuffer.length, failedBuffer.eventCount, false); + approximateFailedBuffersCount.decrementAndGet(); + } + } + + /** + * Returns true if sent successfully. + */ + private boolean sendWithRetries(final byte[] buffer, final int length, final int eventCount, boolean withTimeout) + { + long deadlineMillis = System.currentTimeMillis() + sendRequestTimeoutMillis(lastFillTimeMillis); + try { + RetryUtils.retry( + new Callable<Void>() + { + @Override + public Void call() throws Exception + { + send(buffer, length); + return null; + } + }, + new Predicate<Throwable>() + { + @Override + public boolean apply(Throwable e) + { + if (withTimeout && deadlineMillis - System.currentTimeMillis() <= 0) { // overflow-aware + return false; + } + if (e == timeoutLessThanMinimumException) { + return false; // Doesn't make sense to retry, because the result will be the same.
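+ // (Reference comparison is deliberate: timeoutLessThanMinimumException is the single cached instance
+ // thrown by send().)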
+ } + return !(e instanceof InterruptedException); + } + }, + MAX_SEND_RETRIES + ); + totalEmittedEvents.addAndGet(eventCount); + return true; + } + catch (InterruptedException e) { + return false; + } + catch (Exception e) { + if (e == timeoutLessThanMinimumException) { + log.debug(e, "Failed to send events to url[%s] with timeout less than minimum", config.getRecipientBaseUrl()); + } else { + log.error(e, "Failed to send events to url[%s]", config.getRecipientBaseUrl()); + } + return false; + } + } + + private void send(byte[] buffer, int length) throws Exception + { + long lastFillTimeMillis = HttpPostEmitter.this.lastFillTimeMillis; + final long timeoutMillis = sendRequestTimeoutMillis(lastFillTimeMillis); + if (timeoutMillis < config.getMinHttpTimeoutMillis()) { + throw timeoutLessThanMinimumException; + } + long sendingStartMs = System.currentTimeMillis(); + + final RequestBuilder request = new RequestBuilder("POST"); + request.setUrl(url); + byte[] payload; + int payloadLength; + ContentEncoding contentEncoding = config.getContentEncoding(); + if (contentEncoding != null) { + switch (contentEncoding) { + case GZIP: + try (GZIPOutputStream gzipOutputStream = acquireGzipOutputStream(length)) { + gzipOutputStream.write(buffer, 0, length); + } + payload = gzipBaos.getBuffer(); + payloadLength = gzipBaos.size(); + request.setHeader(HttpHeaders.Names.CONTENT_ENCODING, HttpHeaders.Values.GZIP); + break; + default: + throw new ISE("Unsupported content encoding [%s]", contentEncoding.name()); + } + } else { + payload = buffer; + payloadLength = length; + } + + request.setHeader(HttpHeaders.Names.CONTENT_TYPE, "application/json"); + request.setHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(payloadLength)); + request.setBody(ByteBuffer.wrap(payload, 0, payloadLength)); + + if (config.getBasicAuthentication() != null) { + final String[] parts = config.getBasicAuthentication().split(":", 2); + final String user = parts[0]; + final String password = parts.length > 1 ? parts[1] : ""; + String encoded = Base64.getEncoder().encodeToString((user + ':' + password).getBytes(StandardCharsets.UTF_8)); + request.setHeader(HttpHeaders.Names.AUTHORIZATION, "Basic " + encoded); + } + + request.setRequestTimeout(Ints.saturatedCast(timeoutMillis)); + + ListenableFuture<Response> future = client.executeRequest(request); + Response response; + try { + // Don't use Future.get(timeout), because we want to avoid sending the same data twice, in case the send + // eventually succeeds, but only after the timeout. + response = future.get(); + } + catch (ExecutionException e) { + accountFailedSending(sendingStartMs); + if (e.getCause() instanceof TimeoutException) { + log.error( + "Timing out emitter batch send, last batch fill time [%,d] ms, timeout [%,d] ms", + lastFillTimeMillis, + timeoutMillis + ); + } + throw e; + } + + if (response.getStatusCode() == 413) { + accountFailedSending(sendingStartMs); + throw new ISE( + "Received HTTP status 413 from [%s].
Batch size of [%d] may be too large, " + + "try adjusting maxBatchSize property", + config.getRecipientBaseUrl(), + config.getMaxBatchSize() + ); + } + + if (response.getStatusCode() / 100 != 2) { + accountFailedSending(sendingStartMs); + throw new ISE( + "Event emission was not successful [%d: %s], with message [%s].", + response.getStatusCode(), + response.getStatusText(), + response.getResponseBody(StandardCharsets.UTF_8).trim() + ); + } + + accountSuccessfulSending(sendingStartMs); + } + + private long sendRequestTimeoutMillis(long lastFillTimeMillis) + { + int emitQueueSize = approximateBuffersToEmitCount.get(); + if (emitQueueSize < EMIT_QUEUE_THRESHOLD_1) { + return (long) (lastFillTimeMillis * config.getHttpTimeoutAllowanceFactor()); + } + if (emitQueueSize < EMIT_QUEUE_THRESHOLD_2) { + // The idea is to not let the buffersToEmit queue grow faster than we can emit buffers. + return (long) (lastFillTimeMillis * EQUILIBRIUM_ALLOWANCE_FACTOR); + } + // If buffersToEmit still grows, try to restrict even more + return (long) (lastFillTimeMillis * TIGHT_ALLOWANCE_FACTOR); + } + + private void accountSuccessfulSending(long sendingStartMs) + { + successfulSendingTimeCounter.add((int) Math.max(System.currentTimeMillis() - sendingStartMs, 0)); + } + + private void accountFailedSending(long sendingStartMs) + { + failedSendingTimeCounter.add((int) Math.max(System.currentTimeMillis() - sendingStartMs, 0)); + } + + GZIPOutputStream acquireGzipOutputStream(int length) throws IOException + { + if (gzipBaos == null) { + gzipBaos = new ZeroCopyByteArrayOutputStream(length); + } else { + gzipBaos.reset(); + } + return new GZIPOutputStream(gzipBaos, true); + } + } + + private static class FailedBuffer + { + final byte[] buffer; + final int length; + final int eventCount; + + private FailedBuffer(byte[] buffer, int length, int eventCount) + { + this.buffer = buffer; + this.length = length; + this.eventCount = eventCount; + } + } + + private byte[] acquireBuffer() + { + byte[] buffer = buffersToReuse.poll(); + if (buffer == null) { + buffer = new byte[bufferSize]; + allocatedBuffers.incrementAndGet(); + } else { + approximateBuffersToReuseCount.decrementAndGet(); + } + return buffer; + } + + private void drainBuffersToReuse() + { + while (buffersToReuse.poll() != null) { + approximateBuffersToReuseCount.decrementAndGet(); + } + } + + /** + * This and the following methods are public for external monitoring purposes.
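+ * For example (hypothetical monitoring code, not part of this class), a poller might sample:
+ * <pre>
+ * int queued = emitter.getBuffersToEmit(); // approximate count of batches waiting to be sent
+ * int dropped = emitter.getDroppedBuffers(); // batches lost to queue-size limits
+ * </pre>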
+ */ + public int getTotalAllocatedBuffers() + { + return allocatedBuffers.get(); + } + + public int getBuffersToEmit() + { + return approximateBuffersToEmitCount.get(); + } + + public int getBuffersToReuse() + { + return approximateBuffersToReuseCount.get(); + } + + public int getFailedBuffers() + { + return emittingThread.approximateFailedBuffersCount.get(); + } + + public int getDroppedBuffers() + { + return droppedBuffers.get(); + } + + public long getTotalEmittedEvents() + { + return totalEmittedEvents.get(); + } + + public long getEventsToEmit() + { + return approximateEventsToEmitCount.get(); + } + + public long getLargeEventsToEmit() + { + return approximateLargeEventsToEmitCount.get(); + } + + public ConcurrentTimeCounter getBatchFillingTimeCounter() + { + return batchFillingTimeCounter; + } + + public ConcurrentTimeCounter getSuccessfulSendingTimeCounter() + { + return emittingThread.successfulSendingTimeCounter; + } + + public ConcurrentTimeCounter getFailedSendingTimeCounter() + { + return emittingThread.failedSendingTimeCounter; + } + + @VisibleForTesting + void waitForEmission(int batchNumber) throws Exception + { + emittedBatchCounter.awaitCount(batchNumber, 10, TimeUnit.SECONDS); + } + + @VisibleForTesting + void joinEmitterThread() throws InterruptedException + { + emittingThread.join(); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/LoggingEmitter.java b/java-util/src/main/java/io/druid/java/util/emitter/core/LoggingEmitter.java new file mode 100644 index 000000000000..5bf21d88e0e2 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/LoggingEmitter.java @@ -0,0 +1,190 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package io.druid.java.util.emitter.core; + +/** + */ + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.lifecycle.LifecycleStart; +import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.common.logger.Logger; + +import java.io.IOException; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + */ +public class LoggingEmitter implements Emitter +{ + private final Logger log; + private final Level level; + private final ObjectMapper jsonMapper; + + private final AtomicBoolean started = new AtomicBoolean(false); + + public LoggingEmitter(LoggingEmitterConfig config, ObjectMapper jsonMapper) + { + this(new Logger(config.getLoggerClass()), Level.toLevel(config.getLogLevel()), jsonMapper); + } + + public LoggingEmitter(Logger log, Level level, ObjectMapper jsonMapper) + { + this.log = log; + this.level = level; + this.jsonMapper = jsonMapper; + } + + @Override + @LifecycleStart + public void start() + { + final boolean alreadyStarted = started.getAndSet(true); + if (!alreadyStarted) { + final String message = "Start: started [%s]"; + switch (level) { + case TRACE: + if (log.isTraceEnabled()) { + log.trace(message, started.get()); + } + break; + case DEBUG: + if (log.isDebugEnabled()) { + log.debug(message, started.get()); + } + break; + case INFO: + if (log.isInfoEnabled()) { + log.info(message, started.get()); + } + break; + case WARN: + log.warn(message, started.get()); + break; + case ERROR: + log.error(message, started.get()); + break; + } + } + } + + @Override + public void emit(Event event) + { + synchronized (started) { + if (!started.get()) { + throw new RejectedExecutionException("Service not started."); + } + } + try { + final String message = "Event [%s]"; + switch (level) { + case TRACE: + if (log.isTraceEnabled()) { + log.trace(message, jsonMapper.writeValueAsString(event)); + } + break; + case DEBUG: + if (log.isDebugEnabled()) { + log.debug(message, jsonMapper.writeValueAsString(event)); + } + break; + case INFO: + if (log.isInfoEnabled()) { + log.info(message, jsonMapper.writeValueAsString(event)); + } + break; + case WARN: + log.warn(message, jsonMapper.writeValueAsString(event)); + break; + case ERROR: + log.error(message, jsonMapper.writeValueAsString(event)); + break; + } + } + catch (Exception e) { + log.warn(e, "Failed to generate json"); + } + } + + @Override + public void flush() throws IOException + { + + } + + @Override + @LifecycleStop + public void close() throws IOException + { + final boolean wasStarted = started.getAndSet(false); + if (wasStarted) { + final String message = "Close: started [%s]"; + switch (level) { + case TRACE: + if (log.isTraceEnabled()) { + log.trace(message, started.get()); + } + break; + case DEBUG: + if (log.isDebugEnabled()) { + log.debug(message, started.get()); + } + break; + case INFO: + if (log.isInfoEnabled()) { + log.info(message, started.get()); + } + break; + case WARN: + log.warn(message, started.get()); + break; + case ERROR: + log.error(message, started.get()); + break; + } + } + } + + @Override + public String toString() + { + return "LoggingEmitter{" + + "log=" + log + + ", level=" + level + + '}'; + } + + public enum Level + { + TRACE, + DEBUG, + INFO, + WARN, + ERROR; + + public static Level toLevel(String name) + { + return Level.valueOf(StringUtils.toUpperCase(name)); + } + } +} diff --git 
a/java-util/src/main/java/io/druid/java/util/emitter/core/LoggingEmitterConfig.java b/java-util/src/main/java/io/druid/java/util/emitter/core/LoggingEmitterConfig.java new file mode 100644 index 000000000000..c4aba3de01c4 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/LoggingEmitterConfig.java @@ -0,0 +1,66 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import javax.validation.constraints.NotNull; + +/** + */ +public class LoggingEmitterConfig +{ + @NotNull + @JsonProperty + private String loggerClass = LoggingEmitter.class.getName(); + + @NotNull + @JsonProperty + private String logLevel = "info"; + + public String getLoggerClass() + { + return loggerClass; + } + + public void setLoggerClass(String loggerClass) + { + this.loggerClass = loggerClass; + } + + public String getLogLevel() + { + return logLevel; + } + + public void setLogLevel(String logLevel) + { + this.logLevel = logLevel; + } + + @Override + public String toString() + { + return "LoggingEmitterConfig{" + + "loggerClass='" + loggerClass + '\'' + + ", logLevel='" + logLevel + '\'' + + '}'; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/NoopEmitter.java b/java-util/src/main/java/io/druid/java/util/emitter/core/NoopEmitter.java new file mode 100644 index 000000000000..13897ddd3900 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/NoopEmitter.java @@ -0,0 +1,51 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import java.io.IOException; + +/** + */ +public class NoopEmitter implements Emitter +{ + @Override + public void start() + { + // Do nothing + } + + @Override + public void emit(Event event) + { + // Do nothing + } + + @Override + public void flush() throws IOException + { + // Do nothing + } + + @Override + public void close() throws IOException + { + // Do nothing + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriEmitter.java b/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriEmitter.java new file mode 100644 index 000000000000..18934c4f3fb7 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriEmitter.java @@ -0,0 +1,212 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableSet; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.common.lifecycle.LifecycleStart; +import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.common.logger.Logger; +import org.asynchttpclient.AsyncHttpClient; + +import javax.annotation.concurrent.GuardedBy; +import java.io.Closeable; +import java.io.Flushable; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiConsumer; + +public class ParametrizedUriEmitter implements Flushable, Closeable, Emitter +{ + private static final Logger log = new Logger(ParametrizedUriEmitter.class); + private static final Set ONLY_FEED_PARAM = ImmutableSet.of("feed"); + + private static UriExtractor makeUriExtractor(ParametrizedUriEmitterConfig config) + { + final String baseUri = config.getRecipientBaseUrlPattern(); + final ParametrizedUriExtractor parametrizedUriExtractor = new ParametrizedUriExtractor(baseUri); + UriExtractor uriExtractor = parametrizedUriExtractor; + if (ONLY_FEED_PARAM.equals(parametrizedUriExtractor.getParams())) { + uriExtractor = new FeedUriExtractor(baseUri.replace("{feed}", "%s")); + } + return uriExtractor; + } + + /** + * Type should be ConcurrentHashMap, not {@link java.util.concurrent.ConcurrentMap}, because the latter _doesn't_ + * guarantee that the lambda passed to {@link java.util.Map#computeIfAbsent} is executed at most once. 
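+ * (ConcurrentHashMap.computeIfAbsent does document that the mapping function is applied at most once per key,
+ * which keeps the HttpPostEmitter creation and lifecycle registration in emit() single-shot per URI.)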
+ */ + private final ConcurrentHashMap<URI, HttpPostEmitter> emitters = new ConcurrentHashMap<>(); + private final UriExtractor uriExtractor; + private final Object startCloseLock = new Object(); + @GuardedBy("startCloseLock") + private boolean started = false; + @GuardedBy("startCloseLock") + private boolean closed = false; + private final Lifecycle innerLifecycle = new Lifecycle(); + private final AsyncHttpClient client; + private final ObjectMapper jsonMapper; + private final ParametrizedUriEmitterConfig config; + + public ParametrizedUriEmitter( + ParametrizedUriEmitterConfig config, + AsyncHttpClient client, + ObjectMapper jsonMapper + ) + { + this(config, client, jsonMapper, makeUriExtractor(config)); + } + + public ParametrizedUriEmitter( + ParametrizedUriEmitterConfig config, + AsyncHttpClient client, + ObjectMapper jsonMapper, + UriExtractor uriExtractor + ) + { + this.config = config; + this.client = client; + this.jsonMapper = jsonMapper; + this.uriExtractor = uriExtractor; + } + + @Override + @LifecycleStart + public void start() + { + // Use full synchronization instead of an atomic flag, because otherwise some thread may think that the emitter is + // already started while it's still being started by another thread. + synchronized (startCloseLock) { + if (started) { + return; + } + started = true; + try { + innerLifecycle.start(); + } + catch (RuntimeException e) { + throw e; + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + } + + @Override + public void emit(Event event) + { + try { + URI uri = uriExtractor.apply(event); + HttpPostEmitter emitter = emitters.get(uri); + if (emitter == null) { + try { + emitter = emitters.computeIfAbsent(uri, u -> { + try { + return innerLifecycle.addMaybeStartManagedInstance( + new HttpPostEmitter( + config.buildHttpEmitterConfig(u.toString()), + client, + jsonMapper + ) + ); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + }); + } + catch (RuntimeException e) { + log.error(e, "Error while creating or starting an HttpPostEmitter for URI[%s]", uri); + return; + } + } + emitter.emit(event); + } + catch (URISyntaxException e) { + log.error(e, "Failed to extract URI for event[%s]", event.toMap()); + } + } + + @Override + @LifecycleStop + public void close() throws IOException + { + // Use full synchronization instead of an atomic flag, because otherwise some thread may think that the emitter is + // already closed while it's still being closed by another thread.
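+ // start() and close() serialize on the same startCloseLock, so close() can never observe a half-started emitter.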
+ synchronized (startCloseLock) { + if (closed) { + return; + } + closed = true; + innerLifecycle.stop(); + } + } + + @Override + public void flush() throws IOException + { + Exception thrown = null; + for (HttpPostEmitter httpPostEmitter : emitters.values()) { + try { + httpPostEmitter.flush(); + } + catch (Exception e) { + // If flush was interrupted, exit the loop + if (Thread.currentThread().isInterrupted()) { + if (thrown != null) { + e.addSuppressed(thrown); + } + throw Throwables.propagate(e); + } + if (thrown == null) { + thrown = e; + } else { + if (thrown != e) { + thrown.addSuppressed(e); + } + } + } + } + if (thrown != null) { + throw Throwables.propagate(thrown); + } + } + + public void forEachEmitter(BiConsumer action) + { + emitters.forEach(action); + } + + @Override + public String toString() + { + return "ParametrizedUriEmitter{" + + "emitters=" + emitters.keySet() + + ", uriExtractor=" + uriExtractor + + ", config=" + config + + '}'; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterConfig.java b/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterConfig.java new file mode 100644 index 000000000000..ba70ea550885 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterConfig.java @@ -0,0 +1,55 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import javax.validation.constraints.NotNull; + +public class ParametrizedUriEmitterConfig +{ + private static final BaseHttpEmittingConfig DEFAULT_HTTP_EMITTING_CONFIG = new BaseHttpEmittingConfig(); + + @NotNull + @JsonProperty + private String recipientBaseUrlPattern; + + @JsonProperty("httpEmitting") + private BaseHttpEmittingConfig httpEmittingConfig = DEFAULT_HTTP_EMITTING_CONFIG; + + public String getRecipientBaseUrlPattern() + { + return recipientBaseUrlPattern; + } + + public HttpEmitterConfig buildHttpEmitterConfig(String baseUri) + { + return new HttpEmitterConfig(httpEmittingConfig, baseUri); + } + + @Override + public String toString() + { + return "ParametrizedUriEmitterConfig{" + + "recipientBaseUrlPattern='" + recipientBaseUrlPattern + '\'' + + ", httpEmittingConfig=" + httpEmittingConfig + + '}'; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriExtractor.java b/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriExtractor.java new file mode 100644 index 000000000000..6d29bd58ebcd --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/ParametrizedUriExtractor.java @@ -0,0 +1,80 @@ +/* + * Licensed to Metamarkets Group Inc. 
(Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import io.druid.java.util.common.StringUtils; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class ParametrizedUriExtractor implements UriExtractor +{ + private String uriPattern; + private Set<String> params; + + public ParametrizedUriExtractor(String uriPattern) + { + this.uriPattern = uriPattern; + Matcher keyMatcher = Pattern.compile("\\{([^\\}]+)\\}").matcher(uriPattern); + params = new HashSet<>(); + while (keyMatcher.find()) { + params.add(keyMatcher.group(1)); + } + } + + public Set<String> getParams() + { + return params; + } + + @Override + public URI apply(Event event) throws URISyntaxException + { + Map<String, Object> eventMap = event.toMap(); + String processedUri = uriPattern; + for (String key : params) { + Object paramValue = eventMap.get(key); + if (paramValue == null) { + throw new IllegalArgumentException(StringUtils.format( + "ParametrizedUriExtractor with pattern %s requires %s to be set in event, but found %s", + uriPattern, + key, + eventMap + )); + } + processedUri = processedUri.replace(StringUtils.format("{%s}", key), paramValue.toString()); + } + return new URI(processedUri); + } + + @Override + public String toString() + { + return "ParametrizedUriExtractor{" + + "uriPattern='" + uriPattern + '\'' + + ", params=" + params + + '}'; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/UriExtractor.java b/java-util/src/main/java/io/druid/java/util/emitter/core/UriExtractor.java new file mode 100644 index 000000000000..42e57487c5a3 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/UriExtractor.java @@ -0,0 +1,28 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package io.druid.java.util.emitter.core; + +import java.net.URI; +import java.net.URISyntaxException; + +public interface UriExtractor +{ + URI apply(Event event) throws URISyntaxException; +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/core/ZeroCopyByteArrayOutputStream.java b/java-util/src/main/java/io/druid/java/util/emitter/core/ZeroCopyByteArrayOutputStream.java new file mode 100644 index 000000000000..15e6b86935cd --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/core/ZeroCopyByteArrayOutputStream.java @@ -0,0 +1,36 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import java.io.ByteArrayOutputStream; + +class ZeroCopyByteArrayOutputStream extends ByteArrayOutputStream +{ + + ZeroCopyByteArrayOutputStream(int capacity) + { + super(capacity); + } + + byte[] getBuffer() + { + return buf; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/factory/EmitterFactory.java b/java-util/src/main/java/io/druid/java/util/emitter/factory/EmitterFactory.java new file mode 100644 index 000000000000..c45ff6402471 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/factory/EmitterFactory.java @@ -0,0 +1,39 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.factory; + +import com.fasterxml.jackson.annotation.JsonSubTypes; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.core.Emitter; +import org.asynchttpclient.AsyncHttpClient; + +@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") +@JsonSubTypes(value = { + @JsonSubTypes.Type(name = "http", value = HttpEmitterFactory.class), + @JsonSubTypes.Type(name = "logging", value = LoggingEmitterFactory.class), + @JsonSubTypes.Type(name = "parametrized", value = ParametrizedUriEmitterFactory.class), + @JsonSubTypes.Type(name = "noop", value = NoopEmitterFactory.class), +}) +public interface EmitterFactory +{ + Emitter makeEmitter(ObjectMapper objectMapper, AsyncHttpClient httpClient, Lifecycle lifecycle); +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/factory/HttpEmitterFactory.java b/java-util/src/main/java/io/druid/java/util/emitter/factory/HttpEmitterFactory.java new file mode 100644 index 000000000000..fd43bd56111d --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/factory/HttpEmitterFactory.java @@ -0,0 +1,39 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.factory; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.HttpEmitterConfig; +import io.druid.java.util.emitter.core.HttpPostEmitter; +import org.asynchttpclient.AsyncHttpClient; + +public class HttpEmitterFactory extends HttpEmitterConfig implements EmitterFactory +{ + + @Override + public Emitter makeEmitter(ObjectMapper objectMapper, AsyncHttpClient httpClient, Lifecycle lifecycle) + { + Emitter retVal = new HttpPostEmitter(this, httpClient, objectMapper); + lifecycle.addManagedInstance(retVal); + return retVal; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/factory/LoggingEmitterFactory.java b/java-util/src/main/java/io/druid/java/util/emitter/factory/LoggingEmitterFactory.java new file mode 100644 index 000000000000..5e7ee9325e16 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/factory/LoggingEmitterFactory.java @@ -0,0 +1,45 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.factory; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.LoggingEmitter; +import io.druid.java.util.emitter.core.LoggingEmitterConfig; +import org.asynchttpclient.AsyncHttpClient; + +public class LoggingEmitterFactory extends LoggingEmitterConfig implements EmitterFactory +{ + public LoggingEmitterFactory() {} + + @Override + public Emitter makeEmitter(ObjectMapper objectMapper, AsyncHttpClient httpClient, Lifecycle lifecycle) + { + return makeEmitter(objectMapper, lifecycle); + } + + public Emitter makeEmitter(ObjectMapper objectMapper, Lifecycle lifecycle) + { + Emitter retVal = new LoggingEmitter(this, objectMapper); + lifecycle.addManagedInstance(retVal); + return retVal; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/factory/NoopEmitterFactory.java b/java-util/src/main/java/io/druid/java/util/emitter/factory/NoopEmitterFactory.java new file mode 100644 index 000000000000..1e89c7b87f50 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/factory/NoopEmitterFactory.java @@ -0,0 +1,42 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.factory; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.NoopEmitter; +import org.asynchttpclient.AsyncHttpClient; + +public class NoopEmitterFactory implements EmitterFactory +{ + @Override + public Emitter makeEmitter(ObjectMapper objectMapper, AsyncHttpClient httpClient, Lifecycle lifecycle) + { + return makeEmitter(lifecycle); + } + + public Emitter makeEmitter(Lifecycle lifecycle) + { + Emitter retVal = new NoopEmitter(); + lifecycle.addManagedInstance(retVal); + return retVal; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/factory/ParametrizedUriEmitterFactory.java b/java-util/src/main/java/io/druid/java/util/emitter/factory/ParametrizedUriEmitterFactory.java new file mode 100644 index 000000000000..0c6956be9aaa --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/factory/ParametrizedUriEmitterFactory.java @@ -0,0 +1,39 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.factory; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.ParametrizedUriEmitter; +import io.druid.java.util.emitter.core.ParametrizedUriEmitterConfig; +import org.asynchttpclient.AsyncHttpClient; + +public class ParametrizedUriEmitterFactory extends ParametrizedUriEmitterConfig implements EmitterFactory +{ + + @Override + public Emitter makeEmitter(ObjectMapper objectMapper, AsyncHttpClient httpClient, Lifecycle lifecycle) + { + final Emitter retVal = new ParametrizedUriEmitter(this, httpClient, objectMapper); + lifecycle.addManagedInstance(retVal); + return retVal; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/service/AlertBuilder.java b/java-util/src/main/java/io/druid/java/util/emitter/service/AlertBuilder.java new file mode 100644 index 000000000000..705da242c28f --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/service/AlertBuilder.java @@ -0,0 +1,90 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.service; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import io.druid.java.util.common.DateTimes; +import io.druid.java.util.common.StringUtils; + +import java.util.Map; + +/** +*/ +public class AlertBuilder extends ServiceEventBuilder +{ + protected final Map dataMap = Maps.newLinkedHashMap(); + protected final String description; + protected final ServiceEmitter emitter; + + protected AlertEvent.Severity severity = AlertEvent.Severity.DEFAULT; + + public static AlertBuilder create(String descriptionFormat, Object... objects) + { + return AlertBuilder.createEmittable(null, descriptionFormat, objects); + } + + public static AlertBuilder createEmittable(ServiceEmitter emitter, String descriptionFormat, Object... objects) + { + return new AlertBuilder(StringUtils.format(descriptionFormat, objects), emitter); + } + + protected AlertBuilder( + String description, + ServiceEmitter emitter + ) + { + this.description = description; + this.emitter = emitter; + } + + public AlertBuilder addData(String identifier, Object value) + { + dataMap.put(identifier, value); + return this; + } + + public AlertBuilder addData(Map data) + { + dataMap.putAll(data); + return this; + } + + public AlertBuilder severity(AlertEvent.Severity severity) + { + this.severity = severity; + return this; + } + + @Override + public AlertEvent build(ImmutableMap serviceDimensions) + { + return new AlertEvent(DateTimes.nowUtc(), serviceDimensions, severity, description, dataMap); + } + + public void emit() + { + if (emitter == null) { + throw new UnsupportedOperationException("Emitter is null, cannot emit."); + } + + emitter.emit(this); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/service/AlertEvent.java b/java-util/src/main/java/io/druid/java/util/emitter/service/AlertEvent.java new file mode 100644 index 000000000000..81fc432e84c3 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/service/AlertEvent.java @@ -0,0 +1,185 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.service; + +import com.fasterxml.jackson.annotation.JsonValue; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.DateTimes; +import org.joda.time.DateTime; + +import java.util.Collections; +import java.util.Map; + +/** + */ +public class AlertEvent implements ServiceEvent +{ + private final ImmutableMap<String, String> serviceDimensions; + private final Severity severity; + private final String description; + private final DateTime createdTime; + + private final Map<String, Object> dataMap; + + public AlertEvent( + DateTime createdTime, + ImmutableMap<String, String> serviceDimensions, + Severity severity, + String description, + Map<String, Object> dataMap + ) + { + this.createdTime = createdTime; + this.serviceDimensions = serviceDimensions; + this.severity = severity; + this.description = description; + this.dataMap = dataMap; + } + + public AlertEvent( + DateTime createdTime, + String service, + String host, + Severity severity, + String description, + Map<String, Object> dataMap + ) + { + this(createdTime, ImmutableMap.of("service", service, "host", host), severity, description, dataMap); + } + + public AlertEvent( + String service, + String host, + Severity severity, + String description, + Map<String, Object> dataMap + ) + { + this(DateTimes.nowUtc(), service, host, severity, description, dataMap); + } + + public AlertEvent( + String service, + String host, + String description, + Map<String, Object> dataMap + ) + { + this(DateTimes.nowUtc(), service, host, Severity.DEFAULT, description, dataMap); + } + + public AlertEvent( + String service, + String host, + String description + ) + { + this(DateTimes.nowUtc(), service, host, Severity.DEFAULT, description, ImmutableMap.of()); + } + + @Override + public DateTime getCreatedTime() + { + return createdTime; + } + + @Override + public String getFeed() + { + return "alerts"; + } + + @Override + public String getService() + { + return serviceDimensions.get("service"); + } + + @Override + public String getHost() + { + return serviceDimensions.get("host"); + } + + @Override + public boolean isSafeToBuffer() + { + return false; + } + + public Severity getSeverity() + { + return severity; + } + + public String getDescription() + { + return description; + } + + public Map<String, Object> getDataMap() + { + return Collections.unmodifiableMap(dataMap); + } + + @Override + @JsonValue + public Map<String, Object> toMap() + { + return ImmutableMap.<String, Object>builder() + .put("feed", getFeed()) + .put("timestamp", createdTime.toString()) + .putAll(serviceDimensions) + .put("severity", severity.toString()) + .put("description", description) + .put("data", dataMap) + .build(); + } + + public enum Severity + { + ANOMALY { + @Override + public String toString() + { + return "anomaly"; + } + }, + + COMPONENT_FAILURE { + @Override + public String toString() + { + return "component-failure"; + } + }, + + SERVICE_FAILURE { + @Override + public String toString() + { + return "service-failure"; + } + }; + + public static final Severity DEFAULT = COMPONENT_FAILURE; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEmitter.java b/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEmitter.java new file mode 100644 index 000000000000..f18fdf09cfac --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEmitter.java @@ -0,0 +1,106 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.service; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.lifecycle.LifecycleStart; +import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Event; + +import java.io.IOException; + +public class ServiceEmitter implements Emitter +{ + private final ImmutableMap<String, String> serviceDimensions; + private final Emitter emitter; + + public ServiceEmitter(String service, String host, Emitter emitter) + { + this(service, host, emitter, ImmutableMap.of()); + } + + public ServiceEmitter( + String service, + String host, + Emitter emitter, + ImmutableMap<String, String> otherServiceDimensions + ) + { + this.serviceDimensions = ImmutableMap + .<String, String>builder() + .put("service", Preconditions.checkNotNull(service)) + .put("host", Preconditions.checkNotNull(host)) + .putAll(otherServiceDimensions) + .build(); + this.emitter = emitter; + } + + public String getService() + { + return serviceDimensions.get("service"); + } + + public String getHost() + { + return serviceDimensions.get("host"); + } + + @Override + @LifecycleStart + public void start() + { + emitter.start(); + } + + @Override + public void emit(Event event) + { + emitter.emit(event); + } + + public void emit(ServiceEventBuilder<?> builder) + { + emit(builder.build(serviceDimensions)); + } + + @Override + public void flush() throws IOException + { + emitter.flush(); + } + + @Override + @LifecycleStop + public void close() throws IOException + { + emitter.close(); + } + + @Override + public String toString() + { + return "ServiceEmitter{" + + "serviceDimensions=" + serviceDimensions + + ", emitter=" + emitter + + '}'; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEvent.java b/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEvent.java new file mode 100644 index 000000000000..4e983cd464dc --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEvent.java @@ -0,0 +1,29 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package io.druid.java.util.emitter.service; + +import io.druid.java.util.emitter.core.Event; + +public interface ServiceEvent extends Event +{ + String getService(); + + String getHost(); +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEventBuilder.java b/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEventBuilder.java new file mode 100644 index 000000000000..0fc45e57a400 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceEventBuilder.java @@ -0,0 +1,33 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.service; + +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.emitter.core.Event; + +public abstract class ServiceEventBuilder +{ + public abstract X build(ImmutableMap serviceDimensions); + + public X build(String service, String host) + { + return build(ImmutableMap.of("service", service, "host", host)); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceMetricEvent.java b/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceMetricEvent.java new file mode 100644 index 000000000000..0c4bc9b549bc --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/emitter/service/ServiceMetricEvent.java @@ -0,0 +1,203 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
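To illustrate the ServiceEventBuilder contract just defined, a hypothetical builder for the AlertEvent class above (not part of this patch); the ImmutableMap argument is the dimension map supplied by ServiceEmitter.emit(ServiceEventBuilder):

    ServiceEventBuilder<AlertEvent> alertBuilder = new ServiceEventBuilder<AlertEvent>()
    {
      @Override
      public AlertEvent build(ImmutableMap<String, String> serviceDimensions)
      {
        // serviceDimensions carries at least "service" and "host" from the emitter.
        return new AlertEvent(
            DateTimes.nowUtc(),
            serviceDimensions,
            AlertEvent.Severity.ANOMALY,
            "segment scan unusually slow",
            ImmutableMap.of()
        );
      }
    };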
+ */ + +package io.druid.java.util.emitter.service; + +import com.fasterxml.jackson.annotation.JsonValue; +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import io.druid.java.util.common.DateTimes; +import io.druid.java.util.common.ISE; +import org.joda.time.DateTime; + +import java.util.Arrays; +import java.util.Map; + +/** + */ +public class ServiceMetricEvent implements ServiceEvent +{ + public static Builder builder() + { + return new Builder(); + } + + private final DateTime createdTime; + private final ImmutableMap serviceDims; + private final Map userDims; + private final String feed; + private final String metric; + private final Number value; + + private ServiceMetricEvent( + DateTime createdTime, + ImmutableMap serviceDims, + Map userDims, + String feed, + String metric, + Number value + ) + { + this.createdTime = createdTime != null ? createdTime : DateTimes.nowUtc(); + this.serviceDims = serviceDims; + this.userDims = userDims; + this.feed = feed; + this.metric = metric; + this.value = value; + } + + @Override + public DateTime getCreatedTime() + { + return createdTime; + } + + @Override + public String getFeed() + { + return feed; + } + + @Override + public String getService() + { + return serviceDims.get("service"); + } + + @Override + public String getHost() + { + return serviceDims.get("host"); + } + + public Map getUserDims() + { + return ImmutableMap.copyOf(userDims); + } + + public String getMetric() + { + return metric; + } + + public Number getValue() + { + return value; + } + + @Override + public boolean isSafeToBuffer() + { + return true; + } + + @Override + @JsonValue + public Map toMap() + { + return ImmutableMap.builder() + .put("feed", getFeed()) + .put("timestamp", createdTime.toString()) + .putAll(serviceDims) + .put("metric", metric) + .put("value", value) + .putAll( + Maps.filterEntries( + userDims, + new Predicate>() + { + @Override + public boolean apply(Map.Entry input) + { + return input.getKey() != null; + } + } + ) + ) + .build(); + } + + public static class Builder + { + private final Map userDims = Maps.newTreeMap(); + private String feed = "metrics"; + + public Builder setFeed(String feed) + { + this.feed = feed; + return this; + } + + public Builder setDimension(String dim, String[] values) + { + userDims.put(dim, Arrays.asList(values)); + return this; + } + + public Builder setDimension(String dim, String value) + { + userDims.put(dim, value); + return this; + } + + public Object getDimension(String dim) + { + return userDims.get(dim); + } + + public ServiceEventBuilder build( + final String metric, + final Number value + ) + { + return build(null, metric, value); + } + + public ServiceEventBuilder build( + final DateTime createdTime, + final String metric, + final Number value + ) + { + if (Double.isNaN(value.doubleValue())) { + throw new ISE("Value of NaN is not allowed!"); + } + if (Double.isInfinite(value.doubleValue())) { + throw new ISE("Value of Infinite is not allowed!"); + } + + return new ServiceEventBuilder() + { + @Override + public ServiceMetricEvent build(ImmutableMap serviceDimensions) + { + return new ServiceMetricEvent( + createdTime, + serviceDimensions, + userDims, + feed, + metric, + value + ); + } + }; + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/AbstractHttpClient.java b/java-util/src/main/java/io/druid/java/util/http/client/AbstractHttpClient.java new file mode 100644 index 000000000000..8335aa561097 --- 
/dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/AbstractHttpClient.java @@ -0,0 +1,35 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import com.google.common.util.concurrent.ListenableFuture; +import io.druid.java.util.http.client.response.HttpResponseHandler; + +public abstract class AbstractHttpClient implements HttpClient +{ + @Override + public ListenableFuture go( + final Request request, + final HttpResponseHandler handler + ) + { + return go(request, handler, null); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/CredentialedHttpClient.java b/java-util/src/main/java/io/druid/java/util/http/client/CredentialedHttpClient.java new file mode 100644 index 000000000000..d56894433530 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/CredentialedHttpClient.java @@ -0,0 +1,50 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
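Looking back at ServiceMetricEvent above, a sketch of the Builder flow, including the eager NaN/Infinity guard in build():

    ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder()
        .setDimension("dataSource", "wikipedia")
        .setDimension("type", "groupBy");

    // build() validates the value before any event is created...
    ServiceEventBuilder<ServiceMetricEvent> ok = builder.build("query/time", 128);

    // ...so a bad value fails fast instead of producing a broken event downstream:
    // builder.build("query/time", Double.NaN);  // throws ISE("Value of NaN is not allowed!")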
+ */ + +package io.druid.java.util.http.client; + +import com.google.common.util.concurrent.ListenableFuture; +import io.druid.java.util.http.client.auth.Credentials; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import org.joda.time.Duration; + +/** + */ +public class CredentialedHttpClient extends AbstractHttpClient +{ + + private final Credentials creds; + private final HttpClient delegate; + + public CredentialedHttpClient(Credentials creds, HttpClient delegate) + { + this.creds = creds; + this.delegate = delegate; + } + + @Override + public ListenableFuture go( + Request request, + HttpResponseHandler handler, + Duration requestReadTimeout + ) + { + return delegate.go(creds.addCredentials(request), handler, requestReadTimeout); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/EnforceSslHttpClient.java b/java-util/src/main/java/io/druid/java/util/http/client/EnforceSslHttpClient.java new file mode 100644 index 000000000000..c2e41d1c270a --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/EnforceSslHttpClient.java @@ -0,0 +1,58 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import com.google.common.util.concurrent.ListenableFuture; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import org.joda.time.Duration; + +import java.net.URL; + +/** + */ +public class EnforceSslHttpClient extends AbstractHttpClient +{ + + private final HttpClient delegate; + + public EnforceSslHttpClient( + HttpClient delegate + ) + { + this.delegate = delegate; + } + + @Override + public ListenableFuture go( + Request request, + HttpResponseHandler handler, + Duration requestReadTimeout + ) + { + URL url = request.getUrl(); + + if (!"https".equals(url.getProtocol())) { + throw new IllegalArgumentException(StringUtils.format("Requests must be over https, got[%s].", url)); + } + + return delegate.go(request, handler, requestReadTimeout); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/HttpClient.java b/java-util/src/main/java/io/druid/java/util/http/client/HttpClient.java new file mode 100644 index 000000000000..5950b863ed5e --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/HttpClient.java @@ -0,0 +1,71 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
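A composition sketch for the two decorators above; `delegate` is an assumed underlying HttpClient (e.g. one produced by HttpClientInit further down):

    // Credentials are attached first, then the https check runs before handing off to the delegate.
    HttpClient client = new CredentialedHttpClient(
        new BasicCredentials("admin", "secret"),
        new EnforceSslHttpClient(delegate)
    );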
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import com.google.common.util.concurrent.ListenableFuture; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import org.joda.time.Duration; + +/** + * Interface for Async HTTP client libraries. + */ +public interface HttpClient +{ + /** + * Submit a request and process the response with the given response handler. + *
<p>
+ * Note that the Request object passed in to the HttpClient *may* be mutated by the actual client. This is + * largely done by composed clients, but the contract is that mutation is possible. It is the caller's + * responsibility to pass in a copy of the Request object if they want to have an object that is not mutated. + * + * @param request Request to process, this *may* be mutated by the client + * @param handler An asynchronous response handler that will be used to process results from the http call + * @param <Intermediate> The type of the intermediate results from the handler + * @param <Final> The type of the final results that the returned ListenableFuture will contain + * + * @return A listenable future that will eventually provide an object of type Final + */ + <Intermediate, Final> ListenableFuture<Final> go( + Request request, + HttpResponseHandler<Intermediate, Final> handler + ); + + /** + * Submit a request and process the response with the given response handler. + *
<p>
+ * Note that the Request object passed in to the HttpClient *may* be mutated by the actual client. This is + * largely done by composed clients, but the contract is that mutation is possible. It is the caller's + * responsibility to pass in a copy of the Request object if they want to have an object that is not mutated. + * + * @param request Request to process, this *may* be mutated by the client + * @param handler An asynchronous response handler that will be used to process results from the http call + * @param readTimeout Read timeout to use for this request. Leave null to use the default readTimeout. Set to zero + * to disable timeouts for this request. + * @param <Intermediate> The type of the intermediate results from the handler + * @param <Final> The type of the final results that the returned ListenableFuture will contain + * + * @return A listenable future that will eventually provide an object of type Final + */ + <Intermediate, Final> ListenableFuture<Final> go( + Request request, + HttpResponseHandler<Intermediate, Final> handler, + Duration readTimeout + ); +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/HttpClientConfig.java b/java-util/src/main/java/io/druid/java/util/http/client/HttpClientConfig.java new file mode 100644 index 000000000000..54e458052720 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/HttpClientConfig.java @@ -0,0 +1,289 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import org.joda.time.Duration; +import org.joda.time.Period; + +import javax.net.ssl.SSLContext; + +/** + */ +public class HttpClientConfig +{ + public enum CompressionCodec + { + IDENTITY { + @Override + public String getEncodingString() + { + return "identity"; + } + }, + GZIP { + @Override + public String getEncodingString() + { + return "gzip"; + } + }, + DEFLATE { + @Override + public String getEncodingString() + { + return "deflate"; + } + }; + + /** + * Get the header-ified name of this encoding, which should go in "Accept-Encoding" and + * "Content-Encoding" headers. This is not just the lowercasing of the enum name, since + * we may one day support x- encodings like LZ4, which would likely be an enum named + * "LZ4" that has an encoding string like "x-lz4".
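A usage sketch for the two go() overloads above; StatusResponseHandler and StatusResponseHolder are assumed from io.druid.java.util.http.client.response, and `client` from the decorator example earlier:

    Request request = new Request(HttpMethod.GET, new URL("https://coordinator:8081/status"));

    // Uses the client's configured default read timeout.
    ListenableFuture<StatusResponseHolder> response =
        client.go(request, new StatusResponseHandler(Charsets.UTF_8));

    // Per-request override, since the client may mutate the Request, pass a copy.
    client.go(request.copy(), new StatusResponseHandler(Charsets.UTF_8), Duration.standardSeconds(5));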
+ * + * @return encoding name + */ + public abstract String getEncodingString(); + } + + public static final CompressionCodec DEFAULT_COMPRESSION_CODEC = CompressionCodec.GZIP; + + // Default from NioClientSocketChannelFactory.DEFAULT_BOSS_COUNT, which is private: + private static final int DEFAULT_BOSS_COUNT = 1; + + // Default from SelectorUtil.DEFAULT_IO_THREADS, which is private: + private static final int DEFAULT_WORKER_COUNT = Runtime.getRuntime().availableProcessors() * 2; + + private static final Duration DEFAULT_UNUSED_CONNECTION_TIMEOUT_DURATION = new Period("PT4M").toStandardDuration(); + + public static Builder builder() + { + return new Builder(); + } + + private final int numConnections; + private final SSLContext sslContext; + private final Duration readTimeout; + private final Duration sslHandshakeTimeout; + private final int bossPoolSize; + private final int workerPoolSize; + private final CompressionCodec compressionCodec; + private final Duration unusedConnectionTimeoutDuration; + + @Deprecated // Use the builder instead + public HttpClientConfig( + int numConnections, + SSLContext sslContext + ) + { + this( + numConnections, + sslContext, + Duration.ZERO, + null, + DEFAULT_BOSS_COUNT, + DEFAULT_WORKER_COUNT, + DEFAULT_COMPRESSION_CODEC, + DEFAULT_UNUSED_CONNECTION_TIMEOUT_DURATION + ); + } + + @Deprecated // Use the builder instead + public HttpClientConfig( + int numConnections, + SSLContext sslContext, + Duration readTimeout + ) + { + this( + numConnections, + sslContext, + readTimeout, + null, + DEFAULT_BOSS_COUNT, + DEFAULT_WORKER_COUNT, + DEFAULT_COMPRESSION_CODEC, + DEFAULT_UNUSED_CONNECTION_TIMEOUT_DURATION + ); + } + + @Deprecated // Use the builder instead + public HttpClientConfig( + int numConnections, + SSLContext sslContext, + Duration readTimeout, + Duration sslHandshakeTimeout + ) + { + this( + numConnections, + sslContext, + readTimeout, + sslHandshakeTimeout, + DEFAULT_BOSS_COUNT, + DEFAULT_WORKER_COUNT, + DEFAULT_COMPRESSION_CODEC, + DEFAULT_UNUSED_CONNECTION_TIMEOUT_DURATION + ); + } + + private HttpClientConfig( + int numConnections, + SSLContext sslContext, + Duration readTimeout, + Duration sslHandshakeTimeout, + int bossPoolSize, + int workerPoolSize, + CompressionCodec compressionCodec, + Duration unusedConnectionTimeoutDuration + ) + { + this.numConnections = numConnections; + this.sslContext = sslContext; + this.readTimeout = readTimeout; + this.sslHandshakeTimeout = sslHandshakeTimeout; + this.bossPoolSize = bossPoolSize; + this.workerPoolSize = workerPoolSize; + this.compressionCodec = compressionCodec; + this.unusedConnectionTimeoutDuration = unusedConnectionTimeoutDuration; + } + + public int getNumConnections() + { + return numConnections; + } + + public SSLContext getSslContext() + { + return sslContext; + } + + public Duration getReadTimeout() + { + return readTimeout; + } + + public Duration getSslHandshakeTimeout() + { + return sslHandshakeTimeout; + } + + public int getBossPoolSize() + { + return bossPoolSize; + } + + public int getWorkerPoolSize() + { + return workerPoolSize; + } + + public CompressionCodec getCompressionCodec() + { + return compressionCodec; + } + + public Duration getUnusedConnectionTimeoutDuration() + { + return unusedConnectionTimeoutDuration; + } + + public static class Builder + { + private int numConnections = 1; + private SSLContext sslContext = null; + private Duration readTimeout = null; + private Duration sslHandshakeTimeout = null; + private int bossCount = DEFAULT_BOSS_COUNT; + private int workerCount = 
DEFAULT_WORKER_COUNT; + private CompressionCodec compressionCodec = DEFAULT_COMPRESSION_CODEC; + private Duration unusedConnectionTimeoutDuration = DEFAULT_UNUSED_CONNECTION_TIMEOUT_DURATION; + + private Builder() {} + + public Builder withNumConnections(int numConnections) + { + this.numConnections = numConnections; + return this; + } + + public Builder withSslContext(SSLContext sslContext) + { + this.sslContext = sslContext; + return this; + } + + public Builder withSslContext(String keyStorePath, String keyStorePassword) + { + this.sslContext = HttpClientInit.sslContextWithTrustedKeyStore(keyStorePath, keyStorePassword); + return this; + } + + public Builder withReadTimeout(Duration readTimeout) + { + this.readTimeout = readTimeout; + return this; + } + + public Builder withSslHandshakeTimeout(Duration sslHandshakeTimeout) + { + this.sslHandshakeTimeout = sslHandshakeTimeout; + return this; + } + + public Builder withBossCount(int bossCount) + { + this.bossCount = bossCount; + return this; + } + + public Builder withWorkerCount(int workerCount) + { + this.workerCount = workerCount; + return this; + } + + public Builder withCompressionCodec(CompressionCodec compressionCodec) + { + this.compressionCodec = compressionCodec; + return this; + } + + public Builder withUnusedConnectionTimeoutDuration(Duration unusedConnectionTimeoutDuration) + { + this.unusedConnectionTimeoutDuration = unusedConnectionTimeoutDuration; + return this; + } + + public HttpClientConfig build() + { + return new HttpClientConfig( + numConnections, + sslContext, + readTimeout, + sslHandshakeTimeout, + bossCount, + workerCount, + compressionCodec, + unusedConnectionTimeoutDuration + ); + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/HttpClientInit.java b/java-util/src/main/java/io/druid/java/util/http/client/HttpClientInit.java new file mode 100644 index 000000000000..1141829a9606 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/HttpClientInit.java @@ -0,0 +1,231 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
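A configuration sketch tying the Builder above to HttpClientInit.createClient below; `lifecycle` is an assumed io.druid.java.util.common.lifecycle.Lifecycle:

    HttpClientConfig config = HttpClientConfig.builder()
        .withNumConnections(20)
        .withReadTimeout(new Period("PT5M").toStandardDuration())
        .withCompressionCodec(HttpClientConfig.CompressionCodec.GZIP)
        .build();

    HttpClient client = HttpClientInit.createClient(config, lifecycle);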
+ */ + +package io.druid.java.util.http.client; + +import com.google.common.base.Throwables; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import io.druid.java.util.common.guava.CloseQuietly; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.http.client.netty.HttpClientPipelineFactory; +import io.druid.java.util.http.client.pool.ChannelResourceFactory; +import io.druid.java.util.http.client.pool.ResourcePool; +import io.druid.java.util.http.client.pool.ResourcePoolConfig; +import org.jboss.netty.bootstrap.ClientBootstrap; +import org.jboss.netty.channel.socket.nio.NioClientBossPool; +import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; +import org.jboss.netty.channel.socket.nio.NioWorkerPool; +import org.jboss.netty.logging.InternalLoggerFactory; +import org.jboss.netty.logging.Slf4JLoggerFactory; +import org.jboss.netty.util.HashedWheelTimer; +import org.jboss.netty.util.ThreadNameDeterminer; +import org.jboss.netty.util.Timer; +import org.joda.time.Duration; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +/** + */ +public class HttpClientInit +{ + public static HttpClient createClient(HttpClientConfig config, Lifecycle lifecycle) + { + try { + // We need to use the full constructor in order to set a ThreadNameDeterminer. The other parameters are taken + // from the defaults in HashedWheelTimer's other constructors. + final HashedWheelTimer timer = new HashedWheelTimer( + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("HttpClient-Timer-%s") + .build(), + ThreadNameDeterminer.CURRENT, + 100, + TimeUnit.MILLISECONDS, + 512 + ); + lifecycle.addMaybeStartHandler( + new Lifecycle.Handler() + { + @Override + public void start() throws Exception + { + timer.start(); + } + + @Override + public void stop() + { + timer.stop(); + } + } + ); + return lifecycle.addMaybeStartManagedInstance( + new NettyHttpClient( + new ResourcePool<>( + new ChannelResourceFactory( + createBootstrap(lifecycle, timer, config.getBossPoolSize(), config.getWorkerPoolSize()), + config.getSslContext(), + timer, + config.getSslHandshakeTimeout() == null ? 
-1 : config.getSslHandshakeTimeout().getMillis() + ), + new ResourcePoolConfig( + config.getNumConnections(), + config.getUnusedConnectionTimeoutDuration().getMillis() + ) + ), + config.getReadTimeout(), + config.getCompressionCodec(), + timer + ) + ); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + } + + @Deprecated + public static HttpClient createClient(ResourcePoolConfig config, final SSLContext sslContext, Lifecycle lifecycle) + { + return createClient( + new HttpClientConfig(config.getMaxPerKey(), sslContext, Duration.ZERO), + lifecycle + ); + } + + @Deprecated // use createClient directly + public static ClientBootstrap createBootstrap(Lifecycle lifecycle, Timer timer) + { + final HttpClientConfig defaultConfig = HttpClientConfig.builder().build(); + return createBootstrap(lifecycle, timer, defaultConfig.getBossPoolSize(), defaultConfig.getWorkerPoolSize()); + } + + @Deprecated // use createClient directly + public static ClientBootstrap createBootstrap(Lifecycle lifecycle) + { + final Timer timer = new HashedWheelTimer(new ThreadFactoryBuilder().setDaemon(true).build()); + return createBootstrap(lifecycle, timer); + } + + public static SSLContext sslContextWithTrustedKeyStore(final String keyStorePath, final String keyStorePassword) + { + FileInputStream in = null; + try { + final KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + + in = new FileInputStream(keyStorePath); + ks.load(in, keyStorePassword.toCharArray()); + in.close(); + + final TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ks); + final SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init(null, tmf.getTrustManagers(), null); + + return sslContext; + } + catch (CertificateException e) { + throw Throwables.propagate(e); + } + catch (NoSuchAlgorithmException e) { + throw Throwables.propagate(e); + } + catch (KeyStoreException e) { + throw Throwables.propagate(e); + } + catch (KeyManagementException e) { + throw Throwables.propagate(e); + } + catch (FileNotFoundException e) { + throw Throwables.propagate(e); + } + catch (IOException e) { + throw Throwables.propagate(e); + } + finally { + CloseQuietly.close(in); + } + } + + private static ClientBootstrap createBootstrap(Lifecycle lifecycle, Timer timer, int bossPoolSize, int workerPoolSize) + { + final NioClientBossPool bossPool = new NioClientBossPool( + Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("HttpClient-Netty-Boss-%s") + .build() + ), + bossPoolSize, + timer, + ThreadNameDeterminer.CURRENT + ); + + final NioWorkerPool workerPool = new NioWorkerPool( + Executors.newCachedThreadPool( + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("HttpClient-Netty-Worker-%s") + .build() + ), + workerPoolSize, + ThreadNameDeterminer.CURRENT + ); + + final ClientBootstrap bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(bossPool, workerPool)); + + bootstrap.setOption("keepAlive", true); + bootstrap.setPipelineFactory(new HttpClientPipelineFactory()); + + InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory()); + + try { + lifecycle.addMaybeStartHandler( + new Lifecycle.Handler() + { + @Override + public void start() throws Exception + { + } + + @Override + public void stop() + { + bootstrap.releaseExternalResources(); + } + } + ); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + + return bootstrap; + } +} diff --git 
a/java-util/src/main/java/io/druid/java/util/http/client/NettyHttpClient.java b/java-util/src/main/java/io/druid/java/util/http/client/NettyHttpClient.java new file mode 100644 index 000000000000..0d343a06f1ab --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/NettyHttpClient.java @@ -0,0 +1,405 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.Multimap; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import io.druid.java.util.common.IAE; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.lifecycle.LifecycleStart; +import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.http.client.pool.ResourceContainer; +import io.druid.java.util.http.client.pool.ResourcePool; +import io.druid.java.util.http.client.response.ClientResponse; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelException; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.ChannelFutureListener; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ChannelStateEvent; +import org.jboss.netty.channel.ExceptionEvent; +import org.jboss.netty.channel.MessageEvent; +import org.jboss.netty.channel.SimpleChannelUpstreamHandler; +import org.jboss.netty.handler.codec.http.DefaultHttpRequest; +import org.jboss.netty.handler.codec.http.HttpChunk; +import org.jboss.netty.handler.codec.http.HttpHeaders; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpRequest; +import org.jboss.netty.handler.codec.http.HttpResponse; +import org.jboss.netty.handler.codec.http.HttpVersion; +import org.jboss.netty.handler.timeout.ReadTimeoutHandler; +import org.jboss.netty.util.Timer; +import org.joda.time.Duration; + +import java.net.URL; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + */ +public class NettyHttpClient extends AbstractHttpClient +{ + private static final Logger log = new Logger(NettyHttpClient.class); + + private static final String READ_TIMEOUT_HANDLER_NAME = "read-timeout"; + private static final String LAST_HANDLER_NAME = "last-handler"; + + private final Timer timer; + private final ResourcePool pool; + private final HttpClientConfig.CompressionCodec compressionCodec; + private final Duration 
defaultReadTimeout; + + public NettyHttpClient( + ResourcePool pool + ) + { + this(pool, null, HttpClientConfig.DEFAULT_COMPRESSION_CODEC, null); + } + + NettyHttpClient( + ResourcePool pool, + Duration defaultReadTimeout, + HttpClientConfig.CompressionCodec compressionCodec, + Timer timer + ) + { + this.pool = Preconditions.checkNotNull(pool, "pool"); + this.defaultReadTimeout = defaultReadTimeout; + this.compressionCodec = Preconditions.checkNotNull(compressionCodec); + this.timer = timer; + + if (defaultReadTimeout != null && defaultReadTimeout.getMillis() > 0) { + Preconditions.checkNotNull(timer, "timer"); + } + } + + @LifecycleStart + public void start() + { + } + + @LifecycleStop + public void stop() + { + pool.close(); + } + + public HttpClient withReadTimeout(Duration readTimeout) + { + return new NettyHttpClient(pool, readTimeout, compressionCodec, timer); + } + + public NettyHttpClient withTimer(Timer timer) + { + return new NettyHttpClient(pool, defaultReadTimeout, compressionCodec, timer); + } + + @Override + public ListenableFuture go( + final Request request, + final HttpResponseHandler handler, + final Duration requestReadTimeout + ) + { + final HttpMethod method = request.getMethod(); + final URL url = request.getUrl(); + final Multimap headers = request.getHeaders(); + + final String requestDesc = StringUtils.format("%s %s", method, url); + if (log.isDebugEnabled()) { + log.debug("[%s] starting", requestDesc); + } + + // Block while acquiring a channel from the pool, then complete the request asynchronously. + final Channel channel; + final String hostKey = getPoolKey(url); + final ResourceContainer channelResourceContainer = pool.take(hostKey); + final ChannelFuture channelFuture = channelResourceContainer.get().awaitUninterruptibly(); + if (!channelFuture.isSuccess()) { + channelResourceContainer.returnResource(); // Some other poor sap will have to deal with it... + return Futures.immediateFailedFuture( + new ChannelException( + "Faulty channel in resource pool", + channelFuture.getCause() + ) + ); + } else { + channel = channelFuture.getChannel(); + } + + final String urlFile = Strings.nullToEmpty(url.getFile()); + final HttpRequest httpRequest = new DefaultHttpRequest( + HttpVersion.HTTP_1_1, + method, + urlFile.isEmpty() ? "/" : urlFile + ); + + if (!headers.containsKey(HttpHeaders.Names.HOST)) { + httpRequest.headers().add(HttpHeaders.Names.HOST, getHost(url)); + } + + // If Accept-Encoding is set in the Request, use that. Otherwise use the default from "compressionCodec". 
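+ // (A request can opt out per call, e.g. request.setHeader("Accept-Encoding", "identity").)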
+ if (!headers.containsKey(HttpHeaders.Names.ACCEPT_ENCODING)) { + httpRequest.headers().set(HttpHeaders.Names.ACCEPT_ENCODING, compressionCodec.getEncodingString()); + } + + for (Map.Entry> entry : headers.asMap().entrySet()) { + String key = entry.getKey(); + + for (String obj : entry.getValue()) { + httpRequest.headers().add(key, obj); + } + } + + if (request.hasContent()) { + httpRequest.setContent(request.getContent()); + } + + final long readTimeout = getReadTimeout(requestReadTimeout); + final SettableFuture retVal = SettableFuture.create(); + + if (readTimeout > 0) { + channel.getPipeline().addLast( + READ_TIMEOUT_HANDLER_NAME, + new ReadTimeoutHandler(timer, readTimeout, TimeUnit.MILLISECONDS) + ); + } + + channel.getPipeline().addLast( + LAST_HANDLER_NAME, + new SimpleChannelUpstreamHandler() + { + private volatile ClientResponse response = null; + + @Override + public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception + { + if (log.isDebugEnabled()) { + log.debug("[%s] messageReceived: %s", requestDesc, e.getMessage()); + } + try { + Object msg = e.getMessage(); + + if (msg instanceof HttpResponse) { + HttpResponse httpResponse = (HttpResponse) msg; + if (log.isDebugEnabled()) { + log.debug("[%s] Got response: %s", requestDesc, httpResponse.getStatus()); + } + + response = handler.handleResponse(httpResponse); + if (response.isFinished()) { + retVal.set((Final) response.getObj()); + } + + if (!httpResponse.isChunked()) { + finishRequest(); + } + } else if (msg instanceof HttpChunk) { + HttpChunk httpChunk = (HttpChunk) msg; + if (log.isDebugEnabled()) { + log.debug( + "[%s] Got chunk: %sB, last=%s", + requestDesc, + httpChunk.getContent().readableBytes(), + httpChunk.isLast() + ); + } + + if (httpChunk.isLast()) { + finishRequest(); + } else { + response = handler.handleChunk(response, httpChunk); + if (response.isFinished() && !retVal.isDone()) { + retVal.set((Final) response.getObj()); + } + } + } else { + throw new IllegalStateException(StringUtils.format("Unknown message type[%s]", msg.getClass())); + } + } + catch (Exception ex) { + log.warn(ex, "[%s] Exception thrown while processing message, closing channel.", requestDesc); + + if (!retVal.isDone()) { + retVal.set(null); + } + channel.close(); + channelResourceContainer.returnResource(); + + throw ex; + } + } + + private void finishRequest() + { + ClientResponse finalResponse = handler.done(response); + if (!finalResponse.isFinished()) { + throw new IllegalStateException( + StringUtils.format( + "[%s] Didn't get a completed ClientResponse Object from [%s]", + requestDesc, + handler.getClass() + ) + ); + } + if (!retVal.isDone()) { + retVal.set(finalResponse.getObj()); + } + removeHandlers(); + channelResourceContainer.returnResource(); + } + + @Override + public void exceptionCaught(ChannelHandlerContext context, ExceptionEvent event) throws Exception + { + if (log.isDebugEnabled()) { + final Throwable cause = event.getCause(); + if (cause == null) { + log.debug("[%s] Caught exception", requestDesc); + } else { + log.debug(cause, "[%s] Caught exception", requestDesc); + } + } + + retVal.setException(event.getCause()); + // response is non-null if we received initial chunk and then exception occurs + if (response != null) { + handler.exceptionCaught(response, event.getCause()); + } + removeHandlers(); + try { + channel.close(); + } + catch (Exception e) { + // ignore + } + finally { + channelResourceContainer.returnResource(); + } + + context.sendUpstream(event); + } + + @Override + public 
void channelDisconnected(ChannelHandlerContext context, ChannelStateEvent event) throws Exception + { + if (log.isDebugEnabled()) { + log.debug("[%s] Channel disconnected", requestDesc); + } + // response is non-null if we received initial chunk and then exception occurs + if (response != null) { + handler.exceptionCaught(response, new ChannelException("Channel disconnected")); + } + channel.close(); + channelResourceContainer.returnResource(); + if (!retVal.isDone()) { + log.warn("[%s] Channel disconnected before response complete", requestDesc); + retVal.setException(new ChannelException("Channel disconnected")); + } + context.sendUpstream(event); + } + + private void removeHandlers() + { + if (readTimeout > 0) { + channel.getPipeline().remove(READ_TIMEOUT_HANDLER_NAME); + } + channel.getPipeline().remove(LAST_HANDLER_NAME); + } + } + ); + + channel.write(httpRequest).addListener( + new ChannelFutureListener() + { + @Override + public void operationComplete(ChannelFuture future) throws Exception + { + if (!future.isSuccess()) { + channel.close(); + channelResourceContainer.returnResource(); + if (!retVal.isDone()) { + retVal.setException( + new ChannelException( + StringUtils.format("[%s] Failed to write request to channel", requestDesc), + future.getCause() + ) + ); + } + } + } + } + ); + + return retVal; + } + + private long getReadTimeout(Duration requestReadTimeout) + { + final long timeout; + if (requestReadTimeout != null) { + timeout = requestReadTimeout.getMillis(); + } else if (defaultReadTimeout != null) { + timeout = defaultReadTimeout.getMillis(); + } else { + timeout = 0; + } + + if (timeout > 0 && timer == null) { + log.warn("Cannot time out requests without a timer! Disabling timeout for this request."); + return 0; + } else { + return timeout; + } + } + + private String getHost(URL url) + { + int port = url.getPort(); + + if (port == -1) { + final String protocol = url.getProtocol(); + + if ("http".equalsIgnoreCase(protocol)) { + port = 80; + } else if ("https".equalsIgnoreCase(protocol)) { + port = 443; + } else { + throw new IAE("Cannot figure out default port for protocol[%s], please set Host header.", protocol); + } + } + + return StringUtils.format("%s:%s", url.getHost(), port); + } + + private String getPoolKey(URL url) + { + return StringUtils.format( + "%s://%s:%s", url.getProtocol(), url.getHost(), url.getPort() == -1 ? url.getDefaultPort() : url.getPort() + ); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/Request.java b/java-util/src/main/java/io/druid/java/util/http/client/Request.java new file mode 100644 index 000000000000..f49764101a8c --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/Request.java @@ -0,0 +1,200 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
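In caller terms, the precedence implemented by getReadTimeout() above is request-level value first, then the client default, else no timeout; `client`, `request`, and `handler` are assumed:

    client.go(request, handler, Duration.standardSeconds(5));  // 5 s, regardless of the client default
    client.go(request, handler, null);                         // falls back to defaultReadTimeout
    client.go(request, handler, Duration.ZERO);                // disables the read timeout for this call
    // With a positive timeout but no Timer configured, the client logs a warning and disables it.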
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import com.google.common.base.Charsets; +import com.google.common.base.Supplier; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Multimap; +import com.google.common.collect.Multimaps; +import io.druid.java.util.common.StringUtils; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBufferFactory; +import org.jboss.netty.buffer.HeapChannelBufferFactory; +import org.jboss.netty.handler.codec.base64.Base64; +import org.jboss.netty.handler.codec.http.HttpHeaders; +import org.jboss.netty.handler.codec.http.HttpMethod; + +import java.net.URL; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +/** + */ +public class Request +{ + private static final ChannelBufferFactory factory = HeapChannelBufferFactory.getInstance(); + + private final HttpMethod method; + private final URL url; + private final Multimap headers = Multimaps.newListMultimap( + Maps.>newHashMap(), + new Supplier>() + { + @Override + public List get() + { + return Lists.newArrayList(); + } + } + ); + + private ChannelBuffer content; + + public Request( + HttpMethod method, + URL url + ) + { + this.method = method; + this.url = url; + } + + public HttpMethod getMethod() + { + return method; + } + + public URL getUrl() + { + return url; + } + + public Multimap getHeaders() + { + return headers; + } + + public boolean hasContent() + { + return content != null; + } + + public ChannelBuffer getContent() + { + return content; + } + + public Request copy() + { + Request retVal = new Request(method, url); + retVal.headers.putAll(this.headers); + retVal.content = content == null ? 
null : content.copy(); + return retVal; + } + + public Request setHeader(String header, String value) + { + headers.replaceValues(header, Arrays.asList(value)); + return this; + } + + public Request setHeaderValues(String header, Iterable value) + { + headers.replaceValues(header, value); + return this; + } + + public Request setHeaderValues(Multimap inHeaders) + { + for (Map.Entry> entry : inHeaders.asMap().entrySet()) { + this.setHeaderValues(entry.getKey(), entry.getValue()); + } + return this; + } + + public Request addHeader(String header, String value) + { + headers.put(header, value); + return this; + } + + public Request addHeaderValues(String header, Iterable value) + { + headers.putAll(header, value); + return this; + } + + public Request addHeaderValues(Multimap inHeaders) + { + for (Map.Entry> entry : inHeaders.asMap().entrySet()) { + this.addHeaderValues(entry.getKey(), entry.getValue()); + } + return this; + } + + public Request setContent(byte[] bytes) + { + return setContent(null, bytes); + } + + public Request setContent(byte[] bytes, int offset, int length) + { + return setContent(null, bytes, offset, length); + } + + public Request setContent(ChannelBuffer content) + { + return setContent(null, content); + } + + public Request setContent(String contentType, byte[] bytes) + { + return setContent(contentType, bytes, 0, bytes.length); + } + + public Request setContent(String contentType, byte[] bytes, int offset, int length) + { + return setContent(contentType, factory.getBuffer(bytes, offset, length)); + } + + public Request setContent(String contentType, ChannelBuffer content) + { + if (contentType != null) { + setHeader(HttpHeaders.Names.CONTENT_TYPE, contentType); + } + + this.content = content; + + setHeader(HttpHeaders.Names.CONTENT_LENGTH, String.valueOf(content.writerIndex())); + + return this; + } + + public Request setBasicAuthentication(String username, String password) + { + final String base64Value = base64Encode(StringUtils.format("%s:%s", username, password)); + setHeader(HttpHeaders.Names.AUTHORIZATION, StringUtils.format("Basic %s", base64Value)); + return this; + } + + private String base64Encode(final String value) + { + final ChannelBufferFactory bufferFactory = HeapChannelBufferFactory.getInstance(); + + return Base64 + .encode(bufferFactory.getBuffer(ByteBuffer.wrap(value.getBytes(Charsets.UTF_8))), false) + .toString(Charsets.UTF_8); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/auth/BasicCredentials.java b/java-util/src/main/java/io/druid/java/util/http/client/auth/BasicCredentials.java new file mode 100644 index 000000000000..29cc85f5f385 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/auth/BasicCredentials.java @@ -0,0 +1,45 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
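A construction sketch for Request; the URL and `taskJson` payload are illustrative only:

    Request post = new Request(HttpMethod.POST, new URL("https://overlord:8090/druid/indexer/v1/task"))
        .setContent("application/json", taskJson.getBytes(Charsets.UTF_8))
        .setBasicAuthentication("admin", "secret");
    // setContent fills in Content-Type (when given) and Content-Length; setBasicAuthentication
    // adds the base64-encoded Authorization header via base64Encode above.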
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.auth; + +import io.druid.java.util.http.client.Request; + +/** + */ +public class BasicCredentials implements Credentials +{ + private final String username; + private final String password; + + public BasicCredentials( + String username, + String password + ) + { + this.username = username; + this.password = password; + } + + @Override + public Request addCredentials(Request builder) + { + return builder.setBasicAuthentication(username, password); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/auth/Credentials.java b/java-util/src/main/java/io/druid/java/util/http/client/auth/Credentials.java new file mode 100644 index 000000000000..35e204616cf5 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/auth/Credentials.java @@ -0,0 +1,29 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.auth; + +import io.druid.java.util.http.client.Request; + +/** + */ +public interface Credentials +{ + Request addCredentials(Request builder); +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/io/AppendableByteArrayInputStream.java b/java-util/src/main/java/io/druid/java/util/http/client/io/AppendableByteArrayInputStream.java new file mode 100644 index 000000000000..8c3c336cd568 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/io/AppendableByteArrayInputStream.java @@ -0,0 +1,211 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
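Since Credentials is a single-method interface, alternative schemes drop in easily; a hypothetical bearer-token implementation in the style of BasicCredentials (not part of this patch):

    public class TokenCredentials implements Credentials
    {
      private final String token;

      public TokenCredentials(String token)
      {
        this.token = token;
      }

      @Override
      public Request addCredentials(Request builder)
      {
        return builder.setHeader(HttpHeaders.Names.AUTHORIZATION, "Bearer " + token);
      }
    }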
+ */ + +package io.druid.java.util.http.client.io; + +import io.druid.java.util.common.logger.Logger; + +import java.io.IOException; +import java.io.InputStream; +import java.util.LinkedList; + +/** + */ +public class AppendableByteArrayInputStream extends InputStream +{ + private static final Logger log = new Logger(AppendableByteArrayInputStream.class); + + private final LinkedList bytes = new LinkedList(); + private final SingleByteReaderDoer singleByteReaderDoer = new SingleByteReaderDoer(); + + private volatile boolean done = false; + private volatile Throwable throwable; + private volatile int available = 0; + + private byte[] curr = new byte[]{}; + private int currIndex = 0; + + public void add(byte[] bytesToAdd) + { + if (bytesToAdd.length == 0) { + return; + } + + synchronized (singleByteReaderDoer) { + bytes.addLast(bytesToAdd); + available += bytesToAdd.length; + singleByteReaderDoer.notify(); + } + } + + public void done() + { + synchronized (singleByteReaderDoer) { + done = true; + singleByteReaderDoer.notify(); + } + } + + public void exceptionCaught(Throwable t) + { + synchronized (singleByteReaderDoer) { + done = true; + throwable = t; + singleByteReaderDoer.notify(); + } + } + + @Override + public int read() throws IOException + { + if (scanThroughBytesAndDoSomething(1, singleByteReaderDoer) == 0) { + return -1; + } + return singleByteReaderDoer.getRetVal(); + } + + @Override + public int read(final byte[] b, final int off, int len) throws IOException + { + if (b == null) { + throw new NullPointerException(); + } else if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + + final long retVal = scanThroughBytesAndDoSomething( + len, + new Doer() + { + int currOff = off; + + @Override + public void doSomethingWithByteArray(int numRead) + { + System.arraycopy(curr, currIndex, b, currOff, numRead); + currOff += numRead; + } + } + ); + return retVal == 0 ? -1 : (int) retVal; + } + + @Override + public long skip(long n) throws IOException + { + return scanThroughBytesAndDoSomething( + n, + new Doer() + { + @Override + public void doSomethingWithByteArray(int numToScan) + { + } + } + ); + } + + private long scanThroughBytesAndDoSomething(long numToScan, Doer doer) throws IOException + { + long numScanned = 0; + long numPulled = 0; + + while (numToScan > numScanned) { + if (currIndex >= curr.length) { + synchronized (singleByteReaderDoer) { + if (bytes.isEmpty()) { + if (done) { + break; + } + try { + available -= numPulled; + numPulled = 0; + singleByteReaderDoer.wait(); + } + catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException("Interrupted!"); + } + } + + if (throwable != null) { + throw new IOException(throwable); + } + + if (bytes.isEmpty()) { + if (done) { + break; + } else { + log.debug("bytes was empty, but read thread was awakened without being done. 
This shouldn't happen."); + continue; + } + } + + curr = bytes.removeFirst(); + currIndex = 0; + } + } + + final long numToPullFromCurr = Math.min(curr.length - currIndex, numToScan - numScanned); + doer.doSomethingWithByteArray((int) numToPullFromCurr); + numScanned += numToPullFromCurr; + currIndex += numToPullFromCurr; + numPulled += numToPullFromCurr; + } + + synchronized (singleByteReaderDoer) { + available -= numPulled; + } + + return numScanned; + } + + @Override + public int available() throws IOException + { + return available; + } + + private interface Doer + { + void doSomethingWithByteArray(int numToScan); + } + + private class SingleByteReaderDoer implements Doer + { + private int retVal; + + public SingleByteReaderDoer() + { + } + + @Override + public void doSomethingWithByteArray(int numToScan) + { + retVal = curr[currIndex]; + } + + public int getRetVal() + { + return retVal; + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/netty/HttpClientPipelineFactory.java b/java-util/src/main/java/io/druid/java/util/http/client/netty/HttpClientPipelineFactory.java new file mode 100644 index 000000000000..eddda1a53a82 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/netty/HttpClientPipelineFactory.java @@ -0,0 +1,42 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.netty; + +import org.jboss.netty.channel.ChannelPipeline; +import org.jboss.netty.channel.ChannelPipelineFactory; +import org.jboss.netty.channel.DefaultChannelPipeline; +import org.jboss.netty.handler.codec.http.HttpClientCodec; +import org.jboss.netty.handler.codec.http.HttpContentDecompressor; + +/** + */ +public class HttpClientPipelineFactory implements ChannelPipelineFactory +{ + @Override + public ChannelPipeline getPipeline() throws Exception + { + ChannelPipeline pipeline = new DefaultChannelPipeline(); + + pipeline.addLast("codec", new HttpClientCodec()); + pipeline.addLast("inflater", new HttpContentDecompressor()); + + return pipeline; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/pool/ChannelResourceFactory.java b/java-util/src/main/java/io/druid/java/util/http/client/pool/ChannelResourceFactory.java new file mode 100644 index 000000000000..91dbae44f7d9 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/pool/ChannelResourceFactory.java @@ -0,0 +1,182 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
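A handoff sketch for AppendableByteArrayInputStream above; the producer calls are assumed to come from a response handler thread while a consumer blocks elsewhere:

    AppendableByteArrayInputStream in = new AppendableByteArrayInputStream();

    // Producer: append chunks as they arrive, then signal completion (or failure) exactly once.
    in.add(chunkBytes);          // chunkBytes is an assumed byte[]
    in.done();                   // or in.exceptionCaught(t) on error

    // Consumer: plain InputStream reads; read() blocks until data arrives, done() is
    // called, or exceptionCaught() delivers a Throwable (rethrown as IOException).
    int first = in.read();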
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.pool; + +import com.google.common.base.Preconditions; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.logger.Logger; +import org.jboss.netty.bootstrap.ClientBootstrap; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelException; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.ChannelFutureListener; +import org.jboss.netty.channel.ChannelPipeline; +import org.jboss.netty.channel.Channels; +import org.jboss.netty.handler.ssl.SslHandler; +import org.jboss.netty.util.Timer; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.URL; + +/** + */ +public class ChannelResourceFactory implements ResourceFactory +{ + private static final Logger log = new Logger(ChannelResourceFactory.class); + + private static final long DEFAULT_SSL_HANDSHAKE_TIMEOUT = 10000L; /* 10 seconds */ + + private final ClientBootstrap bootstrap; + private final SSLContext sslContext; + private final Timer timer; + private final long sslHandshakeTimeout; + + public ChannelResourceFactory( + ClientBootstrap bootstrap, + SSLContext sslContext, + Timer timer, + long sslHandshakeTimeout + ) + { + this.bootstrap = Preconditions.checkNotNull(bootstrap, "bootstrap"); + this.sslContext = sslContext; + this.timer = timer; + this.sslHandshakeTimeout = sslHandshakeTimeout >= 0 ? sslHandshakeTimeout : DEFAULT_SSL_HANDSHAKE_TIMEOUT; + + if (sslContext != null) { + Preconditions.checkNotNull(timer, "timer is required when sslContext is present"); + } + } + + @Override + public ChannelFuture generate(final String hostname) + { + log.info("Generating: %s", hostname); + URL url = null; + try { + url = new URL(hostname); + } + catch (MalformedURLException e) { + throw new RuntimeException(e); + } + + final String host = url.getHost(); + final int port = url.getPort() == -1 ? 
url.getDefaultPort() : url.getPort(); + final ChannelFuture retVal; + final ChannelFuture connectFuture = bootstrap.connect(new InetSocketAddress(host, port)); + + if ("https".equals(url.getProtocol())) { + if (sslContext == null) { + throw new IllegalStateException("No sslContext set, cannot do https"); + } + + final SSLEngine sslEngine = sslContext.createSSLEngine(host, port); + final SSLParameters sslParameters = new SSLParameters(); + sslParameters.setEndpointIdentificationAlgorithm("HTTPS"); + sslEngine.setSSLParameters(sslParameters); + sslEngine.setUseClientMode(true); + final SslHandler sslHandler = new SslHandler( + sslEngine, + SslHandler.getDefaultBufferPool(), + false, + timer, + sslHandshakeTimeout + ); + + // https://github.com/netty/netty/issues/160 + sslHandler.setCloseOnSSLException(true); + + final ChannelPipeline pipeline = connectFuture.getChannel().getPipeline(); + pipeline.addFirst("ssl", sslHandler); + + final ChannelFuture handshakeFuture = Channels.future(connectFuture.getChannel()); + connectFuture.addListener( + new ChannelFutureListener() + { + @Override + public void operationComplete(ChannelFuture f) throws Exception + { + if (f.isSuccess()) { + sslHandler.handshake().addListener( + new ChannelFutureListener() + { + @Override + public void operationComplete(ChannelFuture f2) throws Exception + { + if (f2.isSuccess()) { + handshakeFuture.setSuccess(); + } else { + handshakeFuture.setFailure( + new ChannelException( + StringUtils.format("Failed to handshake with host[%s]", hostname), + f2.getCause() + ) + ); + } + } + } + ); + } else { + handshakeFuture.setFailure( + new ChannelException( + StringUtils.format("Failed to connect to host[%s]", hostname), + f.getCause() + ) + ); + } + } + } + ); + + retVal = handshakeFuture; + } else { + retVal = connectFuture; + } + + return retVal; + } + + @Override + public boolean isGood(ChannelFuture resource) + { + Channel channel = resource.awaitUninterruptibly().getChannel(); + + boolean isSuccess = resource.isSuccess(); + boolean isConnected = channel.isConnected(); + boolean isOpen = channel.isOpen(); + + if (log.isTraceEnabled()) { + log.trace("isGood = isSucess[%s] && isConnected[%s] && isOpen[%s]", isSuccess, isConnected, isOpen); + } + + return isSuccess && isConnected && isOpen; + } + + @Override + public void close(ChannelFuture resource) + { + log.trace("Closing"); + resource.awaitUninterruptibly().getChannel().close(); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceContainer.java b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceContainer.java new file mode 100644 index 000000000000..dcaac747f3e8 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceContainer.java @@ -0,0 +1,28 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
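A sketch of how this factory might be wired up (not part of the patch), assuming Netty 3's NioClientSocketChannelFactory and the HttpClientPipelineFactory from this patch; the URL, port, and timeout values are placeholders, and generate() will only succeed against a reachable endpoint.

import io.druid.java.util.http.client.netty.HttpClientPipelineFactory;
import io.druid.java.util.http.client.pool.ChannelResourceFactory;
import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
import org.jboss.netty.util.HashedWheelTimer;

import javax.net.ssl.SSLContext;
import java.util.concurrent.Executors;

class ChannelResourceFactoryWiringSketch
{
  public static void main(String[] args) throws Exception
  {
    ClientBootstrap bootstrap = new ClientBootstrap(
        new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool())
    );
    bootstrap.setPipelineFactory(new HttpClientPipelineFactory());

    // The timer is required whenever an SSLContext is supplied; it drives the handshake timeout.
    ChannelResourceFactory factory = new ChannelResourceFactory(
        bootstrap,
        SSLContext.getDefault(),
        new HashedWheelTimer(),
        10_000L
    );

    // Note: the "hostname" key is actually a full base URL; "https" triggers the SslHandler path.
    ChannelFuture future = factory.generate("https://example.com:8283");
    future.awaitUninterruptibly();
    System.out.println("connected and handshaked: " + future.isSuccess());
  }
}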
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.http.client.pool;
+
+/**
+ */
+public interface ResourceContainer<ResourceType>
+{
+  ResourceType get();
+  void returnResource();
+}
diff --git a/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceFactory.java b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceFactory.java
new file mode 100644
index 000000000000..324345304631
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceFactory.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.http.client.pool;
+
+/**
+ */
+public interface ResourceFactory<K, V>
+{
+  V generate(K key);
+  boolean isGood(V resource);
+  void close(V resource);
+}
diff --git a/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourcePool.java b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourcePool.java
new file mode 100644
index 000000000000..a9254f96de77
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourcePool.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package io.druid.java.util.http.client.pool; + +import com.google.common.base.Preconditions; +import com.google.common.base.Throwables; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.collect.ImmutableSet; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.logger.Logger; + +import java.io.Closeable; +import java.util.ArrayDeque; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + */ +public class ResourcePool implements Closeable +{ + private static final Logger log = new Logger(ResourcePool.class); + private final LoadingCache> pool; + private final AtomicBoolean closed = new AtomicBoolean(false); + + public ResourcePool( + final ResourceFactory factory, + final ResourcePoolConfig config + ) + { + this.pool = CacheBuilder.newBuilder().build( + new CacheLoader>() + { + @Override + public ImmediateCreationResourceHolder load(K input) throws Exception + { + return new ImmediateCreationResourceHolder( + config.getMaxPerKey(), + config.getUnusedConnectionTimeoutMillis(), + input, + factory + ); + } + } + ); + } + + public ResourceContainer take(final K key) + { + if (closed.get()) { + log.error(StringUtils.format("take(%s) called even though I'm closed.", key)); + return null; + } + + final ImmediateCreationResourceHolder holder; + try { + holder = pool.get(key); + } + catch (ExecutionException e) { + throw Throwables.propagate(e); + } + final V value = holder.get(); + + return new ResourceContainer() + { + private final AtomicBoolean returned = new AtomicBoolean(false); + + @Override + public V get() + { + Preconditions.checkState(!returned.get(), "Resource for key[%s] has been returned, cannot get().", key); + return value; + } + + @Override + public void returnResource() + { + if (returned.getAndSet(true)) { + log.warn(StringUtils.format("Resource at key[%s] was returned multiple times?", key)); + } else { + holder.giveBack(value); + } + } + + @Override + protected void finalize() throws Throwable + { + if (!returned.get()) { + log.warn( + StringUtils.format( + "Resource[%s] at key[%s] was not returned before Container was finalized, potential resource leak.", + value, + key + ) + ); + returnResource(); + } + super.finalize(); + } + }; + } + + @Override + public void close() + { + closed.set(true); + final Map> mapView = pool.asMap(); + for (K k : ImmutableSet.copyOf(mapView.keySet())) { + mapView.remove(k).close(); + } + } + + private static class ImmediateCreationResourceHolder + { + private final int maxSize; + private final K key; + private final ResourceFactory factory; + private final ArrayDeque> resourceHolderList; + private int deficit = 0; + private boolean closed = false; + private final long unusedResourceTimeoutMillis; + + private ImmediateCreationResourceHolder( + int maxSize, + long unusedResourceTimeoutMillis, + K key, + ResourceFactory factory + ) + { + this.maxSize = maxSize; + this.key = key; + this.factory = factory; + this.unusedResourceTimeoutMillis = unusedResourceTimeoutMillis; + this.resourceHolderList = new ArrayDeque<>(); + + for (int i = 0; i < maxSize; ++i) { + resourceHolderList.add(new ResourceHolder<>( + System.currentTimeMillis(), + Preconditions.checkNotNull( + factory.generate(key), + "factory.generate(key)" + ) + )); + } + } + + V get() + { + // resourceHolderList can't have nulls, so we'll use a null to signal that we need to 
create a new resource. + final V poolVal; + synchronized (this) { + while (!closed && resourceHolderList.size() == 0 && deficit == 0) { + try { + this.wait(); + } + catch (InterruptedException e) { + Thread.interrupted(); + return null; + } + } + + if (closed) { + log.info(StringUtils.format("get() called even though I'm closed. key[%s]", key)); + return null; + } else if (!resourceHolderList.isEmpty()) { + ResourceHolder holder = resourceHolderList.removeFirst(); + if (System.currentTimeMillis() - holder.getLastAccessedTime() > unusedResourceTimeoutMillis) { + factory.close(holder.getResource()); + poolVal = factory.generate(key); + } else { + poolVal = holder.getResource(); + } + } else if (deficit > 0) { + deficit--; + poolVal = null; + } else { + throw new IllegalStateException("WTF?! No objects left, and no object deficit. This is probably a bug."); + } + } + + // At this point, we must either return a valid resource or increment "deficit". + final V retVal; + try { + if (poolVal != null && factory.isGood(poolVal)) { + retVal = poolVal; + } else { + if (poolVal != null) { + factory.close(poolVal); + } + retVal = factory.generate(key); + } + } + catch (Throwable e) { + synchronized (this) { + deficit++; + this.notifyAll(); + } + throw Throwables.propagate(e); + } + + return retVal; + } + + void giveBack(V object) + { + Preconditions.checkNotNull(object, "object"); + + synchronized (this) { + if (closed) { + log.info(StringUtils.format("giveBack called after being closed. key[%s]", key)); + factory.close(object); + return; + } + + if (resourceHolderList.size() >= maxSize) { + if (holderListContains(object)) { + log.warn( + StringUtils.format( + "Returning object[%s] at key[%s] that has already been returned!? Skipping", + object, + key + ), + new Exception("Exception for stacktrace") + ); + } else { + log.warn( + StringUtils.format( + "Returning object[%s] at key[%s] even though we already have all that we can hold[%s]!? Skipping", + object, + key, + resourceHolderList + ), + new Exception("Exception for stacktrace") + ); + } + return; + } + + resourceHolderList.addLast(new ResourceHolder<>(System.currentTimeMillis(), object)); + this.notifyAll(); + } + } + + private boolean holderListContains(V object) + { + return resourceHolderList.stream().anyMatch(a -> a.getResource().equals(object)); + } + + void close() + { + synchronized (this) { + closed = true; + resourceHolderList.forEach(v -> factory.close(v.getResource())); + resourceHolderList.clear(); + this.notifyAll(); + } + } + } + + private static class ResourceHolder + { + private long lastAccessedTime; + private V resource; + + public ResourceHolder(long lastAccessedTime, V resource) + { + this.resource = resource; + this.lastAccessedTime = lastAccessedTime; + } + + public long getLastAccessedTime() + { + return lastAccessedTime; + } + + public V getResource() + { + return resource; + } + + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourcePoolConfig.java b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourcePoolConfig.java new file mode 100644 index 000000000000..66217d844341 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourcePoolConfig.java @@ -0,0 +1,69 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.pool; + +/** + */ +public class ResourcePoolConfig +{ + private final int maxPerKey; + private final long unusedConnectionTimeoutMillis; + + public ResourcePoolConfig( + int maxPerKey, + long unusedConnectionTimeoutMillis + ) + { + this.maxPerKey = maxPerKey; + this.unusedConnectionTimeoutMillis = unusedConnectionTimeoutMillis; + } + + @Deprecated + public ResourcePoolConfig( + int maxPerKey, + boolean cleanIdle, + long unusedConnectionTimeoutMillis + ) + { + this(maxPerKey, unusedConnectionTimeoutMillis); + + if (cleanIdle) { + throw new IllegalStateException( + "Cleaning up idle connections is a bad idea. " + + "If your services can't handle the max number then lower the max number." + ); + } + } + + public int getMaxPerKey() + { + return maxPerKey; + } + + public boolean isCleanIdle() + { + return false; + } + + public long getUnusedConnectionTimeoutMillis() + { + return unusedConnectionTimeoutMillis; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceVerifier.java b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceVerifier.java new file mode 100644 index 000000000000..926b4bba0abe --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/pool/ResourceVerifier.java @@ -0,0 +1,26 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.pool; + +/** + */ +public interface ResourceVerifier +{ +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/ClientResponse.java b/java-util/src/main/java/io/druid/java/util/http/client/response/ClientResponse.java new file mode 100644 index 000000000000..e5bd2ddb16f7 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/response/ClientResponse.java @@ -0,0 +1,57 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
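Taken together, ResourceFactory, ResourcePoolConfig, ResourcePool, and ResourceContainer compose like this. A sketch (not part of the patch) with a toy string-valued factory, using the type parameters the pool classes are meant to carry; the key and resource names are illustrative.

import io.druid.java.util.http.client.pool.ResourceContainer;
import io.druid.java.util.http.client.pool.ResourceFactory;
import io.druid.java.util.http.client.pool.ResourcePool;
import io.druid.java.util.http.client.pool.ResourcePoolConfig;

class ResourcePoolSketch
{
  public static void main(String[] args)
  {
    // A trivial factory: the pooled "resource" is just a string tagged with its key.
    ResourceFactory<String, String> factory = new ResourceFactory<String, String>()
    {
      private int counter = 0;

      @Override
      public String generate(String key)
      {
        return key + "-conn-" + (counter++);
      }

      @Override
      public boolean isGood(String resource)
      {
        return true; // real factories check liveness, e.g. channel.isOpen()
      }

      @Override
      public void close(String resource)
      {
        // release the underlying resource here
      }
    };

    // Up to 2 resources per key; resources unused for 5 minutes are closed and regenerated.
    ResourcePool<String, String> pool = new ResourcePool<>(factory, new ResourcePoolConfig(2, 300_000L));

    ResourceContainer<String> container = pool.take("host1");
    try {
      System.out.println("got " + container.get());
    }
    finally {
      container.returnResource(); // must be returned exactly once
    }
    pool.close();
  }
}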
Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.http.client.response;
+
+/**
+ */
+public class ClientResponse<T>
+{
+  private final boolean finished;
+  private final T obj;
+
+  public static <T> ClientResponse<T> finished(T obj)
+  {
+    return new ClientResponse<T>(true, obj);
+  }
+
+  public static <T> ClientResponse<T> unfinished(T obj)
+  {
+    return new ClientResponse<T>(false, obj);
+  }
+
+  protected ClientResponse(
+      boolean finished,
+      T obj
+  )
+  {
+    this.finished = finished;
+    this.obj = obj;
+  }
+
+  public boolean isFinished()
+  {
+    return finished;
+  }
+
+  public T getObj()
+  {
+    return obj;
+  }
+}
diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/FullResponseHandler.java b/java-util/src/main/java/io/druid/java/util/http/client/response/FullResponseHandler.java
new file mode 100644
index 000000000000..2b75b4941432
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/http/client/response/FullResponseHandler.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.http.client.response;
+
+import org.jboss.netty.handler.codec.http.HttpChunk;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+
+import java.nio.charset.Charset;
+
+/**
+ */
+public class FullResponseHandler implements HttpResponseHandler<FullResponseHolder, FullResponseHolder>
+{
+  private final Charset charset;
+
+  public FullResponseHandler(Charset charset)
+  {
+    this.charset = charset;
+  }
+
+  @Override
+  public ClientResponse<FullResponseHolder> handleResponse(HttpResponse response)
+  {
+    return ClientResponse.unfinished(
+        new FullResponseHolder(
+            response.getStatus(),
+            response,
+            new StringBuilder(response.getContent().toString(charset))
+        )
+    );
+  }
+
+  @Override
+  public ClientResponse<FullResponseHolder> handleChunk(
+      ClientResponse<FullResponseHolder> response,
+      HttpChunk chunk
+  )
+  {
+    final StringBuilder builder = response.getObj().getBuilder();
+
+    if (builder == null) {
+      return ClientResponse.finished(null);
+    }
+
+    builder.append(chunk.getContent().toString(charset));
+    return response;
+  }
+
+  @Override
+  public ClientResponse<FullResponseHolder> done(ClientResponse<FullResponseHolder> response)
+  {
+    return ClientResponse.finished(response.getObj());
+  }
+
+  @Override
+  public void exceptionCaught(
+      ClientResponse<FullResponseHolder> clientResponse, Throwable e
+  )
+  {
+    // It's safe to ignore this since the ClientResponse objects returned by handleChunk were unfinished
+  }
+
+}
diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/FullResponseHolder.java b/java-util/src/main/java/io/druid/java/util/http/client/response/FullResponseHolder.java
new file mode 100644
index 000000000000..b75610cdae8d
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/http/client/response/FullResponseHolder.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
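For orientation, a sketch (not part of the patch) of the callback sequence FullResponseHandler expects, driven by hand with a synthetic Netty response; in production the HTTP client invokes these callbacks on its I/O thread, and the body content here is a placeholder.

import io.druid.java.util.http.client.response.ClientResponse;
import io.druid.java.util.http.client.response.FullResponseHandler;
import io.druid.java.util.http.client.response.FullResponseHolder;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpVersion;

import java.nio.charset.StandardCharsets;

class FullResponseHandlerSketch
{
  public static void main(String[] args)
  {
    // Synthetic response standing in for what Netty would deliver
    DefaultHttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    response.setContent(ChannelBuffers.copiedBuffer("{\"ok\":true}", StandardCharsets.UTF_8));

    FullResponseHandler handler = new FullResponseHandler(StandardCharsets.UTF_8);
    // handleResponse returns an unfinished ClientResponse; done() marks it safe to hand back
    ClientResponse<FullResponseHolder> unfinished = handler.handleResponse(response);
    ClientResponse<FullResponseHolder> finished = handler.done(unfinished);

    FullResponseHolder holder = finished.getObj();
    System.out.println(holder.getStatus() + ": " + holder.getContent());
  }
}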
+ */
+
+package io.druid.java.util.http.client.response;
+
+import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+
+/**
+ */
+public class FullResponseHolder
+{
+  private final HttpResponseStatus status;
+  private final HttpResponse response;
+  private final StringBuilder builder;
+
+  public FullResponseHolder(
+      HttpResponseStatus status,
+      HttpResponse response,
+      StringBuilder builder
+  )
+  {
+    this.status = status;
+    this.response = response;
+    this.builder = builder;
+  }
+
+  public HttpResponseStatus getStatus()
+  {
+    return status;
+  }
+
+  public HttpResponse getResponse()
+  {
+    return response;
+  }
+
+  public StringBuilder getBuilder()
+  {
+    return builder;
+  }
+
+  public String getContent()
+  {
+    return builder.toString();
+  }
+}
diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/HttpResponseHandler.java b/java-util/src/main/java/io/druid/java/util/http/client/response/HttpResponseHandler.java
new file mode 100644
index 000000000000..69eb792cb9cd
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/http/client/response/HttpResponseHandler.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.http.client.response;
+
+import org.jboss.netty.handler.codec.http.HttpChunk;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+
+/**
+ * A handler for an HTTP request.
+ *
+ * The ClientResponse object passed around is used to store state between further chunks and indicate when it is safe
+ * to hand the object back to the caller.
+ *
+ * If the response is chunked, the ClientResponse object returned from handleResponse will be passed in as the
+ * first argument to handleChunk().
+ *
+ * If the ClientResponse object is marked as finished, that indicates that the object stored is safe to hand
+ * off to the caller. This is most often done either from the done() method after all content has been processed or
+ * from the initial handleResponse method to indicate that the object is thread-safe and aware that it might be
+ * accessed before all chunks come back.
+ *
+ * Note: if you return a finished ClientResponse object from anything other than the done() method, IntermediateType
+ * must be castable to FinalType.
+ */
+public interface HttpResponseHandler<IntermediateType, FinalType>
+{
+  /**
+   * Handles the initial HttpResponse object that comes back from Netty.
+   *
+   * @param response - response from Netty
+   *
+   * @return a ClientResponse holding the handler's intermediate state, marked finished only if the stored object is
+   * already safe to hand back to the caller
+   */
+  ClientResponse<IntermediateType> handleResponse(HttpResponse response);
+
+  ClientResponse<IntermediateType> handleChunk(ClientResponse<IntermediateType> clientResponse, HttpChunk chunk);
+
+  ClientResponse<FinalType> done(ClientResponse<IntermediateType> clientResponse);
+
+  void exceptionCaught(ClientResponse<IntermediateType> clientResponse, Throwable e);
+}
diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/InputStreamResponseHandler.java b/java-util/src/main/java/io/druid/java/util/http/client/response/InputStreamResponseHandler.java
new file mode 100644
index 000000000000..e2792842d226
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/http/client/response/InputStreamResponseHandler.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.http.client.response;
+
+import io.druid.java.util.http.client.io.AppendableByteArrayInputStream;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.handler.codec.http.HttpChunk;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+
+import java.io.InputStream;
+
+/**
+ */
+public class InputStreamResponseHandler implements HttpResponseHandler<AppendableByteArrayInputStream, InputStream>
+{
+  @Override
+  public ClientResponse<AppendableByteArrayInputStream> handleResponse(HttpResponse response)
+  {
+    AppendableByteArrayInputStream in = new AppendableByteArrayInputStream();
+    in.add(getContentBytes(response.getContent()));
+    return ClientResponse.finished(in);
+  }
+
+  @Override
+  public ClientResponse<AppendableByteArrayInputStream> handleChunk(
+      ClientResponse<AppendableByteArrayInputStream> clientResponse, HttpChunk chunk
+  )
+  {
+    clientResponse.getObj().add(getContentBytes(chunk.getContent()));
+    return clientResponse;
+  }
+
+  @Override
+  public ClientResponse<InputStream> done(ClientResponse<AppendableByteArrayInputStream> clientResponse)
+  {
+    final AppendableByteArrayInputStream obj = clientResponse.getObj();
+    obj.done();
+    return ClientResponse.finished(obj);
+  }
+
+  @Override
+  public void exceptionCaught(
+      ClientResponse<AppendableByteArrayInputStream> clientResponse,
+      Throwable e
+  )
+  {
+    final AppendableByteArrayInputStream obj = clientResponse.getObj();
+    obj.exceptionCaught(e);
+  }
+
+  private byte[] getContentBytes(ChannelBuffer content)
+  {
+    byte[] contentBytes = new byte[content.readableBytes()];
+    content.readBytes(contentBytes);
+    return contentBytes;
+  }
+}
diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java b/java-util/src/main/java/io/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java
new file mode 100644
index 000000000000..ffe0a9a14187
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/http/client/response/SequenceInputStreamResponseHandler.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements.
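To make the IntermediateType/FinalType contract concrete, here is a hypothetical handler (not part of the patch) that accumulates a mutable byte counter chunk by chunk and hands back an immutable total from done(); the class and metric of "byte count" are purely illustrative.

import io.druid.java.util.http.client.response.ClientResponse;
import io.druid.java.util.http.client.response.HttpResponseHandler;
import org.jboss.netty.handler.codec.http.HttpChunk;
import org.jboss.netty.handler.codec.http.HttpResponse;

import java.util.concurrent.atomic.AtomicLong;

// IntermediateType is a mutable counter, FinalType is the finished total.
class ByteCountingResponseHandler implements HttpResponseHandler<AtomicLong, Long>
{
  @Override
  public ClientResponse<AtomicLong> handleResponse(HttpResponse response)
  {
    // unfinished: the counter is not yet safe to hand to the caller
    return ClientResponse.unfinished(new AtomicLong(response.getContent().readableBytes()));
  }

  @Override
  public ClientResponse<AtomicLong> handleChunk(ClientResponse<AtomicLong> clientResponse, HttpChunk chunk)
  {
    clientResponse.getObj().addAndGet(chunk.getContent().readableBytes());
    return clientResponse;
  }

  @Override
  public ClientResponse<Long> done(ClientResponse<AtomicLong> clientResponse)
  {
    // finished: all chunks have been seen, so the total may be handed back
    return ClientResponse.finished(clientResponse.getObj().get());
  }

  @Override
  public void exceptionCaught(ClientResponse<AtomicLong> clientResponse, Throwable e)
  {
    // nothing to clean up for a plain counter
  }
}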
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.http.client.response;
+
+import com.google.common.base.Throwables;
+import com.google.common.io.ByteSource;
+import io.druid.java.util.common.logger.Logger;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBufferInputStream;
+import org.jboss.netty.handler.codec.http.HttpChunk;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.SequenceInputStream;
+import java.util.Enumeration;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * An HTTP response handler which uses sequence input streams to create a final InputStream.
+ * Any particular instance is encouraged to override a method and call super if it needs extra handling of the
+ * method parameters.
+ *
+ * This implementation uses a blocking queue to feed a SequenceInputStream that is terminated whenever the
+ * handler's done() method is called or a throwable is detected.
+ *
+ * The resulting InputStream will attempt to terminate normally, but on exception in HttpResponseHandler
+ * may end with an IOException upon read().
+ */
+public class SequenceInputStreamResponseHandler implements HttpResponseHandler<InputStream, InputStream>
+{
+  private static final Logger log = new Logger(SequenceInputStreamResponseHandler.class);
+  private final AtomicLong byteCount = new AtomicLong(0);
+  private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
+  private final AtomicBoolean done = new AtomicBoolean(false);
+
+  @Override
+  public ClientResponse<InputStream> handleResponse(HttpResponse response)
+  {
+    try {
+      queue.put(new ChannelBufferInputStream(response.getContent()));
+    }
+    catch (InterruptedException e) {
+      log.error(e, "Queue appending interrupted");
+      Thread.currentThread().interrupt();
+      throw Throwables.propagate(e);
+    }
+    byteCount.addAndGet(response.getContent().readableBytes());
+    return ClientResponse.finished(
+        new SequenceInputStream(
+            new Enumeration<InputStream>()
+            {
+              @Override
+              public boolean hasMoreElements()
+              {
+                // This stays true until done is set and the queue has been drained; until then the
+                // enumeration keeps handing out good InputStreams.
+                synchronized (done) {
+                  return !done.get() || !queue.isEmpty();
+                }
+              }
+
+              @Override
+              public InputStream nextElement()
+              {
+                try {
+                  return queue.take();
+                }
+                catch (InterruptedException e) {
+                  log.warn(e, "Thread interrupted while taking from queue");
+                  Thread.currentThread().interrupt();
+                  throw Throwables.propagate(e);
+                }
+              }
+            }
+        )
+    );
+  }
+
+  @Override
+  public ClientResponse<InputStream> handleChunk(
+      ClientResponse<InputStream> clientResponse, HttpChunk chunk
+  )
+  {
+    final ChannelBuffer channelBuffer = chunk.getContent();
+    final int bytes = channelBuffer.readableBytes();
+    if (bytes > 0) {
+      try {
+        queue.put(new ChannelBufferInputStream(channelBuffer));
+        // Queue.size() can be expensive in some implementations, but LinkedBlockingQueue.size is just an AtomicLong
+        log.debug("Added stream. Queue length %d", queue.size());
+      }
+      catch (InterruptedException e) {
+        log.warn(e, "Thread interrupted while adding to queue");
+        Thread.currentThread().interrupt();
+        throw Throwables.propagate(e);
+      }
+      byteCount.addAndGet(bytes);
+    } else {
+      log.debug("Skipping zero length chunk");
+    }
+    return clientResponse;
+  }
+
+  @Override
+  public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse)
+  {
+    synchronized (done) {
+      try {
+        // An empty stream is put at the end to give SequenceInputStream.close() something to close out
+        // after done is set to true, regardless of the rest of the stream's state.
+        queue.put(ByteSource.empty().openStream());
+        log.debug("Added terminal empty stream");
+      }
+      catch (InterruptedException e) {
+        log.warn(e, "Thread interrupted while adding to queue");
+        Thread.currentThread().interrupt();
+        throw Throwables.propagate(e);
+      }
+      catch (IOException e) {
+        // This should never happen
+        log.wtf(e, "The empty stream threw an IOException");
+        throw Throwables.propagate(e);
+      }
+      finally {
+        log.debug("Done after adding %d bytes of streams", byteCount.get());
+        done.set(true);
+      }
+    }
+    return ClientResponse.finished(clientResponse.getObj());
+  }
+
+  @Override
+  public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e)
+  {
+    // Don't wait for lock in case the lock had something to do with the error
+    synchronized (done) {
+      done.set(true);
+      // Make a best effort to put a zero length buffer into the queue in case something is waiting on the take()
+      // If nothing is waiting on take(), this will be closed out anyways.
+      final boolean accepted = queue.offer(
+          new InputStream()
+          {
+            @Override
+            public int read() throws IOException
+            {
+              throw new IOException(e);
+            }
+          }
+      );
+      if (!accepted) {
+        log.warn("Unable to place final IOException offer in queue");
+      } else {
+        log.debug("Placed IOException in queue");
+      }
+      log.debug(e, "Exception with queue length of %d and %d bytes available", queue.size(), byteCount.get());
+    }
+  }
+
+  public final long getByteCount()
+  {
+    return byteCount.get();
+  }
+}
diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/StatusResponseHandler.java b/java-util/src/main/java/io/druid/java/util/http/client/response/StatusResponseHandler.java
new file mode 100644
index 000000000000..c0b55e902e99
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/http/client/response/StatusResponseHandler.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
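A sketch of the consumption side of SequenceInputStreamResponseHandler (not part of the patch), assuming the HTTP client drives the callbacks on its I/O thread while a worker thread drains the resulting stream; note that if done() or exceptionCaught() never runs, the final read() below would block indefinitely.

import io.druid.java.util.http.client.response.ClientResponse;
import io.druid.java.util.http.client.response.SequenceInputStreamResponseHandler;
import org.jboss.netty.handler.codec.http.HttpResponse;

import java.io.IOException;
import java.io.InputStream;

class StreamingConsumerSketch
{
  // The client's I/O thread invokes handleResponse/handleChunk/done; the returned
  // InputStream may be consumed concurrently from a worker thread.
  static long drain(SequenceInputStreamResponseHandler handler, HttpResponse firstResponse) throws IOException
  {
    ClientResponse<InputStream> response = handler.handleResponse(firstResponse);
    long total = 0;
    try (InputStream body = response.getObj()) {
      byte[] buf = new byte[8192];
      int n;
      // blocks until a chunk arrives or the terminal empty stream is queued by done()
      while ((n = body.read(buf)) != -1) {
        total += n;
      }
    }
    return total;
  }
}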
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.response; + +import org.jboss.netty.handler.codec.http.HttpChunk; +import org.jboss.netty.handler.codec.http.HttpResponse; + +import java.nio.charset.Charset; + +/** + */ +public class StatusResponseHandler implements HttpResponseHandler +{ + private final Charset charset; + + public StatusResponseHandler(Charset charset) + { + this.charset = charset; + } + + @Override + public ClientResponse handleResponse(HttpResponse response) + { + return ClientResponse.unfinished( + new StatusResponseHolder( + response.getStatus(), + new StringBuilder(response.getContent().toString(charset)) + ) + ); + } + + @Override + public ClientResponse handleChunk( + ClientResponse response, + HttpChunk chunk + ) + { + final StringBuilder builder = response.getObj().getBuilder(); + + if (builder == null) { + return ClientResponse.finished(null); + } + + builder.append(chunk.getContent().toString(charset)); + return response; + } + + @Override + public ClientResponse done(ClientResponse response) + { + return ClientResponse.finished(response.getObj()); + } + + @Override + public void exceptionCaught( + ClientResponse clientResponse, Throwable e + ) + { + // Its safe to Ignore as the ClientResponse returned in handleChunk were unfinished + } + +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/StatusResponseHolder.java b/java-util/src/main/java/io/druid/java/util/http/client/response/StatusResponseHolder.java new file mode 100644 index 000000000000..58142e1a2c2b --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/response/StatusResponseHolder.java @@ -0,0 +1,54 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.http.client.response; + +import org.jboss.netty.handler.codec.http.HttpResponseStatus; + +/** + */ +public class StatusResponseHolder +{ + private final HttpResponseStatus status; + private final StringBuilder builder; + + public StatusResponseHolder( + HttpResponseStatus status, + StringBuilder builder + ) + { + this.status = status; + this.builder = builder; + } + + public HttpResponseStatus getStatus() + { + return status; + } + + public StringBuilder getBuilder() + { + return builder; + } + + public String getContent() + { + return builder.toString(); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/http/client/response/ToStringResponseHandler.java b/java-util/src/main/java/io/druid/java/util/http/client/response/ToStringResponseHandler.java new file mode 100644 index 000000000000..2533b6b90801 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/http/client/response/ToStringResponseHandler.java @@ -0,0 +1,78 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.response; + +import org.jboss.netty.handler.codec.http.HttpChunk; +import org.jboss.netty.handler.codec.http.HttpResponse; + +import java.nio.charset.Charset; + +/** + */ +public class ToStringResponseHandler implements HttpResponseHandler +{ + private final Charset charset; + + public ToStringResponseHandler(Charset charset) + { + this.charset = charset; + } + + @Override + public ClientResponse handleResponse(HttpResponse response) + { + return ClientResponse.unfinished(new StringBuilder(response.getContent().toString(charset))); + } + + @Override + public ClientResponse handleChunk( + ClientResponse response, + HttpChunk chunk + ) + { + final StringBuilder builder = response.getObj(); + if (builder == null) { + return ClientResponse.finished(null); + } + + builder.append(chunk.getContent().toString(charset)); + return response; + } + + @Override + public ClientResponse done(ClientResponse response) + { + final StringBuilder builder = response.getObj(); + if (builder == null) { + return ClientResponse.finished(null); + } + + return ClientResponse.finished(builder.toString()); + } + + @Override + public void exceptionCaught( + ClientResponse clientResponse, Throwable e + ) + { + // Its safe to Ignore as the ClientResponse returned in handleChunk were unfinished + } + +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/AbstractMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/AbstractMonitor.java new file mode 100644 index 000000000000..e3f757d619f1 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/AbstractMonitor.java @@ -0,0 +1,54 @@ +/* + * Licensed to Metamarkets Group Inc. 
(Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + + +import io.druid.java.util.emitter.service.ServiceEmitter; + +/** + */ +public abstract class AbstractMonitor implements Monitor +{ + private volatile boolean started = false; + + @Override + public void start() + { + started = true; + } + + @Override + public void stop() + { + started = false; + } + + @Override + public boolean monitor(ServiceEmitter emitter) + { + if (started) { + return doMonitor(emitter); + } + + return false; + } + + public abstract boolean doMonitor(ServiceEmitter emitter); +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/CgroupUtil.java b/java-util/src/main/java/io/druid/java/util/metrics/CgroupUtil.java new file mode 100644 index 000000000000..a476b4f3159f --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/CgroupUtil.java @@ -0,0 +1,29 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import java.util.regex.Pattern; + +public class CgroupUtil +{ + public static final String SPACE_MATCH = Pattern.quote(" "); + public static final String COMMA_MATCH = Pattern.quote(","); + public static final String COLON_MATCH = Pattern.quote(":"); +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/CompoundMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/CompoundMonitor.java new file mode 100644 index 000000000000..1e1ca4634a05 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/CompoundMonitor.java @@ -0,0 +1,75 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
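A hypothetical subclass (not part of the patch) showing the template AbstractMonitor provides: doMonitor() only runs between start() and stop(), and its return value tells the scheduler whether to keep the monitor scheduled. The class and metric names are illustrative; the emit pattern follows the monitors in this patch.

import io.druid.java.util.emitter.service.ServiceEmitter;
import io.druid.java.util.emitter.service.ServiceMetricEvent;
import io.druid.java.util.metrics.AbstractMonitor;

import java.util.Queue;

// Emits one gauge per scheduled cycle while started.
class QueueDepthMonitor extends AbstractMonitor
{
  private final Queue<?> queue;

  QueueDepthMonitor(Queue<?> queue)
  {
    this.queue = queue;
  }

  @Override
  public boolean doMonitor(ServiceEmitter emitter)
  {
    emitter.emit(ServiceMetricEvent.builder().build("custom/queue/depth", queue.size()));
    return true; // keep being rescheduled
  }
}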
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.base.Function; +import com.google.common.collect.Lists; +import io.druid.java.util.emitter.service.ServiceEmitter; + +import java.util.Arrays; +import java.util.List; + +public abstract class CompoundMonitor implements Monitor +{ + private final List monitors; + + public CompoundMonitor(List monitors) + { + this.monitors = monitors; + } + + public CompoundMonitor(Monitor... monitors) + { + this(Arrays.asList(monitors)); + } + + @Override + public void start() + { + for (Monitor monitor : monitors) { + monitor.start(); + } + } + + @Override + public void stop() + { + for (Monitor monitor : monitors) { + monitor.stop(); + } + } + + @Override + public boolean monitor(final ServiceEmitter emitter) + { + return shouldReschedule(Lists.transform(monitors, + new Function() + { + @Override + public Boolean apply(Monitor monitor) + { + return monitor.monitor(emitter); + } + } + )); + } + + public abstract boolean shouldReschedule(List reschedules); +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/CpuAcctDeltaMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/CpuAcctDeltaMonitor.java new file mode 100644 index 000000000000..e3a21f1d7203 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/CpuAcctDeltaMonitor.java @@ -0,0 +1,134 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
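A hypothetical composite built on CompoundMonitor (not part of the patch); the only decision left to a subclass is how to fold the children's reschedule votes, here "keep going while any child wants to continue".

import io.druid.java.util.metrics.CompoundMonitor;
import io.druid.java.util.metrics.Monitor;

import java.util.List;

class AnyRescheduleMonitor extends CompoundMonitor
{
  AnyRescheduleMonitor(Monitor... monitors)
  {
    super(monitors);
  }

  @Override
  public boolean shouldReschedule(List<Boolean> reschedules)
  {
    // reschedule as long as at least one child monitor returned true
    return reschedules.stream().anyMatch(Boolean::booleanValue);
  }
}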
+ */ + +package io.druid.java.util.metrics; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.DateTimes; +import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.cgroups.CgroupDiscoverer; +import io.druid.java.util.metrics.cgroups.CpuAcct; +import io.druid.java.util.metrics.cgroups.ProcSelfCgroupDiscoverer; +import org.joda.time.DateTime; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +public class CpuAcctDeltaMonitor extends FeedDefiningMonitor +{ + private static final Logger log = new Logger(CpuAcctDeltaMonitor.class); + private final AtomicReference priorSnapshot = new AtomicReference<>(null); + private final Map dimensions; + + private final CgroupDiscoverer cgroupDiscoverer; + + public CpuAcctDeltaMonitor() + { + this(ImmutableMap.of()); + } + + public CpuAcctDeltaMonitor(final Map dimensions) + { + this(dimensions, DEFAULT_METRICS_FEED); + } + + public CpuAcctDeltaMonitor(final Map dimensions, final String feed) + { + this(feed, dimensions, new ProcSelfCgroupDiscoverer()); + } + + public CpuAcctDeltaMonitor( + String feed, + Map dimensions, + CgroupDiscoverer cgroupDiscoverer + ) + { + super(feed); + Preconditions.checkNotNull(dimensions); + this.dimensions = ImmutableMap.copyOf(dimensions); + this.cgroupDiscoverer = Preconditions.checkNotNull(cgroupDiscoverer, "cgroupDiscoverer required"); + } + + @Override + public boolean doMonitor(ServiceEmitter emitter) + { + final CpuAcct cpuAcct = new CpuAcct(cgroupDiscoverer); + final CpuAcct.CpuAcctMetric snapshot = cpuAcct.snapshot(); + final long nanoTime = System.nanoTime(); // Approx time... may be influenced by an unlucky GC + final DateTime dateTime = DateTimes.nowUtc(); + final SnapshotHolder priorSnapshotHolder = this.priorSnapshot.get(); + if (!priorSnapshot.compareAndSet(priorSnapshotHolder, new SnapshotHolder(snapshot, nanoTime))) { + log.debug("Pre-empted by another monitor run"); + return false; + } + if (priorSnapshotHolder == null) { + log.info("Detected first run, storing result for next run"); + return false; + } + final long elapsedNs = nanoTime - priorSnapshotHolder.timestamp; + if (snapshot.cpuCount() != priorSnapshotHolder.metric.cpuCount()) { + log.warn( + "Prior CPU count [%d] does not match current cpu count [%d]. 
Skipping metrics emission", + priorSnapshotHolder.metric.cpuCount(), + snapshot.cpuCount() + ); + return false; + } + for (int i = 0; i < snapshot.cpuCount(); ++i) { + final ServiceMetricEvent.Builder builderUsr = builder() + .setDimension("cpuName", Integer.toString(i)) + .setDimension("cpuTime", "usr"); + final ServiceMetricEvent.Builder builderSys = builder() + .setDimension("cpuName", Integer.toString(i)) + .setDimension("cpuTime", "sys"); + MonitorUtils.addDimensionsToBuilder(builderUsr, dimensions); + MonitorUtils.addDimensionsToBuilder(builderSys, dimensions); + emitter.emit(builderUsr.build( + dateTime, + "cgroup/cpu_time_delta_ns", + snapshot.usrTime(i) - priorSnapshotHolder.metric.usrTime(i) + )); + emitter.emit(builderSys.build( + dateTime, + "cgroup/cpu_time_delta_ns", + snapshot.sysTime(i) - priorSnapshotHolder.metric.sysTime(i) + )); + } + if (snapshot.cpuCount() > 0) { + // Don't bother emitting metrics if there aren't actually any cpus (usually from error) + emitter.emit(builder().build(dateTime, "cgroup/cpu_time_delta_ns_elapsed", elapsedNs)); + } + return true; + } + + static class SnapshotHolder + { + private final CpuAcct.CpuAcctMetric metric; + private final long timestamp; + + SnapshotHolder(CpuAcct.CpuAcctMetric metric, long timestamp) + { + this.metric = metric; + this.timestamp = timestamp; + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/FeedDefiningMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/FeedDefiningMonitor.java new file mode 100644 index 000000000000..aa3d2863d5b1 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/FeedDefiningMonitor.java @@ -0,0 +1,40 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.base.Preconditions; +import io.druid.java.util.emitter.service.ServiceMetricEvent; + +public abstract class FeedDefiningMonitor extends AbstractMonitor +{ + public static final String DEFAULT_METRICS_FEED = "metrics"; + protected final String feed; + + public FeedDefiningMonitor(String feed) + { + Preconditions.checkNotNull(feed); + this.feed = feed; + } + + protected ServiceMetricEvent.Builder builder() + { + return ServiceMetricEvent.builder().setFeed(feed); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/HttpPostEmitterMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/HttpPostEmitterMonitor.java new file mode 100644 index 000000000000..590eabda17d6 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/HttpPostEmitterMonitor.java @@ -0,0 +1,91 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.emitter.core.ConcurrentTimeCounter; +import io.druid.java.util.emitter.core.HttpPostEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; + +public class HttpPostEmitterMonitor extends FeedDefiningMonitor +{ + private final HttpPostEmitter httpPostEmitter; + private final ImmutableMap extraDimensions; + private final ServiceMetricEvent.Builder builder; + private long lastTotalEmittedEvents = 0; + private int lastDroppedBuffers = 0; + + public HttpPostEmitterMonitor( + String feed, + HttpPostEmitter httpPostEmitter, + ImmutableMap extraDimensions + ) + { + super(feed); + this.httpPostEmitter = httpPostEmitter; + this.extraDimensions = extraDimensions; + this.builder = builder(); + } + + @Override + public boolean doMonitor(ServiceEmitter emitter) + { + long newTotalEmittedEvents = httpPostEmitter.getTotalEmittedEvents(); + long totalEmittedEventsDiff = newTotalEmittedEvents - lastTotalEmittedEvents; + emitter.emit(builder.build("emitter/events/emitted", totalEmittedEventsDiff)); + lastTotalEmittedEvents = newTotalEmittedEvents; + + int newDroppedBuffers = httpPostEmitter.getDroppedBuffers(); + int droppedBuffersDiff = newDroppedBuffers - lastDroppedBuffers; + emitter.emit(builder.build("emitter/buffers/dropped", droppedBuffersDiff)); + lastDroppedBuffers = newDroppedBuffers; + + emitTimeCounterMetrics(emitter, httpPostEmitter.getBatchFillingTimeCounter(), "emitter/batchFilling/"); + emitTimeCounterMetrics(emitter, httpPostEmitter.getSuccessfulSendingTimeCounter(), "emitter/successfulSending/"); + emitTimeCounterMetrics(emitter, httpPostEmitter.getFailedSendingTimeCounter(), "emitter/failedSending/"); + + emitter.emit(builder.build("emitter/events/emitQueue", httpPostEmitter.getEventsToEmit())); + emitter.emit(builder.build("emitter/events/large/emitQueue", httpPostEmitter.getLargeEventsToEmit())); + emitter.emit(builder.build("emitter/buffers/totalAllocated", httpPostEmitter.getTotalAllocatedBuffers())); + emitter.emit(builder.build("emitter/buffers/emitQueue", httpPostEmitter.getBuffersToEmit())); + emitter.emit(builder.build("emitter/buffers/failed", httpPostEmitter.getFailedBuffers())); + emitter.emit(builder.build("emitter/buffers/reuseQueue", httpPostEmitter.getBuffersToReuse())); + + return true; + } + + private void emitTimeCounterMetrics(ServiceEmitter emitter, ConcurrentTimeCounter timeCounter, String metricNameBase) + { + long timeSumAndCount = timeCounter.getTimeSumAndCountAndReset(); + emitter.emit(builder.build(metricNameBase + "timeMsSum", ConcurrentTimeCounter.timeSum(timeSumAndCount))); + emitter.emit(builder.build(metricNameBase + "count", 
ConcurrentTimeCounter.count(timeSumAndCount))); + emitter.emit(builder.build(metricNameBase + "maxTimeMs", timeCounter.getAndResetMaxTime())); + emitter.emit(builder.build(metricNameBase + "minTimeMs", timeCounter.getAndResetMinTime())); + } + + @Override + protected ServiceMetricEvent.Builder builder() + { + ServiceMetricEvent.Builder builder = super.builder(); + extraDimensions.forEach(builder::setDimension); + return builder; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/JvmCpuMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/JvmCpuMonitor.java new file mode 100644 index 000000000000..fcefa3d9bfd3 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/JvmCpuMonitor.java @@ -0,0 +1,89 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import org.hyperic.sigar.ProcCpu; +import org.hyperic.sigar.Sigar; +import org.hyperic.sigar.SigarException; + +import java.util.Map; + +public class JvmCpuMonitor extends FeedDefiningMonitor +{ + private static final Logger log = new Logger(JvmCpuMonitor.class); + + private final Sigar sigar = SigarUtil.getSigar(); + private final long currentProcessId = sigar.getPid(); + + private final KeyedDiff diff = new KeyedDiff(); + + private Map dimensions; + + public JvmCpuMonitor() + { + this(ImmutableMap.of()); + } + + public JvmCpuMonitor(Map dimensions) + { + this(dimensions, DEFAULT_METRICS_FEED); + } + + public JvmCpuMonitor(Map dimensions, String feed) + { + super(feed); + Preconditions.checkNotNull(dimensions); + this.dimensions = ImmutableMap.copyOf(dimensions); + } + + @Override + public boolean doMonitor(ServiceEmitter emitter) + { + // process CPU + try { + ProcCpu procCpu = sigar.getProcCpu(currentProcessId); + final ServiceMetricEvent.Builder builder = builder(); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + // delta for total, sys, user + Map procDiff = diff.to( + "proc/cpu", ImmutableMap.of( + "jvm/cpu/total", procCpu.getTotal(), + "jvm/cpu/sys", procCpu.getSys(), + "jvm/cpu/user", procCpu.getUser() + ) + ); + if (procDiff != null) { + for (Map.Entry entry : procDiff.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + emitter.emit(builder.build("jvm/cpu/percent", procCpu.getPercent())); + } + catch (SigarException e) { + log.error(e, "Failed to get ProcCpu"); + } + return true; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/JvmMonitor.java 
b/java-util/src/main/java/io/druid/java/util/metrics/JvmMonitor.java new file mode 100644 index 000000000000..6f696be05be0 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/JvmMonitor.java @@ -0,0 +1,315 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import org.gridkit.lab.jvm.perfdata.JStatData; +import org.gridkit.lab.jvm.perfdata.JStatData.LongCounter; +import org.gridkit.lab.jvm.perfdata.JStatData.StringCounter; +import org.gridkit.lab.jvm.perfdata.JStatData.TickCounter; + +import java.lang.management.BufferPoolMXBean; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryPoolMXBean; +import java.lang.management.MemoryType; +import java.lang.management.MemoryUsage; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class JvmMonitor extends FeedDefiningMonitor +{ + private final Map dimensions; + private final long pid; + + private final GcCounters gcCounters = new GcCounters(); + + public JvmMonitor() + { + this(ImmutableMap.of()); + } + + public JvmMonitor(Map dimensions) + { + this(dimensions, DEFAULT_METRICS_FEED); + } + + public JvmMonitor(Map dimensions, String feed) + { + this(dimensions, feed, JvmPidDiscoverer.instance()); + } + + public JvmMonitor(Map dimensions, String feed, PidDiscoverer pidDiscoverer) + { + super(feed); + Preconditions.checkNotNull(dimensions); + this.dimensions = ImmutableMap.copyOf(dimensions); + this.pid = Preconditions.checkNotNull(pidDiscoverer).getPid(); + } + + @Override + public boolean doMonitor(ServiceEmitter emitter) + { + emitJvmMemMetrics(emitter); + emitDirectMemMetrics(emitter); + emitGcMetrics(emitter); + + return true; + } + + /** + * These metrics are going to be replaced by new jvm/gc/mem/* metrics + */ + @Deprecated + private void emitJvmMemMetrics(ServiceEmitter emitter) + { + // I have no idea why, but jvm/mem is slightly more than the sum of jvm/pool. Let's just include + // them both. 
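+ // (Presumably the gap is JVM-internal memory that is not attributed to any named pool, but that is a guess;
+ // the MemoryMXBean API does not document the relationship.)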
+ final Map usages = ImmutableMap.of( + "heap", ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(), + "nonheap", ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage() + ); + for (Map.Entry entry : usages.entrySet()) { + final String kind = entry.getKey(); + final MemoryUsage usage = entry.getValue(); + final ServiceMetricEvent.Builder builder = builder().setDimension("memKind", kind); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + emitter.emit(builder.build("jvm/mem/max", usage.getMax())); + emitter.emit(builder.build("jvm/mem/committed", usage.getCommitted())); + emitter.emit(builder.build("jvm/mem/used", usage.getUsed())); + emitter.emit(builder.build("jvm/mem/init", usage.getInit())); + } + + // jvm/pool + for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) { + final String kind = pool.getType() == MemoryType.HEAP ? "heap" : "nonheap"; + final MemoryUsage usage = pool.getUsage(); + final ServiceMetricEvent.Builder builder = builder() + .setDimension("poolKind", kind) + .setDimension("poolName", pool.getName()); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + emitter.emit(builder.build("jvm/pool/max", usage.getMax())); + emitter.emit(builder.build("jvm/pool/committed", usage.getCommitted())); + emitter.emit(builder.build("jvm/pool/used", usage.getUsed())); + emitter.emit(builder.build("jvm/pool/init", usage.getInit())); + } + } + + private void emitDirectMemMetrics(ServiceEmitter emitter) + { + for (BufferPoolMXBean pool : ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class)) { + final ServiceMetricEvent.Builder builder = builder().setDimension("bufferpoolName", pool.getName()); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + emitter.emit(builder.build("jvm/bufferpool/capacity", pool.getTotalCapacity())); + emitter.emit(builder.build("jvm/bufferpool/used", pool.getMemoryUsed())); + emitter.emit(builder.build("jvm/bufferpool/count", pool.getCount())); + } + } + + private void emitGcMetrics(ServiceEmitter emitter) + { + gcCounters.emit(emitter, dimensions); + } + + /** + * The following GC-related code is partially based on + * https://github.com/aragozin/jvm-tools/blob/e0e37692648951440aa1a4ea5046261cb360df70/ + * sjk-core/src/main/java/org/gridkit/jvmtool/PerfCounterGcCpuUsageMonitor.java + */ + private class GcCounters + { + private final List generations = new ArrayList<>(); + + GcCounters() + { + // connect to itself + final JStatData jStatData = JStatData.connect(pid); + final Map> jStatCounters = jStatData.getAllCounters(); + + generations.add(new GcGeneration(jStatCounters, 0, "young")); + generations.add(new GcGeneration(jStatCounters, 1, "old")); + // Removed in Java 8 but still actual for previous Java versions + if (jStatCounters.containsKey("sun.gc.generation.2.name")) { + generations.add(new GcGeneration(jStatCounters, 2, "perm")); + } + } + + void emit(ServiceEmitter emitter, Map dimensions) + { + for (GcGeneration generation : generations) { + generation.emit(emitter, dimensions); + } + } + } + + private class GcGeneration + { + private final String name; + private final GcGenerationCollector collector; + private final List spaces = new ArrayList<>(); + + GcGeneration(Map> jStatCounters, long genIndex, String name) + { + this.name = StringUtils.toLowerCase(name); + + long spacesCount = ((JStatData.LongCounter) jStatCounters.get( + StringUtils.format("sun.gc.generation.%d.spaces", genIndex) + )).getLong(); + for (long spaceIndex = 0; spaceIndex < spacesCount; spaceIndex++) { + 
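+ // one GcGenerationSpace per jstat space counter group (for the young generation these are eden and the survivor spaces)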
spaces.add(new GcGenerationSpace(jStatCounters, genIndex, spaceIndex)); + } + + if (jStatCounters.containsKey(StringUtils.format("sun.gc.collector.%d.name", genIndex))) { + collector = new GcGenerationCollector(jStatCounters, genIndex); + } else { + collector = null; + } + } + + void emit(ServiceEmitter emitter, Map dimensions) + { + ImmutableMap.Builder dimensionsCopyBuilder = ImmutableMap + .builder() + .putAll(dimensions) + .put("gcGen", new String[]{name}); + + if (collector != null) { + dimensionsCopyBuilder.put("gcName", new String[]{collector.name}); + } + + Map dimensionsCopy = dimensionsCopyBuilder.build(); + + if (collector != null) { + collector.emit(emitter, dimensionsCopy); + } + + for (GcGenerationSpace space : spaces) { + space.emit(emitter, dimensionsCopy); + } + } + } + + private class GcGenerationCollector + { + private final String name; + private final LongCounter invocationsCounter; + private final TickCounter cpuCounter; + private long lastInvocations = 0; + private long lastCpuNanos = 0; + + GcGenerationCollector(Map> jStatCounters, long genIndex) + { + String collectorKeyPrefix = StringUtils.format("sun.gc.collector.%d", genIndex); + + String nameKey = StringUtils.format("%s.name", collectorKeyPrefix); + StringCounter nameCounter = (StringCounter) jStatCounters.get(nameKey); + name = getReadableName(nameCounter.getString()); + + invocationsCounter = (LongCounter) jStatCounters.get(StringUtils.format("%s.invocations", collectorKeyPrefix)); + cpuCounter = (TickCounter) jStatCounters.get(StringUtils.format("%s.time", collectorKeyPrefix)); + } + + void emit(ServiceEmitter emitter, Map dimensions) + { + final ServiceMetricEvent.Builder builder = builder(); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + long newInvocations = invocationsCounter.getLong(); + emitter.emit(builder.build("jvm/gc/count", newInvocations - lastInvocations)); + lastInvocations = newInvocations; + + long newCpuNanos = cpuCounter.getNanos(); + emitter.emit(builder.build("jvm/gc/cpu", newCpuNanos - lastCpuNanos)); + lastCpuNanos = newCpuNanos; + } + + private String getReadableName(String name) + { + switch (name) { + // Young gen + case "Copy": + return "serial"; + case "PSScavenge": + return "parallel"; + case "PCopy": + return "cms"; + case "G1 incremental collections": + return "g1"; + + // Old gen + case "MCS": + return "serial"; + case "PSParallelCompact": + return "parallel"; + case "CMS": + return "cms"; + case "G1 stop-the-world full collections": + return "g1"; + + default: + return name; + } + } + } + + private class GcGenerationSpace + { + private final String name; + + private final LongCounter maxCounter; + private final LongCounter capacityCounter; + private final LongCounter usedCounter; + private final LongCounter initCounter; + + GcGenerationSpace(Map> jStatCounters, long genIndex, long spaceIndex) + { + String spaceKeyPrefix = StringUtils.format("sun.gc.generation.%d.space.%d", genIndex, spaceIndex); + + String nameKey = StringUtils.format("%s.name", spaceKeyPrefix); + StringCounter nameCounter = (StringCounter) jStatCounters.get(nameKey); + name = StringUtils.toLowerCase(nameCounter.toString()); + + maxCounter = (LongCounter) jStatCounters.get(StringUtils.format("%s.maxCapacity", spaceKeyPrefix)); + capacityCounter = (LongCounter) jStatCounters.get(StringUtils.format("%s.capacity", spaceKeyPrefix)); + usedCounter = (LongCounter) jStatCounters.get(StringUtils.format("%s.used", spaceKeyPrefix)); + initCounter = (LongCounter) 
jStatCounters.get(StringUtils.format("%s.initCapacity", spaceKeyPrefix)); + } + + void emit(ServiceEmitter emitter, Map dimensions) + { + final ServiceMetricEvent.Builder builder = builder(); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + builder.setDimension("gcGenSpaceName", name); + + emitter.emit(builder.build("jvm/gc/mem/max", maxCounter.getLong())); + emitter.emit(builder.build("jvm/gc/mem/capacity", capacityCounter.getLong())); + emitter.emit(builder.build("jvm/gc/mem/used", usedCounter.getLong())); + emitter.emit(builder.build("jvm/gc/mem/init", initCounter.getLong())); + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/JvmPidDiscoverer.java b/java-util/src/main/java/io/druid/java/util/metrics/JvmPidDiscoverer.java new file mode 100644 index 000000000000..b279f43e18a4 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/JvmPidDiscoverer.java @@ -0,0 +1,80 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import io.druid.java.util.common.RE; + +import java.lang.management.ManagementFactory; +import java.util.regex.Pattern; + +/** + * For systems that for whatever reason cannot use Sigar (through io.druid.java.util.metrics.SigarPidDiscoverer ), + * this attempts to get the PID from the JVM "name". + */ +public class JvmPidDiscoverer implements PidDiscoverer +{ + private static final JvmPidDiscoverer INSTANCE = new JvmPidDiscoverer(); + + public static JvmPidDiscoverer instance() + { + return INSTANCE; + } + + /** + * use {JvmPidDiscoverer.instance()} + */ + private JvmPidDiscoverer() + { + } + + /** + * Returns the PID as a best guess. This uses methods that are not guaranteed to actually be the PID. + *

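+ * Implementation note: the PID is parsed from the text before the '@' in RuntimeMXBean.getName(), whose
+ * pid@host format is a HotSpot convention rather than a documented guarantee.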
    + * TODO: switch to ProcessHandle.current().getPid() for java9 potentially + * + * @return the PID of the current jvm if available + * + * @throws RuntimeException if the pid cannot be determined + */ + @Override + public long getPid() + { + return Inner.PID; + } + + private static class Inner + { + private static final long PID; + + static { + final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); + final String[] nameSplits = jvmName.split(Pattern.quote("@")); + if (nameSplits.length != 2) { + throw new RE("Unable to determine pid from [%s]", jvmName); + } + try { + PID = Long.parseLong(nameSplits[0]); + } + catch (NumberFormatException nfe) { + throw new RE(nfe, "Unable to determine pid from [%s]", jvmName); + } + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/JvmThreadsMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/JvmThreadsMonitor.java new file mode 100644 index 000000000000..60efeb004472 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/JvmThreadsMonitor.java @@ -0,0 +1,83 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.metrics; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadMXBean; +import java.util.Map; + +public class JvmThreadsMonitor extends FeedDefiningMonitor +{ + private final Map dimensions; + + private int lastLiveThreads = 0; + private long lastStartedThreads = 0; + + public JvmThreadsMonitor() + { + this(ImmutableMap.of()); + } + + public JvmThreadsMonitor(Map dimensions) + { + this(dimensions, DEFAULT_METRICS_FEED); + } + + public JvmThreadsMonitor(Map dimensions, String feed) + { + super(feed); + Preconditions.checkNotNull(dimensions); + this.dimensions = ImmutableMap.copyOf(dimensions); + } + + @Override + public boolean doMonitor(ServiceEmitter emitter) + { + ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); + + final ServiceMetricEvent.Builder builder = builder(); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + // Because between next two calls on ThreadMXBean new threads can be started we can observe some inconsistency + // in counters values and finished counter could be even negative + int newLiveThreads = threadBean.getThreadCount(); + long newStartedThreads = threadBean.getTotalStartedThreadCount(); + + long startedThreadsDiff = newStartedThreads - lastStartedThreads; + + emitter.emit(builder.build("jvm/threads/started", startedThreadsDiff)); + emitter.emit(builder.build("jvm/threads/finished", lastLiveThreads + startedThreadsDiff - newLiveThreads)); + emitter.emit(builder.build("jvm/threads/live", newLiveThreads)); + emitter.emit(builder.build("jvm/threads/liveDaemon", threadBean.getDaemonThreadCount())); + + emitter.emit(builder.build("jvm/threads/livePeak", threadBean.getPeakThreadCount())); + threadBean.resetPeakThreadCount(); + + lastStartedThreads = newStartedThreads; + lastLiveThreads = newLiveThreads; + + return true; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/KeyedDiff.java b/java-util/src/main/java/io/druid/java/util/metrics/KeyedDiff.java new file mode 100644 index 000000000000..7378071f7191 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/KeyedDiff.java @@ -0,0 +1,53 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + + package io.druid.java.util.metrics; + + import io.druid.java.util.common.logger.Logger; + + import java.util.HashMap; + import java.util.Map; + + public class KeyedDiff + { + private static final Logger log = new Logger(KeyedDiff.class); + + private final Map<String, Map<String, Long>> prevs = new HashMap<String, Map<String, Long>>(); + + public Map<String, Long> to(String key, Map<String, Long> curr) + { + final Map<String, Long> prev = prevs.put(key, curr); + if (prev != null) { + return subtract(curr, prev); + } else { + log.debug("No previous data for key[%s]", key); + return null; + } + } + + public static Map<String, Long> subtract(Map<String, Long> xs, Map<String, Long> ys) + { + assert xs.keySet().equals(ys.keySet()); + final Map<String, Long> zs = new HashMap<String, Long>(); + for (String k : xs.keySet()) { + zs.put(k, xs.get(k) - ys.get(k)); + } + return zs; + } + } diff --git a/java-util/src/main/java/io/druid/java/util/metrics/Monitor.java b/java-util/src/main/java/io/druid/java/util/metrics/Monitor.java new file mode 100644 index 000000000000..666477b108b6 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/Monitor.java @@ -0,0 +1,33 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package io.druid.java.util.metrics; + + import io.druid.java.util.emitter.service.ServiceEmitter; + + /** + */ + public interface Monitor + { + void start(); + + void stop(); + + boolean monitor(ServiceEmitter emitter); + } diff --git a/java-util/src/main/java/io/druid/java/util/metrics/MonitorOfTheMonitors.java b/java-util/src/main/java/io/druid/java/util/metrics/MonitorOfTheMonitors.java new file mode 100644 index 000000000000..edfd94dd32d8 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/MonitorOfTheMonitors.java @@ -0,0 +1,35 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.metrics; + +import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.service.ServiceEmitter; + +public class MonitorOfTheMonitors extends AbstractMonitor +{ + private static final Logger log = new Logger(MonitorOfTheMonitors.class); + + @Override + public boolean doMonitor(ServiceEmitter emitter) + { + log.info("I am watching..."); + return true; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/MonitorScheduler.java b/java-util/src/main/java/io/druid/java/util/metrics/MonitorScheduler.java new file mode 100644 index 000000000000..46f374609f89 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/MonitorScheduler.java @@ -0,0 +1,142 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.collect.Sets; +import io.druid.java.util.common.ISE; +import io.druid.java.util.common.concurrent.ScheduledExecutors; +import io.druid.java.util.common.lifecycle.LifecycleStart; +import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.emitter.service.ServiceEmitter; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ScheduledExecutorService; + +/** + */ +public class MonitorScheduler +{ + private final MonitorSchedulerConfig config; + private final ScheduledExecutorService exec; + private final ServiceEmitter emitter; + private final Set monitors; + private final Object lock = new Object(); + + private volatile boolean started = false; + + public MonitorScheduler( + MonitorSchedulerConfig config, + ScheduledExecutorService exec, + ServiceEmitter emitter, + List monitors + ) + { + this.config = config; + this.exec = exec; + this.emitter = emitter; + this.monitors = Sets.newHashSet(monitors); + } + + @LifecycleStart + public void start() + { + synchronized (lock) { + if (started) { + return; + } + started = true; + + for (final Monitor monitor : monitors) { + startMonitor(monitor); + } + } + } + + public void addMonitor(final Monitor monitor) + { + synchronized (lock) { + if (!started) { + throw new ISE("addMonitor must be called after start"); + } + if (hasMonitor(monitor)) { + throw new ISE("Monitor already monitoring: %s", monitor); + } + monitors.add(monitor); + startMonitor(monitor); + } + } + + public void removeMonitor(final Monitor monitor) + { + synchronized (lock) { + monitors.remove(monitor); + monitor.stop(); + } + } + + @LifecycleStop + public void stop() + { + synchronized (lock) { + if (!started) { + return; + } + + started = false; + for (Monitor monitor : monitors) { + monitor.stop(); + } + } + } + + private void startMonitor(final Monitor monitor) + { + synchronized (lock) { + 
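+ // start the monitor before scheduling it, so the first scheduled monitor(emitter) call sees a started monitor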
monitor.start(); + ScheduledExecutors.scheduleAtFixedRate( + exec, + config.getEmitterPeriod(), + new Callable() + { + @Override + public ScheduledExecutors.Signal call() throws Exception + { + // Run one more time even if the monitor was removed, in case there's some extra data to flush + if (monitor.monitor(emitter) && hasMonitor(monitor)) { + return ScheduledExecutors.Signal.REPEAT; + } else { + removeMonitor(monitor); + return ScheduledExecutors.Signal.STOP; + } + } + } + ); + } + } + + private boolean hasMonitor(final Monitor monitor) + { + synchronized (lock) { + return monitors.contains(monitor); + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/MonitorSchedulerConfig.java b/java-util/src/main/java/io/druid/java/util/metrics/MonitorSchedulerConfig.java new file mode 100644 index 000000000000..e1675d16dd20 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/MonitorSchedulerConfig.java @@ -0,0 +1,33 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import org.joda.time.Duration; +import org.skife.config.Config; +import org.skife.config.Default; + +/** + */ +public abstract class MonitorSchedulerConfig +{ + @Config({"io.druid.java.util.metrics.emitter.period", "com.metamx.druid.emitter.period"}) + @Default("PT60s") + public abstract Duration getEmitterPeriod(); +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/MonitorUtils.java b/java-util/src/main/java/io/druid/java/util/metrics/MonitorUtils.java new file mode 100644 index 000000000000..bd48e0460347 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/MonitorUtils.java @@ -0,0 +1,34 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + + package io.druid.java.util.metrics; + + import io.druid.java.util.emitter.service.ServiceMetricEvent; + + import java.util.Map; + + public class MonitorUtils + { + public static void addDimensionsToBuilder(ServiceMetricEvent.Builder builder, Map<String, String[]> dimensions) + { + for (Map.Entry<String, String[]> keyValue : dimensions.entrySet()) { + builder.setDimension(keyValue.getKey(), keyValue.getValue()); + } + } + } diff --git a/java-util/src/main/java/io/druid/java/util/metrics/Monitors.java b/java-util/src/main/java/io/druid/java/util/metrics/Monitors.java new file mode 100644 index 000000000000..24f155fff7cb --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/Monitors.java @@ -0,0 +1,94 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package io.druid.java.util.metrics; + + import java.util.List; + import java.util.Map; + + public class Monitors + { + /** + * Creates a JVM monitor, configured with the given dimensions, that gathers all currently available JVM-wide + * monitors. Emitted events have the default feed {@link FeedDefiningMonitor#DEFAULT_METRICS_FEED}. + * See: {@link Monitors#createCompoundJvmMonitor(Map, String)} + * + * @param dimensions common dimensions to configure the JVM monitor with + * + * @return a universally useful JVM-wide monitor + */ + public static Monitor createCompoundJvmMonitor(Map<String, String[]> dimensions) + { + return createCompoundJvmMonitor(dimensions, FeedDefiningMonitor.DEFAULT_METRICS_FEED); + } + + /** + * Creates a JVM monitor, configured with the given dimensions, that gathers all currently available JVM-wide + * monitors: {@link JvmMonitor}, {@link JvmCpuMonitor} and {@link JvmThreadsMonitor} (this list may + * change in any future release of this library, including a minor release). + * + * @param dimensions common dimensions to configure the JVM monitor with + * @param feed feed for all emitted events + * + * @return a universally useful JVM-wide monitor + */ + public static Monitor createCompoundJvmMonitor(Map<String, String[]> dimensions, String feed) + { + // This list doesn't include SysMonitor because it should probably be run in only one JVM if several JVMs are + // running on the same instance, so most of the time SysMonitor should be configured/set up differently than + // the "simple" JVM monitors created below. + return and( // Could equally be or(), because all member monitors always return true from their monitor() methods. + new JvmMonitor(dimensions, feed), + new JvmCpuMonitor(dimensions, feed), + new JvmThreadsMonitor(dimensions, feed) + ); + } + + public static Monitor and(Monitor... 
monitors) + { + return new CompoundMonitor(monitors) + { + @Override + public boolean shouldReschedule(List reschedules) + { + boolean b = true; + for (boolean reschedule : reschedules) { + b = b && reschedule; + } + return b; + } + }; + } + + public static Monitor or(Monitor... monitors) + { + return new CompoundMonitor(monitors) + { + @Override + public boolean shouldReschedule(List reschedules) + { + boolean b = false; + for (boolean reschedule : reschedules) { + b = b || reschedule; + } + return b; + } + }; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/ParametrizedUriEmitterMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/ParametrizedUriEmitterMonitor.java new file mode 100644 index 000000000000..219e82105fc8 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/ParametrizedUriEmitterMonitor.java @@ -0,0 +1,75 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.emitter.core.ParametrizedUriEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; + +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + +public class ParametrizedUriEmitterMonitor extends FeedDefiningMonitor +{ + private final ParametrizedUriEmitter parametrizedUriEmitter; + private final Map monitors = new HashMap<>(); + + public ParametrizedUriEmitterMonitor(String feed, ParametrizedUriEmitter parametrizedUriEmitter) + { + super(feed); + this.parametrizedUriEmitter = parametrizedUriEmitter; + } + + private void updateMonitors() + { + parametrizedUriEmitter.forEachEmitter( + (uri, emitter) -> { + monitors.computeIfAbsent( + uri, + u -> { + HttpPostEmitterMonitor monitor = new HttpPostEmitterMonitor( + feed, + emitter, + ImmutableMap.of("uri", uri.toString()) + ); + monitor.start(); + return monitor; + } + ); + } + ); + } + + @Override + public void stop() + { + monitors.values().forEach(AbstractMonitor::stop); + super.stop(); + } + + @Override + public boolean doMonitor(ServiceEmitter emitter) + { + updateMonitors(); + monitors.values().forEach(m -> m.doMonitor(emitter)); + return true; + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/PidDiscoverer.java b/java-util/src/main/java/io/druid/java/util/metrics/PidDiscoverer.java new file mode 100644 index 000000000000..3857027395f2 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/PidDiscoverer.java @@ -0,0 +1,25 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +public interface PidDiscoverer +{ + long getPid(); +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/SigarPidDiscoverer.java b/java-util/src/main/java/io/druid/java/util/metrics/SigarPidDiscoverer.java new file mode 100644 index 000000000000..2ae4babe3a32 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/SigarPidDiscoverer.java @@ -0,0 +1,44 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +public class SigarPidDiscoverer implements PidDiscoverer +{ + private static final SigarPidDiscoverer INSTANCE = new SigarPidDiscoverer(); + + public static SigarPidDiscoverer instance() + { + return INSTANCE; + } + + /** + * use {SigarPidDiscoverer.instance()} + */ + private SigarPidDiscoverer() + { + + } + + @Override + public long getPid() + { + return SigarUtil.getCurrentProcessId(); + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/SigarUtil.java b/java-util/src/main/java/io/druid/java/util/metrics/SigarUtil.java new file mode 100644 index 000000000000..824826f8df61 --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/SigarUtil.java @@ -0,0 +1,85 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + + package io.druid.java.util.metrics; + + import com.google.common.base.Throwables; + import io.druid.java.util.common.StreamUtils; + import io.druid.java.util.common.logger.Logger; + import org.hyperic.jni.ArchLoaderException; + import org.hyperic.jni.ArchNotSupportedException; + import org.hyperic.sigar.Sigar; + import org.hyperic.sigar.SigarLoader; + + import java.io.File; + import java.io.IOException; + import java.net.URL; + import java.nio.file.Files; + + public class SigarUtil + { + private static final Logger log = new Logger(SigarUtil.class); + + // Note: this is required to load the sigar native lib. + static { + SigarLoader loader = new SigarLoader(Sigar.class); + try { + String libName = loader.getLibraryName(); + + final URL url = SysMonitor.class.getResource("/" + libName); + if (url != null) { + final File tmpDir = Files.createTempDirectory("sigar").toFile(); + // As per java.io.DeleteOnExitHook.runHooks(), deletion order is reversed from registration order + tmpDir.deleteOnExit(); + final File nativeLibTmpFile = new File(tmpDir, libName); + nativeLibTmpFile.deleteOnExit(); + StreamUtils.copyToFileAndClose(url.openStream(), nativeLibTmpFile); + log.info("Loading sigar native lib at tmpPath[%s]", nativeLibTmpFile); + loader.load(nativeLibTmpFile.getParent()); + } else { + log.info("No native libs found in jar, letting the normal load mechanisms figure it out."); + } + } + catch (ArchNotSupportedException | ArchLoaderException | IOException e) { + throw Throwables.propagate(e); + } + } + + public static Sigar getSigar() + { + return new Sigar(); + } + + /** + * The CurrentProcessIdHolder class is initialized after SigarUtil, which guarantees that new Sigar() is executed after + * the static block (which loads the library) of SigarUtil has run. This is anyway guaranteed by the JLS if the static + * field goes below the static block in textual order, but it is fragile, e.g. if someone applies automatic reformatting + * and the static field is moved above the static block. + */ + private static class CurrentProcessIdHolder + { + private static final long currentProcessId = new Sigar().getPid(); + } + + public static long getCurrentProcessId() + { + return CurrentProcessIdHolder.currentProcessId; + } + + } diff --git a/java-util/src/main/java/io/druid/java/util/metrics/SysMonitor.java b/java-util/src/main/java/io/druid/java/util/metrics/SysMonitor.java new file mode 100644 index 000000000000..21ac987e01cb --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/SysMonitor.java @@ -0,0 +1,584 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.metrics; + +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import org.hyperic.sigar.Cpu; +import org.hyperic.sigar.DirUsage; +import org.hyperic.sigar.DiskUsage; +import org.hyperic.sigar.FileSystem; +import org.hyperic.sigar.FileSystemUsage; +import org.hyperic.sigar.Mem; +import org.hyperic.sigar.NetInterfaceConfig; +import org.hyperic.sigar.NetInterfaceStat; +import org.hyperic.sigar.NetStat; +import org.hyperic.sigar.Sigar; +import org.hyperic.sigar.SigarException; +import org.hyperic.sigar.Swap; +import org.hyperic.sigar.Tcp; +import org.hyperic.sigar.Uptime; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +public class SysMonitor extends FeedDefiningMonitor +{ + private static final Logger log = new Logger(SysMonitor.class); + + private final Sigar sigar = SigarUtil.getSigar(); + + private final List fsTypeWhitelist = ImmutableList.of("local"); + private final List netAddressBlacklist = ImmutableList.of("0.0.0.0", "127.0.0.1"); + + private final List statsList; + + private Map dimensions; + + public SysMonitor() + { + this(ImmutableMap.of()); + } + + public SysMonitor(Map dimensions) + { + this(dimensions, DEFAULT_METRICS_FEED); + } + + public SysMonitor(Map dimensions, String feed) + { + super(feed); + Preconditions.checkNotNull(dimensions); + this.dimensions = ImmutableMap.copyOf(dimensions); + + sigar.enableLogging(true); + + this.statsList = new ArrayList(); + this.statsList.addAll( + Arrays.asList( + new MemStats(), + new FsStats(), + new DiskStats(), + new NetStats(), + new CpuStats(), + new SwapStats(), + new SysStats(), + new TcpStats() + ) + ); + } + + public void addDirectoriesToMonitor(String[] dirList) + { + for (int i = 0; i < dirList.length; i++) { + dirList[i] = dirList[i].trim(); + } + statsList.add(new DirStats(dirList)); + } + + @Override + public boolean doMonitor(ServiceEmitter emitter) + { + for (Stats stats : statsList) { + stats.emit(emitter); + } + return true; + } + + private interface Stats + { + void emit(ServiceEmitter emitter); + } + + private class MemStats implements Stats + { + @Override + public void emit(ServiceEmitter emitter) + { + Mem mem = null; + try { + mem = sigar.getMem(); + } + catch (SigarException e) { + log.error(e, "Failed to get Mem"); + } + if (mem != null) { + final Map stats = ImmutableMap.of( + "sys/mem/max", mem.getTotal(), + "sys/mem/used", mem.getUsed(), + "sys/mem/actual/used", mem.getActualUsed(), + "sys/mem/actual/free", mem.getActualFree() + ); + final ServiceMetricEvent.Builder builder = builder(); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + } + } + + /** + * Gets the swap stats from sigar and emits the periodic pages in & pages out of memory + * along with the max swap and free swap memory. 
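+ * Page in/out counts are emitted as deltas against the previous emission; the constructor seeds the baseline, so the
+ * first data point covers activity since the monitor was created.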
+ */ + private class SwapStats implements Stats + { + private long prevPageIn = 0, prevPageOut = 0; + + private SwapStats() + { + try { + Swap swap = sigar.getSwap(); + this.prevPageIn = swap.getPageIn(); + this.prevPageOut = swap.getPageOut(); + } + catch (SigarException e) { + log.error(e, "Failed to get Swap"); + } + } + + @Override + public void emit(ServiceEmitter emitter) + { + Swap swap = null; + try { + swap = sigar.getSwap(); + } + catch (SigarException e) { + log.error(e, "Failed to get Swap"); + } + if (swap != null) { + long currPageIn = swap.getPageIn(); + long currPageOut = swap.getPageOut(); + + final Map stats = ImmutableMap.of( + "sys/swap/pageIn", (currPageIn - prevPageIn), + "sys/swap/pageOut", (currPageOut - prevPageOut), + "sys/swap/max", swap.getTotal(), + "sys/swap/free", swap.getFree() + ); + + final ServiceMetricEvent.Builder builder = builder(); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + + this.prevPageIn = currPageIn; + this.prevPageOut = currPageOut; + } + } + } + + /** + * Gets the disk usage of a particular directory. + */ + private class DirStats implements Stats + { + private final String[] dirList; + + private DirStats(String[] dirList) + { + this.dirList = dirList; + } + + @Override + public void emit(ServiceEmitter emitter) + { + for (String dir : dirList) { + DirUsage du = null; + try { + du = sigar.getDirUsage(dir); + } + catch (SigarException e) { + log.error("Failed to get DiskUsage for [%s] due to [%s]", dir, e.getMessage()); + } + if (du != null) { + final Map stats = ImmutableMap.of( + "sys/storage/used", du.getDiskUsage() + ); + final ServiceMetricEvent.Builder builder = builder() + .setDimension("fsDirName", dir); // fsDirName because FsStats uses fsDirName + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + } + } + } + + private class FsStats implements Stats + { + @Override + public void emit(ServiceEmitter emitter) + { + FileSystem[] fss = null; + try { + fss = sigar.getFileSystemList(); + } + catch (SigarException e) { + log.error(e, "Failed to get FileSystem list"); + } + if (fss != null) { + log.debug("Found FileSystem list: [%s]", Joiner.on(", ").join(fss)); + for (FileSystem fs : fss) { + final String name = fs.getDirName(); // (fs.getDevName() does something wonky here!) 
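+ // getDirName() is the mount point, which is also what sigar.getFileSystemUsage() expects below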
+ if (fsTypeWhitelist.contains(fs.getTypeName())) { + FileSystemUsage fsu = null; + try { + fsu = sigar.getFileSystemUsage(name); + } + catch (SigarException e) { + log.error(e, "Failed to get FileSystemUsage[%s]", name); + } + if (fsu != null) { + final Map stats = ImmutableMap.builder() + .put("sys/fs/max", fsu.getTotal() * 1024) + .put("sys/fs/used", fsu.getUsed() * 1024) + .put("sys/fs/files/count", fsu.getFiles()) + .put("sys/fs/files/free", fsu.getFreeFiles()) + .build(); + final ServiceMetricEvent.Builder builder = builder() + .setDimension("fsDevName", fs.getDevName()) + .setDimension("fsDirName", fs.getDirName()) + .setDimension("fsTypeName", fs.getTypeName()) + .setDimension("fsSysTypeName", fs.getSysTypeName()) + .setDimension("fsOptions", fs.getOptions().split(",")); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + } else { + log.debug("Not monitoring fs stats for name[%s] with typeName[%s]", name, fs.getTypeName()); + } + } + } + } + } + + private class DiskStats implements Stats + { + private final KeyedDiff diff = new KeyedDiff(); + + @Override + public void emit(ServiceEmitter emitter) + { + FileSystem[] fss = null; + try { + fss = sigar.getFileSystemList(); + } + catch (SigarException e) { + log.error(e, "Failed to get FileSystem list"); + } + if (fss != null) { + log.debug("Found FileSystem list: [%s]", Joiner.on(", ").join(fss)); + for (FileSystem fs : fss) { + // fs.getDevName() appears to give the same results here, but on some nodes results for one disc were substituted by another + // LOG: Sigar - /proc/diskstats /dev/xvdj -> /dev/xvdb [202,16] + final String name = fs.getDirName(); + if (fsTypeWhitelist.contains(fs.getTypeName())) { + DiskUsage du = null; + try { + du = sigar.getDiskUsage(name); + } + catch (SigarException e) { + log.error(e, "Failed to get DiskUsage[%s]", name); + } + if (du != null) { + final Map stats = diff.to( + name, ImmutableMap.builder() + .put("sys/disk/read/size", du.getReadBytes()) + .put("sys/disk/read/count", du.getReads()) + .put("sys/disk/write/size", du.getWriteBytes()) + .put("sys/disk/write/count", du.getWrites()) + .put("sys/disk/queue", Double.valueOf(du.getQueue()).longValue()) + .put("sys/disk/serviceTime", Double.valueOf(du.getServiceTime()).longValue()) + .build() + ); + log.debug("DiskUsage diff for [%s]: %s", name, stats); + if (stats != null) { + final ServiceMetricEvent.Builder builder = builder() + .setDimension("fsDevName", fs.getDevName()) + .setDimension("fsDirName", fs.getDirName()) + .setDimension("fsTypeName", fs.getTypeName()) + .setDimension("fsSysTypeName", fs.getSysTypeName()) + .setDimension("fsOptions", fs.getOptions().split(",")); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + } + } else { + log.debug("Not monitoring disk stats for name[%s] with typeName[%s]", name, fs.getTypeName()); + } + } + } + } + } + + private class NetStats implements Stats + { + private final KeyedDiff diff = new KeyedDiff(); + + @Override + public void emit(ServiceEmitter emitter) + { + String[] ifaces = null; + try { + ifaces = sigar.getNetInterfaceList(); + } + catch (SigarException e) { + log.error(e, "Failed to get NetInterface list"); + } + if (ifaces != null) { + log.debug("Found NetInterface list: [%s]", Joiner.on(", ").join(ifaces)); + for (String name : ifaces) { + 
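+ // look up the interface config first so blacklisted addresses (e.g. loopback) are skipped before stats are read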
NetInterfaceConfig netconf = null; + try { + netconf = sigar.getNetInterfaceConfig(name); + } + catch (SigarException e) { + log.error(e, "Failed to get NetInterfaceConfig[%s]", name); + } + if (netconf != null) { + if (!(netAddressBlacklist.contains(netconf.getAddress()))) { + NetInterfaceStat netstat = null; + try { + netstat = sigar.getNetInterfaceStat(name); + } + catch (SigarException e) { + log.error(e, "Failed to get NetInterfaceStat[%s]", name); + } + if (netstat != null) { + final Map stats = diff.to( + name, ImmutableMap.builder() + .put("sys/net/read/size", netstat.getRxBytes()) + .put("sys/net/read/packets", netstat.getRxPackets()) + .put("sys/net/read/errors", netstat.getRxErrors()) + .put("sys/net/read/dropped", netstat.getRxDropped()) + .put("sys/net/read/overruns", netstat.getRxOverruns()) + .put("sys/net/read/frame", netstat.getRxFrame()) + .put("sys/net/write/size", netstat.getTxBytes()) + .put("sys/net/write/packets", netstat.getTxPackets()) + .put("sys/net/write/errors", netstat.getTxErrors()) + .put("sys/net/write/dropped", netstat.getTxDropped()) + .put("sys/net/write/collisions", netstat.getTxCollisions()) + .put("sys/net/write/overruns", netstat.getTxOverruns()) + .build() + ); + if (stats != null) { + final ServiceMetricEvent.Builder builder = builder() + .setDimension("netName", netconf.getName()) + .setDimension("netAddress", netconf.getAddress()) + .setDimension("netHwaddr", netconf.getHwaddr()); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + } + } else { + log.debug("Not monitoring net stats for name[%s] with address[%s]", name, netconf.getAddress()); + } + } + } + } + } + } + + private class CpuStats implements Stats + { + private final KeyedDiff diff = new KeyedDiff(); + + @Override + public void emit(ServiceEmitter emitter) + { + Cpu[] cpus = null; + try { + cpus = sigar.getCpuList(); + } + catch (SigarException e) { + log.error(e, "Failed to get Cpu list"); + } + if (cpus != null) { + log.debug("Found Cpu list: [%s]", Joiner.on(", ").join(cpus)); + for (int i = 0; i < cpus.length; ++i) { + final Cpu cpu = cpus[i]; + final String name = Integer.toString(i); + final Map stats = diff.to( + name, ImmutableMap.builder() + .put("user", cpu.getUser()) // user = Δuser / Δtotal + .put("sys", cpu.getSys()) // sys = Δsys / Δtotal + .put("nice", cpu.getNice()) // nice = Δnice / Δtotal + .put("wait", cpu.getWait()) // wait = Δwait / Δtotal + .put("irq", cpu.getIrq()) // irq = Δirq / Δtotal + .put("softIrq", cpu.getSoftIrq()) // softIrq = ΔsoftIrq / Δtotal + .put("stolen", cpu.getStolen()) // stolen = Δstolen / Δtotal + .put("_total", cpu.getTotal()) // (not reported) + .build() + ); + if (stats != null) { + final long total = stats.remove("_total"); + for (Map.Entry entry : stats.entrySet()) { + final ServiceMetricEvent.Builder builder = builder() + .setDimension("cpuName", name) + .setDimension("cpuTime", entry.getKey()); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + emitter.emit(builder.build("sys/cpu", entry.getValue() * 100 / total)); // [0,100] + } + } + } + } + } + } + + private class SysStats implements Stats + { + @Override + public void emit(ServiceEmitter emitter) + { + final ServiceMetricEvent.Builder builder = builder(); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + Uptime uptime = null; + try { + uptime = sigar.getUptime(); + } + catch (SigarException e) { + log.error(e, "Failed to get 
Uptime"); + } + + double[] la = null; + try { + la = sigar.getLoadAverage(); + } + catch (SigarException e) { + log.error(e, "Failed to get Load Average"); + } + + if (uptime != null) { + final Map stats = ImmutableMap.of( + "sys/uptime", Double.valueOf(uptime.getUptime()).longValue() + ); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + + if (la != null) { + final Map stats = ImmutableMap.of( + "sys/la/1", la[0], + "sys/la/5", la[1], + "sys/la/15", la[2] + ); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + } + } + + private class TcpStats implements Stats + { + private final KeyedDiff diff = new KeyedDiff(); + + @Override + public void emit(ServiceEmitter emitter) + { + final ServiceMetricEvent.Builder builder = builder(); + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + Tcp tcp = null; + try { + tcp = sigar.getTcp(); + } + catch (SigarException e) { + log.error(e, "Failed to get Tcp"); + } + + if (tcp != null) { + final Map stats = diff.to( + "tcp", ImmutableMap.builder() + .put("sys/tcp/activeOpens", tcp.getActiveOpens()) + .put("sys/tcp/passiveOpens", tcp.getPassiveOpens()) + .put("sys/tcp/attemptFails", tcp.getAttemptFails()) + .put("sys/tcp/estabResets", tcp.getEstabResets()) + .put("sys/tcp/in/segs", tcp.getInSegs()) + .put("sys/tcp/in/errs", tcp.getInErrs()) + .put("sys/tcp/out/segs", tcp.getOutSegs()) + .put("sys/tcp/out/rsts", tcp.getOutRsts()) + .put("sys/tcp/retrans/segs", tcp.getRetransSegs()) + .build() + ); + if (stats != null) { + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + } + + NetStat netStat = null; + try { + netStat = sigar.getNetStat(); + } + catch (SigarException e) { + log.error(e, "Failed to get NetStat"); + } + if (netStat != null) { + final Map stats = ImmutableMap.builder() + .put("sys/net/inbound", (long) netStat.getAllInboundTotal()) + .put("sys/net/outbound", (long) netStat.getAllOutboundTotal()) + .put("sys/tcp/inbound", (long) netStat.getTcpInboundTotal()) + .put("sys/tcp/outbound", (long) netStat.getTcpOutboundTotal()) + .put( + "sys/tcp/state/established", + (long) netStat.getTcpEstablished() + ) + .put("sys/tcp/state/synSent", (long) netStat.getTcpSynSent()) + .put("sys/tcp/state/synRecv", (long) netStat.getTcpSynRecv()) + .put("sys/tcp/state/finWait1", (long) netStat.getTcpFinWait1()) + .put("sys/tcp/state/finWait2", (long) netStat.getTcpFinWait2()) + .put("sys/tcp/state/timeWait", (long) netStat.getTcpTimeWait()) + .put("sys/tcp/state/close", (long) netStat.getTcpClose()) + .put("sys/tcp/state/closeWait", (long) netStat.getTcpCloseWait()) + .put("sys/tcp/state/lastAck", (long) netStat.getTcpLastAck()) + .put("sys/tcp/state/listen", (long) netStat.getTcpListen()) + .put("sys/tcp/state/closing", (long) netStat.getTcpClosing()) + .put("sys/tcp/state/idle", (long) netStat.getTcpIdle()) + .put("sys/tcp/state/bound", (long) netStat.getTcpBound()) + .build(); + for (Map.Entry entry : stats.entrySet()) { + emitter.emit(builder.build(entry.getKey(), entry.getValue())); + } + } + } + } +} diff --git a/java-util/src/main/java/io/druid/java/util/metrics/cgroups/CgroupDiscoverer.java b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/CgroupDiscoverer.java new file mode 100644 index 000000000000..83308947b34d --- /dev/null +++ b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/CgroupDiscoverer.java @@ -0,0 +1,32 @@ +/* 
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.metrics.cgroups;
+
+import java.nio.file.Path;
+
+public interface CgroupDiscoverer
+{
+  /**
+   * Returns the path for a specific cgroup. This path should contain the interesting cgroup files directly,
+   * without further traversal needed.
+   *
+   * @param cgroup The cgroup
+   *
+   * @return The path that contains that cgroup's interesting bits.
+   */
+  Path discover(String cgroup);
+}
diff --git a/java-util/src/main/java/io/druid/java/util/metrics/cgroups/CpuAcct.java b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/CpuAcct.java
new file mode 100644
index 000000000000..ad0ceb53525f
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/CpuAcct.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.metrics.cgroups;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import io.druid.java.util.common.RE;
+import io.druid.java.util.common.logger.Logger;
+import io.druid.java.util.metrics.CgroupUtil;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.List;
+import java.util.stream.LongStream;
+
+public class CpuAcct
+{
+  private static final Logger LOG = new Logger(CpuAcct.class);
+  private static final String CGROUP = "cpuacct";
+  private static final String CGROUP_ACCT_FILE = "cpuacct.usage_all";
+
+  // Private because it requires a specific format and can't take a generic list of strings
+  private static CpuAcctMetric parse(final List<String> lines)
+  {
+    // The file has a header line, which we skip.
+    // See src/test/resources/cpuacct.usage_all for an example
+    final int ncpus = lines.size() - 1;
+    final long[] usrTime = new long[ncpus];
+    final long[] sysTime = new long[ncpus];
+    for (int i = 1; i < lines.size(); i++) {
+      final String[] splits = lines.get(i).split(CgroupUtil.SPACE_MATCH, 3);
+      if (splits.length != 3) {
+        throw new RE("Error parsing [%s]", lines.get(i));
+      }
+      final int cpuNum = Integer.parseInt(splits[0]);
+      usrTime[cpuNum] = Long.parseLong(splits[1]);
+      sysTime[cpuNum] = Long.parseLong(splits[2]);
+    }
+    return new CpuAcctMetric(usrTime, sysTime);
+  }
+
+  private final CgroupDiscoverer cgroupDiscoverer;
+
+  public CpuAcct(CgroupDiscoverer cgroupDiscoverer)
+  {
+    this.cgroupDiscoverer = cgroupDiscoverer;
+  }
+
+  /**
+   * Take a snapshot of the current cpuacct data.
+   *
+   * @return A snapshot with the data populated, or a snapshot with zero-length arrays if the cgroup
+   * could not be discovered.
+   */
+  public CpuAcctMetric snapshot()
+  {
+    final File cpuacct;
+    try {
+      cpuacct = new File(
+          cgroupDiscoverer.discover(CGROUP).toFile(),
+          CGROUP_ACCT_FILE
+      );
+    }
+    catch (RuntimeException re) {
+      LOG.error(re, "Unable to fetch snapshot");
+      return new CpuAcctMetric(new long[0], new long[0]);
+    }
+    try {
+      return parse(Files.readAllLines(cpuacct.toPath(), Charsets.UTF_8));
+    }
+    catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  public static class CpuAcctMetric
+  {
+    private final long[] usrTimes;
+    private final long[] sysTimes;
+
+    CpuAcctMetric(long[] usrTimes, long[] sysTimes)
+    {
+      Preconditions.checkArgument(usrTimes.length == sysTimes.length, "Lengths must match");
+      this.usrTimes = usrTimes;
+      this.sysTimes = sysTimes;
+    }
+
+    public final int cpuCount()
+    {
+      return usrTimes.length;
+    }
+
+    public final long[] sysTimes()
+    {
+      return sysTimes;
+    }
+
+    public final long[] usrTimes()
+    {
+      return usrTimes;
+    }
+
+    public final long usrTime(int cpuNum)
+    {
+      return usrTimes[cpuNum];
+    }
+
+    public final long sysTime(int cpuNum)
+    {
+      return sysTimes[cpuNum];
+    }
+
+    public final long usrTime()
+    {
+      return LongStream.of(usrTimes).sum();
+    }
+
+    public final long sysTime()
+    {
+      return LongStream.of(sysTimes).sum();
+    }
+
+    public final long time()
+    {
+      return usrTime() + sysTime();
+    }
+
+    public final CpuAcctMetric cumulativeSince(CpuAcctMetric other)
+    {
+      final int cpuCount = cpuCount();
+      Preconditions.checkArgument(cpuCount == other.cpuCount(), "CPU count mismatch");
+      final long[] sysTimes = new long[cpuCount];
+      final long[] usrTimes = new long[cpuCount];
+      for (int i = 0; i < cpuCount; i++) {
+        sysTimes[i] = this.sysTimes[i] - other.sysTimes[i];
+        usrTimes[i] = this.usrTimes[i] - other.usrTimes[i];
+      }
+      return new CpuAcctMetric(usrTimes, sysTimes);
+    }
+  }
+}
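For orientation, a minimal usage sketch of the two classes above (illustrative only, not part of the patch; it assumes the ProcSelfCgroupDiscoverer added later in this patch and a process running inside a cpuacct cgroup):

    // Snapshot cpuacct twice and compute per-CPU usage over the interval.
    CgroupDiscoverer discoverer = new ProcSelfCgroupDiscoverer();
    CpuAcct cpuAcct = new CpuAcct(discoverer);
    CpuAcct.CpuAcctMetric before = cpuAcct.snapshot();
    Thread.sleep(60_000); // measurement interval
    CpuAcct.CpuAcctMetric delta = cpuAcct.snapshot().cumulativeSince(before);
    // Times are nanoseconds as reported by cpuacct.usage_all; time() sums usr + sys across all CPUs.
    long busyNanos = delta.time();

Note that cumulativeSince throws if the two snapshots have different CPU counts, which includes the zero-length snapshots returned when discovery fails.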
diff --git a/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcCgroupDiscoverer.java b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcCgroupDiscoverer.java
new file mode 100644
index 000000000000..ca595cb3aa8e
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcCgroupDiscoverer.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.metrics.cgroups;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.io.Files;
+import io.druid.java.util.common.RE;
+import io.druid.java.util.metrics.CgroupUtil;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+public class ProcCgroupDiscoverer implements CgroupDiscoverer
+{
+  private static final String CGROUP_TYPE = "cgroup";
+
+  private final File procDir;
+
+  /**
+   * Create a proc discovery mechanism based on a `/proc` directory.
+   *
+   * @param procDir The directory under proc. This is usually `/proc/self` or `/proc/#pid`
+   */
+  public ProcCgroupDiscoverer(Path procDir)
+  {
+    this.procDir = Preconditions.checkNotNull(procDir, "procDir").toFile();
+    Preconditions.checkArgument(this.procDir.isDirectory(), "Not a directory: [%s]", procDir);
+  }
+
+  @Override
+  public Path discover(final String cgroup)
+  {
+    Preconditions.checkNotNull(cgroup, "cgroup required");
+    final File procMounts = new File(procDir, "mounts");
+    final File pidCgroups = new File(procDir, "cgroup");
+    final PidCgroupEntry pidCgroupsEntry = getCgroupEntry(pidCgroups, cgroup);
+    final ProcMountsEntry procMountsEntry = getMountEntry(procMounts, cgroup);
+    final File cgroupDir = new File(
+        procMountsEntry.path.toString(),
+        pidCgroupsEntry.path.toString()
+    );
+    if (cgroupDir.exists() && cgroupDir.isDirectory()) {
+      return cgroupDir.toPath();
+    }
+    throw new RE("Invalid cgroup directory [%s]", cgroupDir);
+  }
+
+  private PidCgroupEntry getCgroupEntry(final File procCgroup, final String cgroup)
+  {
+    final List<String> lines;
+    try {
+      lines = Files.readLines(procCgroup, Charsets.UTF_8);
+    }
+    catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    for (final String line : lines) {
+      if (line.startsWith("#")) {
+        continue;
+      }
+      final PidCgroupEntry entry = PidCgroupEntry.parse(line);
+      if (entry.controllers.contains(cgroup)) {
+        return entry;
+      }
+    }
+    throw new RE("Hierarchy for [%s] not found", cgroup);
+  }
+
+  private ProcMountsEntry getMountEntry(final File procMounts, final String cgroup)
+  {
+    final List<String> lines;
+    try {
+      lines = Files.readLines(procMounts, Charsets.UTF_8);
+    }
+    catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+
+    for (final String line : lines) {
+      final ProcMountsEntry entry = ProcMountsEntry.parse(line);
+      if (CGROUP_TYPE.equals(entry.type) && entry.options.contains(cgroup)) {
+        return entry;
+      }
+    }
+    throw new RE("Cgroup [%s] not found", cgroup);
+  }
+
+  /**
+   * Ignores the last two fields of each mount entry (the dump and fsck pass-number fields).
+   */
+  static class ProcMountsEntry
+  {
+    // Example: cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
+    static ProcMountsEntry parse(String entry)
+    {
+      final String[] splits = entry.split(CgroupUtil.SPACE_MATCH, 6);
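+      // A /proc/mounts line has six whitespace-separated fields (see fstab(5)):
+      //   device  mountpoint  fstype  options  dump  pass
+      // Only the first four are kept; "dump" and "pass" are irrelevant for cgroup discovery.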
+      Preconditions.checkArgument(splits.length == 6, "Invalid entry: [%s]", entry);
+      return new ProcMountsEntry(
+          splits[0],
+          Paths.get(splits[1]),
+          splits[2],
+          ImmutableSet.copyOf(splits[3].split(CgroupUtil.COMMA_MATCH))
+      );
+    }
+
+    final String dev;
+    final Path path;
+    final String type;
+    final Set<String> options;
+
+    ProcMountsEntry(String dev, Path path, String type, Collection<String> options)
+    {
+      this.dev = dev;
+      this.path = path;
+      this.type = type;
+      this.options = ImmutableSet.copyOf(options);
+    }
+  }
+
+  // See man CGROUPS(7)
+  static class PidCgroupEntry
+  {
+    static PidCgroupEntry parse(String entry)
+    {
+      // Split into at most three parts: the cgroup path itself may contain extra ':' characters,
+      // for example in names embedding port numbers or ipv6 addresses.
+      final String[] parts = entry.split(Pattern.quote(":"), 3);
+      if (parts.length != 3) {
+        throw new RE("Bad entry [%s]", entry);
+      }
+      final int hierarchyId = Integer.parseInt(parts[0]);
+      final Set<String> controllers = new HashSet<>(Arrays.asList(parts[1].split(Pattern.quote(","))));
+      final Path path = Paths.get(parts[2]);
+      return new PidCgroupEntry(hierarchyId, controllers, path);
+    }
+
+    final int hierarchyId;
+    final Set<String> controllers;
+    final Path path;
+
+    private PidCgroupEntry(int hierarchyId, Set<String> controllers, Path path)
+    {
+      this.hierarchyId = hierarchyId;
+      this.controllers = controllers;
+      this.path = path;
+    }
+  }
+}
diff --git a/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcPidCgroupDiscoverer.java b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcPidCgroupDiscoverer.java
new file mode 100644
index 000000000000..23feb3d2466f
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcPidCgroupDiscoverer.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package io.druid.java.util.metrics.cgroups;
+
+import io.druid.java.util.metrics.PidDiscoverer;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+public class ProcPidCgroupDiscoverer implements CgroupDiscoverer
+{
+  private final ProcCgroupDiscoverer delegate;
+
+  public ProcPidCgroupDiscoverer(PidDiscoverer pidDiscoverer)
+  {
+    delegate = new ProcCgroupDiscoverer(Paths.get("/proc", Long.toString(pidDiscoverer.getPid())));
+  }
+
+  @Override
+  public Path discover(String cgroup)
+  {
+    return delegate.discover(cgroup);
+  }
+}
diff --git a/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcSelfCgroupDiscoverer.java b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcSelfCgroupDiscoverer.java
new file mode 100644
index 000000000000..b695dbba7ce8
--- /dev/null
+++ b/java-util/src/main/java/io/druid/java/util/metrics/cgroups/ProcSelfCgroupDiscoverer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Metamarkets Group Inc.
(Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics.cgroups; + +import java.nio.file.Path; +import java.nio.file.Paths; + +public class ProcSelfCgroupDiscoverer implements CgroupDiscoverer +{ + private final ProcCgroupDiscoverer delegate; + + public ProcSelfCgroupDiscoverer() + { + delegate = new ProcCgroupDiscoverer(Paths.get("/proc/self")); + } + + @Override + public Path discover(String cgroup) + { + return delegate.discover(cgroup); + } +} diff --git a/java-util/src/test/java/io/druid/concurrent/ConcurrentAwaitableCounterTest.java b/java-util/src/test/java/io/druid/concurrent/ConcurrentAwaitableCounterTest.java new file mode 100644 index 000000000000..c13fa7b7d7c3 --- /dev/null +++ b/java-util/src/test/java/io/druid/concurrent/ConcurrentAwaitableCounterTest.java @@ -0,0 +1,89 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.concurrent; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +public class ConcurrentAwaitableCounterTest +{ + + @Test(timeout = 1000) + public void smokeTest() throws InterruptedException + { + ConcurrentAwaitableCounter counter = new ConcurrentAwaitableCounter(); + CountDownLatch start = new CountDownLatch(1); + CountDownLatch finish = new CountDownLatch(7); + for (int i = 0; i < 2; i++) { + new Thread(() -> { + try { + start.await(); + for (int j = 0; j < 10_000; j++) { + counter.increment(); + } + finish.countDown(); + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + + }).start(); + } + for (int awaitCount : new int[] {0, 1, 100, 10_000, 20_000}) { + new Thread(() -> { + try { + start.await(); + counter.awaitCount(awaitCount); + finish.countDown(); + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + }).start(); + } + start.countDown(); + finish.await(); + } + + @Test + public void testAwaitFirstUpdate() throws InterruptedException + { + int[] value = new int[1]; + ConcurrentAwaitableCounter counter = new ConcurrentAwaitableCounter(); + Thread t = new Thread(() -> { + try { + Assert.assertTrue(counter.awaitFirstIncrement(10, TimeUnit.SECONDS)); + Assert.assertEquals(1, value[0]); + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + + }); + t.start(); + Thread.sleep(2_000); + value[0] = 1; + counter.increment(); + t.join(); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/common/FileUtilsTest.java b/java-util/src/test/java/io/druid/java/util/common/FileUtilsTest.java index a9594f7a02a1..3e4aea4eca8e 100644 --- a/java-util/src/test/java/io/druid/java/util/common/FileUtilsTest.java +++ b/java-util/src/test/java/io/druid/java/util/common/FileUtilsTest.java @@ -27,6 +27,7 @@ import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; +import java.nio.file.Files; public class FileUtilsTest { @@ -48,4 +49,29 @@ public void testMap() throws IOException long buffersMemoryAfter = BufferUtils.totalMemoryUsedByDirectAndMappedBuffers(); Assert.assertEquals(buffersMemoryBefore, buffersMemoryAfter); } + + @Test + public void testWriteAtomically() throws IOException + { + final File tmpDir = folder.newFolder(); + final File tmpFile = new File(tmpDir, "file1"); + FileUtils.writeAtomically(tmpFile, out -> out.write(StringUtils.toUtf8("foo"))); + Assert.assertEquals("foo", StringUtils.fromUtf8(Files.readAllBytes(tmpFile.toPath()))); + + // Try writing again, throw error partway through. + try { + FileUtils.writeAtomically(tmpFile, out -> { + out.write(StringUtils.toUtf8("bar")); + out.flush(); + throw new ISE("OMG!"); + }); + } + catch (IllegalStateException e) { + // Suppress + } + Assert.assertEquals("foo", StringUtils.fromUtf8(Files.readAllBytes(tmpFile.toPath()))); + + FileUtils.writeAtomically(tmpFile, out -> out.write(StringUtils.toUtf8("baz"))); + Assert.assertEquals("baz", StringUtils.fromUtf8(Files.readAllBytes(tmpFile.toPath()))); + } } diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/ComposingEmitterTest.java b/java-util/src/test/java/io/druid/java/util/emitter/core/ComposingEmitterTest.java new file mode 100644 index 000000000000..0b9a3fe22d9f --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/ComposingEmitterTest.java @@ -0,0 +1,100 @@ +/* + * Licensed to Metamarkets Group Inc. 
(Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.google.common.collect.ImmutableList; +import org.easymock.EasyMock; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.List; + +public class ComposingEmitterTest +{ + + private List childEmitters; + private ComposingEmitter composingEmitter; + + @Before + public void setup() + { + this.childEmitters = ImmutableList.of( + EasyMock.createMock(Emitter.class), + EasyMock.createMock(Emitter.class) + ); + this.composingEmitter = new ComposingEmitter(childEmitters); + } + + @Test + public void testStart() + { + for (Emitter emitter : childEmitters) { + emitter.start(); + EasyMock.replay(emitter); + } + + composingEmitter.start(); + } + + @Test + public void testEmit() + { + Event e = EasyMock.createMock(Event.class); + + for (Emitter emitter : childEmitters) { + emitter.emit(e); + EasyMock.replay(emitter); + } + + composingEmitter.emit(e); + } + + @Test + public void testFlush() throws IOException + { + for (Emitter emitter : childEmitters) { + emitter.flush(); + EasyMock.replay(emitter); + } + + composingEmitter.flush(); + } + + @Test + public void testClose() throws IOException + { + for (Emitter emitter : childEmitters) { + emitter.close(); + EasyMock.replay(emitter); + } + + composingEmitter.close(); + } + + @After + public void tearDown() + { + for (Emitter emitter : childEmitters) { + EasyMock.verify(emitter); + } + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/CustomEmitterFactoryTest.java b/java-util/src/test/java/io/druid/java/util/emitter/core/CustomEmitterFactoryTest.java new file mode 100644 index 000000000000..34eb20d64967 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/CustomEmitterFactoryTest.java @@ -0,0 +1,103 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.factory.EmitterFactory; +import org.asynchttpclient.AsyncHttpClient; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.util.Properties; + +public class CustomEmitterFactoryTest +{ + @JsonTypeName("test") + public static class TestEmitterConfig implements EmitterFactory + { + @JsonProperty + private String stringProperty; + @JsonProperty + private int intProperty; + + @Override + public Emitter makeEmitter(ObjectMapper objectMapper, AsyncHttpClient httpClient, Lifecycle lifecycle) + { + return new StubEmitter(stringProperty, intProperty); + } + } + + public static class StubEmitter implements Emitter + { + private String stringProperty; + private int intProperty; + + public StubEmitter(String stringProperty, int intProperty) + { + this.stringProperty = stringProperty; + this.intProperty = intProperty; + } + + public String getStringProperty() + { + return stringProperty; + } + + public int getIntProperty() + { + return intProperty; + } + + @Override + public void start() {} + + @Override + public void emit(Event event) {} + + @Override + public void flush() throws IOException {} + + @Override + public void close() throws IOException {} + } + + @Test + public void testCustomEmitter() + { + final Properties props = new Properties(); + props.put("io.druid.java.util.emitter.stringProperty", "http://example.com/"); + props.put("io.druid.java.util.emitter.intProperty", "1"); + props.put("io.druid.java.util.emitter.type", "test"); + + final ObjectMapper objectMapper = new ObjectMapper(); + objectMapper.registerSubtypes(TestEmitterConfig.class); + final Lifecycle lifecycle = new Lifecycle(); + final Emitter emitter = Emitters.create(props, null, objectMapper, lifecycle); + + Assert.assertTrue("created emitter should be of class StubEmitter", emitter instanceof StubEmitter); + StubEmitter stubEmitter = (StubEmitter) emitter; + Assert.assertEquals("http://example.com/", stubEmitter.getStringProperty()); + Assert.assertEquals(1, stubEmitter.getIntProperty()); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/EmitterTest.java b/java-util/src/test/java/io/druid/java/util/emitter/core/EmitterTest.java new file mode 100644 index 000000000000..d94700be9d43 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/EmitterTest.java @@ -0,0 +1,595 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Charsets; +import com.google.common.io.BaseEncoding; +import io.druid.java.util.common.CompressionUtils; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.service.UnitEvent; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import org.asynchttpclient.DefaultAsyncHttpClientConfig; +import org.asynchttpclient.ListenableFuture; +import org.asynchttpclient.Request; +import org.asynchttpclient.Response; +import org.asynchttpclient.netty.EagerResponseBodyPart; +import org.asynchttpclient.netty.NettyResponseStatus; +import org.asynchttpclient.uri.Uri; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +/** + */ +public class EmitterTest +{ + private static final ObjectMapper jsonMapper = new ObjectMapper(); + public static String TARGET_URL = "http://metrics.foo.bar/"; + public static final Response OK_RESPONSE = responseBuilder(HttpVersion.HTTP_1_1, HttpResponseStatus.CREATED) + .accumulate(new EagerResponseBodyPart(Unpooled.wrappedBuffer("Yay".getBytes(StandardCharsets.UTF_8)), true)) + .build(); + + public static final Response BAD_RESPONSE = responseBuilder(HttpVersion.HTTP_1_1, HttpResponseStatus.FORBIDDEN) + .accumulate(new EagerResponseBodyPart(Unpooled.wrappedBuffer("Not yay".getBytes(StandardCharsets.UTF_8)), true)) + .build(); + + private static Response.ResponseBuilder responseBuilder(HttpVersion version, HttpResponseStatus status) + { + return new Response.ResponseBuilder() + .accumulate( + new NettyResponseStatus( + Uri.create(TARGET_URL), + new DefaultAsyncHttpClientConfig.Builder().build(), + new DefaultHttpResponse(version, status), + null + ) + ); + } + + + MockHttpClient httpClient; + HttpPostEmitter emitter; + + public static Response okResponse() + { + return OK_RESPONSE; + } + + @Before + public void setUp() throws Exception + { + httpClient = new MockHttpClient(); + } + + @After + public void tearDown() throws Exception + { + if (emitter != null) { + emitter.close(); + } + } + + private HttpPostEmitter timeBasedEmitter(long timeInMillis) + { + HttpEmitterConfig config = new HttpEmitterConfig.Builder(TARGET_URL) + .setFlushMillis(timeInMillis) + .setFlushCount(Integer.MAX_VALUE) + .build(); + HttpPostEmitter emitter = new HttpPostEmitter( + config, + httpClient, + jsonMapper + ); + emitter.start(); + return emitter; + } + + private HttpPostEmitter sizeBasedEmitter(int size) + { + HttpEmitterConfig config = new HttpEmitterConfig.Builder(TARGET_URL) + .setFlushMillis(Long.MAX_VALUE) + .setFlushCount(size) + .build(); + HttpPostEmitter emitter = new HttpPostEmitter( + config, + httpClient, + jsonMapper + ); + emitter.start(); + return emitter; + } + + private HttpPostEmitter 
sizeBasedEmitterGeneralizedCreation(int size) + { + Properties props = new Properties(); + props.setProperty("io.druid.java.util.emitter.type", "http"); + props.setProperty("io.druid.java.util.emitter.recipientBaseUrl", TARGET_URL); + props.setProperty("io.druid.java.util.emitter.flushMillis", String.valueOf(Long.MAX_VALUE)); + props.setProperty("io.druid.java.util.emitter.flushCount", String.valueOf(size)); + + Lifecycle lifecycle = new Lifecycle(); + Emitter emitter = Emitters.create(props, httpClient, jsonMapper, lifecycle); + Assert.assertTrue(StringUtils.format( + "HttpPostEmitter emitter should be created, but found %s", + emitter.getClass().getName() + ), emitter instanceof HttpPostEmitter); + emitter.start(); + return (HttpPostEmitter) emitter; + } + + private HttpPostEmitter sizeBasedEmitterWithContentEncoding(int size, ContentEncoding encoding) + { + HttpEmitterConfig config = new HttpEmitterConfig.Builder(TARGET_URL) + .setFlushMillis(Long.MAX_VALUE) + .setFlushCount(size) + .setContentEncoding(encoding) + .build(); + HttpPostEmitter emitter = new HttpPostEmitter( + config, + httpClient, + jsonMapper + ); + emitter.start(); + return emitter; + } + + private HttpPostEmitter manualFlushEmitterWithBasicAuthenticationAndNewlineSeparating(String authentication) + { + HttpEmitterConfig config = new HttpEmitterConfig.Builder(TARGET_URL) + .setFlushMillis(Long.MAX_VALUE) + .setFlushCount(Integer.MAX_VALUE) + .setBasicAuthentication(authentication) + .setBatchingStrategy(BatchingStrategy.NEWLINES) + .setMaxBatchSize(1024 * 1024) + .build(); + HttpPostEmitter emitter = new HttpPostEmitter( + config, + httpClient, + jsonMapper + ); + emitter.start(); + return emitter; + } + + private HttpPostEmitter manualFlushEmitterWithBatchSizeAndBufferSize(int batchSize, long bufferSize) + { + HttpEmitterConfig config = new HttpEmitterConfig.Builder(TARGET_URL) + .setFlushMillis(Long.MAX_VALUE) + .setFlushCount(Integer.MAX_VALUE) + .setMaxBatchSize(batchSize) + .build(); + HttpPostEmitter emitter = new HttpPostEmitter( + config, + httpClient, + jsonMapper + ); + emitter.start(); + return emitter; + } + + @Test + public void testSanity() throws Exception + { + final List events = Arrays.asList( + new UnitEvent("test", 1), + new UnitEvent("test", 2) + ); + emitter = sizeBasedEmitter(2); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) throws JsonProcessingException + { + Assert.assertEquals(TARGET_URL, request.getUrl()); + Assert.assertEquals( + "application/json", + request.getHeaders().get(HttpHeaders.Names.CONTENT_TYPE) + ); + Assert.assertEquals( + StringUtils.format( + "[%s,%s]\n", + jsonMapper.writeValueAsString(events.get(0)), + jsonMapper.writeValueAsString(events.get(1)) + ), + Charsets.UTF_8.decode(request.getByteBufferData().slice()).toString() + ); + + return GoHandlers.immediateFuture(okResponse()); + } + }.times(1) + ); + + for (UnitEvent event : events) { + emitter.emit(event); + } + waitForEmission(emitter, 1); + closeNoFlush(emitter); + Assert.assertTrue(httpClient.succeeded()); + } + + @Test + public void testSanityWithGeneralizedCreation() throws Exception + { + final List events = Arrays.asList( + new UnitEvent("test", 1), + new UnitEvent("test", 2) + ); + emitter = sizeBasedEmitterGeneralizedCreation(2); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) throws JsonProcessingException + { + Assert.assertEquals(TARGET_URL, request.getUrl()); + 
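+            // Mirrors testSanity(): an emitter built from properties via Emitters.create should produce
+            // a byte-for-byte identical request to one constructed directly.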
Assert.assertEquals( + "application/json", + request.getHeaders().get(HttpHeaders.Names.CONTENT_TYPE) + ); + Assert.assertEquals( + StringUtils.format( + "[%s,%s]\n", + jsonMapper.writeValueAsString(events.get(0)), + jsonMapper.writeValueAsString(events.get(1)) + ), + Charsets.UTF_8.decode(request.getByteBufferData().slice()).toString() + ); + + return GoHandlers.immediateFuture(okResponse()); + } + }.times(1) + ); + + for (UnitEvent event : events) { + emitter.emit(event); + } + waitForEmission(emitter, 1); + closeNoFlush(emitter); + Assert.assertTrue(httpClient.succeeded()); + } + + @Test + public void testSizeBasedEmission() throws Exception + { + emitter = sizeBasedEmitter(3); + + httpClient.setGoHandler(GoHandlers.failingHandler()); + emitter.emit(new UnitEvent("test", 1)); + emitter.emit(new UnitEvent("test", 2)); + + httpClient.setGoHandler(GoHandlers.passingHandler(okResponse()).times(1)); + emitter.emit(new UnitEvent("test", 3)); + waitForEmission(emitter, 1); + + httpClient.setGoHandler(GoHandlers.failingHandler()); + emitter.emit(new UnitEvent("test", 4)); + emitter.emit(new UnitEvent("test", 5)); + + closeAndExpectFlush(emitter); + Assert.assertTrue(httpClient.succeeded()); + } + + @Test + public void testTimeBasedEmission() throws Exception + { + final int timeBetweenEmissions = 100; + emitter = timeBasedEmitter(timeBetweenEmissions); + + final CountDownLatch latch = new CountDownLatch(1); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) + { + latch.countDown(); + return GoHandlers.immediateFuture(okResponse()); + } + }.times(1) + ); + + long emitTime = System.currentTimeMillis(); + emitter.emit(new UnitEvent("test", 1)); + + latch.await(); + long timeWaited = System.currentTimeMillis() - emitTime; + Assert.assertTrue( + StringUtils.format("timeWaited[%s] !< %s", timeWaited, timeBetweenEmissions * 2), + timeWaited < timeBetweenEmissions * 2 + ); + + waitForEmission(emitter, 1); + + final CountDownLatch thisLatch = new CountDownLatch(1); + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) + { + thisLatch.countDown(); + return GoHandlers.immediateFuture(okResponse()); + } + }.times(1) + ); + + emitTime = System.currentTimeMillis(); + emitter.emit(new UnitEvent("test", 2)); + + thisLatch.await(); + timeWaited = System.currentTimeMillis() - emitTime; + Assert.assertTrue( + StringUtils.format("timeWaited[%s] !< %s", timeWaited, timeBetweenEmissions * 2), + timeWaited < timeBetweenEmissions * 2 + ); + + waitForEmission(emitter, 2); + closeNoFlush(emitter); + Assert.assertTrue("httpClient.succeeded()", httpClient.succeeded()); + } + + @Test(timeout = 60_000) + public void testFailedEmission() throws Exception + { + final UnitEvent event1 = new UnitEvent("test", 1); + final UnitEvent event2 = new UnitEvent("test", 2); + emitter = sizeBasedEmitter(1); + Assert.assertEquals(0, emitter.getTotalEmittedEvents()); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) + { + Response response = responseBuilder(HttpVersion.HTTP_1_1, HttpResponseStatus.BAD_REQUEST).build(); + return GoHandlers.immediateFuture(response); + } + } + ); + emitter.emit(event1); + emitter.flush(); + waitForEmission(emitter, 1); + Assert.assertTrue(httpClient.succeeded()); + + // Failed to emit the first event. 
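+    // The 400 response above means the batch was not delivered; the emitter holds it for retry,
+    // so the emitted-events counter must still be zero.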
+ Assert.assertEquals(0, emitter.getTotalEmittedEvents()); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) + { + return GoHandlers.immediateFuture(okResponse()); + } + }.times(2) + ); + + emitter.emit(event2); + emitter.flush(); + waitForEmission(emitter, 2); + closeNoFlush(emitter); + // Failed event is emitted inside emitter thread, there is no other way to wait for it other than joining the + // emitterThread + emitter.joinEmitterThread(); + + // Succeed to emit both events. + Assert.assertEquals(2, emitter.getTotalEmittedEvents()); + + Assert.assertTrue(httpClient.succeeded()); + } + + @Test + public void testBasicAuthenticationAndNewlineSeparating() throws Exception + { + final List events = Arrays.asList( + new UnitEvent("test", 1), + new UnitEvent("test", 2) + ); + emitter = manualFlushEmitterWithBasicAuthenticationAndNewlineSeparating("foo:bar"); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) throws JsonProcessingException + { + Assert.assertEquals(TARGET_URL, request.getUrl()); + Assert.assertEquals( + "application/json", + request.getHeaders().get(HttpHeaders.Names.CONTENT_TYPE) + ); + Assert.assertEquals( + "Basic " + BaseEncoding.base64().encode(StringUtils.toUtf8("foo:bar")), + request.getHeaders().get(HttpHeaders.Names.AUTHORIZATION) + ); + Assert.assertEquals( + StringUtils.format( + "%s\n%s\n", + jsonMapper.writeValueAsString(events.get(0)), + jsonMapper.writeValueAsString(events.get(1)) + ), + Charsets.UTF_8.decode(request.getByteBufferData().slice()).toString() + ); + + return GoHandlers.immediateFuture(okResponse()); + } + }.times(1) + ); + + for (UnitEvent event : events) { + emitter.emit(event); + } + emitter.flush(); + waitForEmission(emitter, 1); + closeNoFlush(emitter); + Assert.assertTrue(httpClient.succeeded()); + } + + @Test + public void testBatchSplitting() throws Exception + { + final byte[] big = new byte[500 * 1024]; + for (int i = 0; i < big.length; i++) { + big[i] = 'x'; + } + final String bigString = StringUtils.fromUtf8(big); + final List events = Arrays.asList( + new UnitEvent(bigString, 1), + new UnitEvent(bigString, 2), + new UnitEvent(bigString, 3), + new UnitEvent(bigString, 4) + ); + final AtomicInteger counter = new AtomicInteger(); + emitter = manualFlushEmitterWithBatchSizeAndBufferSize(1024 * 1024, 5 * 1024 * 1024); + Assert.assertEquals(0, emitter.getTotalEmittedEvents()); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) throws JsonProcessingException + { + Assert.assertEquals(TARGET_URL, request.getUrl()); + Assert.assertEquals( + "application/json", + request.getHeaders().get(HttpHeaders.Names.CONTENT_TYPE) + ); + Assert.assertEquals( + StringUtils.format( + "[%s,%s]\n", + jsonMapper.writeValueAsString(events.get(counter.getAndIncrement())), + jsonMapper.writeValueAsString(events.get(counter.getAndIncrement())) + ), + Charsets.UTF_8.decode(request.getByteBufferData().slice()).toString() + ); + + return GoHandlers.immediateFuture(okResponse()); + } + }.times(3) + ); + + for (UnitEvent event : events) { + emitter.emit(event); + } + waitForEmission(emitter, 1); + Assert.assertEquals(2, emitter.getTotalEmittedEvents()); + + emitter.flush(); + waitForEmission(emitter, 2); + Assert.assertEquals(4, emitter.getTotalEmittedEvents()); + closeNoFlush(emitter); + Assert.assertTrue(httpClient.succeeded()); + } + + @Test + public void testGzipContentEncoding() throws 
Exception + { + final List events = Arrays.asList( + new UnitEvent("plain-text", 1), + new UnitEvent("plain-text", 2) + ); + + emitter = sizeBasedEmitterWithContentEncoding(2, ContentEncoding.GZIP); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) throws IOException + { + Assert.assertEquals(TARGET_URL, request.getUrl()); + Assert.assertEquals( + "application/json", + request.getHeaders().get(HttpHeaders.Names.CONTENT_TYPE) + ); + Assert.assertEquals( + HttpHeaders.Values.GZIP, + request.getHeaders().get(HttpHeaders.Names.CONTENT_ENCODING) + ); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ByteBuffer data = request.getByteBufferData().slice(); + byte[] dataArray = new byte[data.remaining()]; + data.get(dataArray); + CompressionUtils.gunzip(new ByteArrayInputStream(dataArray), baos); + + Assert.assertEquals( + StringUtils.format( + "[%s,%s]\n", + jsonMapper.writeValueAsString(events.get(0)), + jsonMapper.writeValueAsString(events.get(1)) + ), + baos.toString(Charsets.UTF_8.name()) + ); + + return GoHandlers.immediateFuture(okResponse()); + } + }.times(1) + ); + + for (UnitEvent event : events) { + emitter.emit(event); + } + waitForEmission(emitter, 1); + closeNoFlush(emitter); + Assert.assertTrue(httpClient.succeeded()); + } + + private void closeAndExpectFlush(Emitter emitter) throws IOException + { + httpClient.setGoHandler(GoHandlers.passingHandler(okResponse()).times(1)); + emitter.close(); + } + + private void closeNoFlush(Emitter emitter) throws IOException + { + emitter.close(); + } + + private void waitForEmission(HttpPostEmitter emitter, int batchNumber) throws Exception + { + emitter.waitForEmission(batchNumber); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/GoHandler.java b/java-util/src/test/java/io/druid/java/util/emitter/core/GoHandler.java new file mode 100644 index 000000000000..0e2226236131 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/GoHandler.java @@ -0,0 +1,77 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import com.google.common.base.Throwables; +import io.druid.java.util.common.ISE; +import org.asynchttpclient.ListenableFuture; +import org.asynchttpclient.Request; +import org.asynchttpclient.Response; + +import java.util.concurrent.atomic.AtomicInteger; + +/** +*/ +public abstract class GoHandler +{ + /******* Abstract Methods *********/ + protected abstract ListenableFuture go(Request request) throws X; + + /******* Non Abstract Methods ********/ + private volatile boolean succeeded = false; + + public boolean succeeded() + { + return succeeded; + } + + public ListenableFuture run(Request request) + { + try { + final ListenableFuture retVal = go(request); + succeeded = true; + return retVal; + } + catch (Throwable e) { + succeeded = false; + throw Throwables.propagate(e); + } + } + + public GoHandler times(final int n) + { + final GoHandler myself = this; + + return new GoHandler() + { + AtomicInteger counter = new AtomicInteger(0); + + @Override + public ListenableFuture go(final Request request) + { + if (counter.getAndIncrement() < n) { + return myself.go(request); + } + succeeded = false; + throw new ISE("Called more than %d times", n); + } + }; + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/GoHandlers.java b/java-util/src/test/java/io/druid/java/util/emitter/core/GoHandlers.java new file mode 100644 index 000000000000..4cd7e738b8a2 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/GoHandlers.java @@ -0,0 +1,125 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import io.druid.java.util.common.ISE; +import org.asynchttpclient.ListenableFuture; +import org.asynchttpclient.Request; +import org.asynchttpclient.Response; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + */ +public class GoHandlers +{ + public static GoHandler failingHandler() + { + return new GoHandler() + { + @Override + protected ListenableFuture go(Request request) + { + throw new ISE("Shouldn't be called"); + } + }; + } + + public static GoHandler passingHandler(final Response retVal) + { + return new GoHandler() + { + @Override + protected ListenableFuture go(Request request) + { + return immediateFuture(retVal); + } + }; + } + + static ListenableFuture immediateFuture(T val) + { + CompletableFuture future = CompletableFuture.completedFuture(val); + return new ListenableFuture() + { + @Override + public void done() + { + } + + @Override + public void abort(Throwable t) + { + } + + @Override + public void touch() + { + } + + @Override + public ListenableFuture addListener(Runnable listener, Executor exec) + { + future.thenAcceptAsync(r -> listener.run(), exec); + return this; + } + + @Override + public CompletableFuture toCompletableFuture() + { + return future; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) + { + return false; + } + + @Override + public boolean isCancelled() + { + return false; + } + + @Override + public boolean isDone() + { + return true; + } + + @Override + public T get() throws InterruptedException, ExecutionException + { + return future.get(); + } + + @Override + public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException + { + return future.get(timeout, unit); + } + }; + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/HttpEmitterConfigTest.java b/java-util/src/test/java/io/druid/java/util/emitter/core/HttpEmitterConfigTest.java new file mode 100644 index 000000000000..b94f92accda4 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/HttpEmitterConfigTest.java @@ -0,0 +1,172 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.Pair; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Properties; + +public class HttpEmitterConfigTest +{ + @Test + public void testDefaults() + { + final Properties props = new Properties(); + props.put("io.druid.java.util.emitter.recipientBaseUrl", "http://example.com/"); + + final ObjectMapper objectMapper = new ObjectMapper(); + final HttpEmitterConfig config = objectMapper.convertValue( + Emitters.makeCustomFactoryMap(props), + HttpEmitterConfig.class + ); + + Assert.assertEquals(60000, config.getFlushMillis()); + Assert.assertEquals(500, config.getFlushCount()); + Assert.assertEquals("http://example.com/", config.getRecipientBaseUrl()); + Assert.assertEquals(null, config.getBasicAuthentication()); + Assert.assertEquals(BatchingStrategy.ARRAY, config.getBatchingStrategy()); + Pair batchConfigPair = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit( + Runtime.getRuntime().maxMemory() + ); + Assert.assertEquals(batchConfigPair.lhs.intValue(), config.getMaxBatchSize()); + Assert.assertEquals(batchConfigPair.rhs.intValue(), config.getBatchQueueSizeLimit()); + Assert.assertEquals(Long.MAX_VALUE, config.getFlushTimeOut()); + Assert.assertEquals(2.0f, config.getHttpTimeoutAllowanceFactor(), 0.0f); + Assert.assertEquals(0, config.getMinHttpTimeoutMillis()); + } + + @Test + public void testDefaultsLegacy() + { + final Properties props = new Properties(); + props.put("io.druid.java.util.emitter.http.url", "http://example.com/"); + + final ObjectMapper objectMapper = new ObjectMapper(); + final HttpEmitterConfig config = objectMapper.convertValue(Emitters.makeHttpMap(props), HttpEmitterConfig.class); + + Assert.assertEquals(60000, config.getFlushMillis()); + Assert.assertEquals(300, config.getFlushCount()); + Assert.assertEquals("http://example.com/", config.getRecipientBaseUrl()); + Assert.assertEquals(null, config.getBasicAuthentication()); + Assert.assertEquals(BatchingStrategy.ARRAY, config.getBatchingStrategy()); + Pair batchConfigPair = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit( + Runtime.getRuntime().maxMemory() + ); + Assert.assertEquals(batchConfigPair.lhs.intValue(), config.getMaxBatchSize()); + Assert.assertEquals(batchConfigPair.rhs.intValue(), config.getBatchQueueSizeLimit()); + Assert.assertEquals(Long.MAX_VALUE, config.getFlushTimeOut()); + Assert.assertEquals(2.0f, config.getHttpTimeoutAllowanceFactor(), 0.0f); + Assert.assertEquals(0, config.getMinHttpTimeoutMillis()); + } + + @Test + public void testSettingEverything() + { + final Properties props = new Properties(); + props.setProperty("io.druid.java.util.emitter.flushMillis", "1"); + props.setProperty("io.druid.java.util.emitter.flushCount", "2"); + props.setProperty("io.druid.java.util.emitter.recipientBaseUrl", "http://example.com/"); + props.setProperty("io.druid.java.util.emitter.basicAuthentication", "a:b"); + props.setProperty("io.druid.java.util.emitter.batchingStrategy", "NEWLINES"); + props.setProperty("io.druid.java.util.emitter.maxBatchSize", "4"); + props.setProperty("io.druid.java.util.emitter.flushTimeOut", "1000"); + props.setProperty("io.druid.java.util.emitter.batchQueueSizeLimit", "2500"); + props.setProperty("io.druid.java.util.emitter.httpTimeoutAllowanceFactor", "3.0"); + props.setProperty("io.druid.java.util.emitter.minHttpTimeoutMillis", "100"); + + final ObjectMapper objectMapper = new ObjectMapper(); + final HttpEmitterConfig config 
= objectMapper.convertValue( + Emitters.makeCustomFactoryMap(props), + HttpEmitterConfig.class + ); + + Assert.assertEquals(1, config.getFlushMillis()); + Assert.assertEquals(2, config.getFlushCount()); + Assert.assertEquals("http://example.com/", config.getRecipientBaseUrl()); + Assert.assertEquals("a:b", config.getBasicAuthentication()); + Assert.assertEquals(BatchingStrategy.NEWLINES, config.getBatchingStrategy()); + Assert.assertEquals(4, config.getMaxBatchSize()); + Assert.assertEquals(1000, config.getFlushTimeOut()); + Assert.assertEquals(2500, config.getBatchQueueSizeLimit()); + Assert.assertEquals(3.0f, config.getHttpTimeoutAllowanceFactor(), 0.0f); + Assert.assertEquals(100, config.getMinHttpTimeoutMillis()); + } + + @Test + public void testSettingEverythingLegacy() + { + final Properties props = new Properties(); + props.setProperty("io.druid.java.util.emitter.flushMillis", "1"); + props.setProperty("io.druid.java.util.emitter.flushCount", "2"); + props.setProperty("io.druid.java.util.emitter.http.url", "http://example.com/"); + props.setProperty("io.druid.java.util.emitter.http.basicAuthentication", "a:b"); + props.setProperty("io.druid.java.util.emitter.http.batchingStrategy", "newlines"); + props.setProperty("io.druid.java.util.emitter.http.maxBatchSize", "4"); + props.setProperty("io.druid.java.util.emitter.http.flushTimeOut", "1000"); + props.setProperty("io.druid.java.util.emitter.http.batchQueueSizeLimit", "2500"); + props.setProperty("io.druid.java.util.emitter.http.httpTimeoutAllowanceFactor", "3.0"); + props.setProperty("io.druid.java.util.emitter.http.minHttpTimeoutMillis", "100"); + + final ObjectMapper objectMapper = new ObjectMapper(); + final HttpEmitterConfig config = objectMapper.convertValue(Emitters.makeHttpMap(props), HttpEmitterConfig.class); + + Assert.assertEquals(1, config.getFlushMillis()); + Assert.assertEquals(2, config.getFlushCount()); + Assert.assertEquals("http://example.com/", config.getRecipientBaseUrl()); + Assert.assertEquals("a:b", config.getBasicAuthentication()); + Assert.assertEquals(BatchingStrategy.NEWLINES, config.getBatchingStrategy()); + Assert.assertEquals(4, config.getMaxBatchSize()); + Assert.assertEquals(1000, config.getFlushTimeOut()); + Assert.assertEquals(2500, config.getBatchQueueSizeLimit()); + Assert.assertEquals(3.0f, config.getHttpTimeoutAllowanceFactor(), 0.0f); + Assert.assertEquals(100, config.getMinHttpTimeoutMillis()); + } + + @Test + public void testMemoryLimits() + { + Pair batchConfigPair = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit( + 64 * 1024 * 1024 + ); + Assert.assertEquals(3355443, batchConfigPair.lhs.intValue()); + Assert.assertEquals(2, batchConfigPair.rhs.intValue()); + + Pair batchConfigPair2 = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit( + 128 * 1024 * 1024 + ); + Assert.assertEquals(5242880, batchConfigPair2.lhs.intValue()); + Assert.assertEquals(2, batchConfigPair2.rhs.intValue()); + + Pair batchConfigPair3 = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit( + 256 * 1024 * 1024 + ); + Assert.assertEquals(5242880, batchConfigPair3.lhs.intValue()); + Assert.assertEquals(5, batchConfigPair3.rhs.intValue()); + + Pair batchConfigPair4 = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit( + Long.MAX_VALUE + ); + Assert.assertEquals(5242880, batchConfigPair4.lhs.intValue()); + Assert.assertEquals(50, batchConfigPair4.rhs.intValue()); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/HttpEmitterTest.java 
b/java-util/src/test/java/io/druid/java/util/emitter/core/HttpEmitterTest.java new file mode 100644 index 000000000000..85e471466e7c --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/HttpEmitterTest.java @@ -0,0 +1,86 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.primitives.Ints; +import org.asynchttpclient.ListenableFuture; +import org.asynchttpclient.Request; +import org.asynchttpclient.Response; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicLong; + +public class HttpEmitterTest +{ + private final MockHttpClient httpClient = new MockHttpClient(); + private static final ObjectMapper objectMapper = new ObjectMapper() + { + @Override + public byte[] writeValueAsBytes(Object value) throws JsonProcessingException + { + return Ints.toByteArray(((IntEvent) value).index); + } + }; + + private final AtomicLong timeoutUsed = new AtomicLong(); + + @Before + public void setup() + { + timeoutUsed.set(-1L); + + httpClient.setGoHandler(new GoHandler() + { + @Override + protected ListenableFuture go(Request request) + { + int timeout = request.getRequestTimeout(); + timeoutUsed.set(timeout); + return GoHandlers.immediateFuture(EmitterTest.okResponse()); + } + }); + } + + @Test + public void timeoutEmptyQueue() throws IOException, InterruptedException + { + final HttpEmitterConfig config = new HttpEmitterConfig.Builder("http://foo.bar") + .setBatchingStrategy(BatchingStrategy.ONLY_EVENTS) + .setHttpTimeoutAllowanceFactor(2.0f) + .build(); + final HttpPostEmitter emitter = new HttpPostEmitter(config, httpClient, objectMapper); + + emitter.start(); + emitter.emitAndReturnBatch(new IntEvent()); + emitter.flush(); + Assert.assertTrue(timeoutUsed.get() < 5); + + final Batch batch = emitter.emitAndReturnBatch(new IntEvent()); + Thread.sleep(1000); + batch.seal(); + emitter.flush(); + Assert.assertTrue(timeoutUsed.get() >= 2000 && timeoutUsed.get() < 3000); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/HttpPostEmitterStressTest.java b/java-util/src/test/java/io/druid/java/util/emitter/core/HttpPostEmitterStressTest.java new file mode 100644 index 000000000000..a467d366a000 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/HttpPostEmitterStressTest.java @@ -0,0 +1,259 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.primitives.Ints; +import com.google.common.util.concurrent.Futures; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import it.unimi.dsi.fastutil.ints.IntArrayList; +import it.unimi.dsi.fastutil.ints.IntList; +import org.asynchttpclient.ListenableFuture; +import org.asynchttpclient.Request; +import org.asynchttpclient.Response; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; + +public class HttpPostEmitterStressTest +{ + private static final int N = 10_000; + private static final Future OK_FUTURE = Futures.immediateFuture(EmitterTest.OK_RESPONSE); + private static final ObjectMapper objectMapper = new ObjectMapper() + { + @Override + public byte[] writeValueAsBytes(Object value) throws JsonProcessingException + { + return Ints.toByteArray(((IntEvent) value).index); + } + }; + + private final MockHttpClient httpClient = new MockHttpClient(); + + @Test + public void eventCountBased() throws InterruptedException, IOException + { + HttpEmitterConfig config = new HttpEmitterConfig.Builder("http://foo.bar") + .setFlushMillis(100) + .setFlushCount(4) + .setBatchingStrategy(BatchingStrategy.ONLY_EVENTS) + .setMaxBatchSize(1024 * 1024) + // For this test, we don't need any batches to be dropped, i. e. 
"gaps" in data + .setBatchQueueSizeLimit(1000) + .build(); + final HttpPostEmitter emitter = new HttpPostEmitter(config, httpClient, objectMapper); + int nThreads = Runtime.getRuntime().availableProcessors() * 2; + final List eventsPerThread = new ArrayList<>(nThreads); + final List> eventBatchesPerThread = new ArrayList<>(nThreads); + for (int i = 0; i < nThreads; i++) { + eventsPerThread.add(new IntArrayList()); + eventBatchesPerThread.add(new ArrayList()); + } + for (int i = 0; i < N; i++) { + eventsPerThread.get(ThreadLocalRandom.current().nextInt(nThreads)).add(i); + } + final BitSet emittedEvents = new BitSet(N); + httpClient.setGoHandler(new GoHandler() + { + @Override + protected ListenableFuture go(Request request) + { + ByteBuffer batch = request.getByteBufferData().slice(); + while (batch.remaining() > 0) { + emittedEvents.set(batch.getInt()); + } + return GoHandlers.immediateFuture(EmitterTest.okResponse()); + } + }); + emitter.start(); + final CountDownLatch threadsCompleted = new CountDownLatch(nThreads); + for (int i = 0; i < nThreads; i++) { + final int threadIndex = i; + new Thread() { + @Override + public void run() + { + IntList events = eventsPerThread.get(threadIndex); + List eventBatches = eventBatchesPerThread.get(threadIndex); + IntEvent event = new IntEvent(); + for (int i = 0, eventsSize = events.size(); i < eventsSize; i++) { + event.index = events.getInt(i); + eventBatches.add(emitter.emitAndReturnBatch(event)); + if (i % 16 == 0) { + try { + Thread.sleep(10); + } + catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + threadsCompleted.countDown(); + } + }.start(); + } + threadsCompleted.await(); + emitter.flush(); + System.out.println("Allocated buffers: " + emitter.getTotalAllocatedBuffers()); + for (int eventIndex = 0; eventIndex < N; eventIndex++) { + if (!emittedEvents.get(eventIndex)) { + for (int threadIndex = 0; threadIndex < eventsPerThread.size(); threadIndex++) { + IntList threadEvents = eventsPerThread.get(threadIndex); + int indexOfEvent = threadEvents.indexOf(eventIndex); + if (indexOfEvent >= 0) { + Batch batch = eventBatchesPerThread.get(threadIndex).get(indexOfEvent); + System.err.println(batch); + int bufferWatermark = batch.getSealedBufferWatermark(); + ByteBuffer batchBuffer = ByteBuffer.wrap(batch.buffer); + batchBuffer.limit(bufferWatermark); + while (batchBuffer.remaining() > 0) { + System.err.println(batchBuffer.getInt()); + } + break; + } + } + throw new AssertionError("event " + eventIndex); + } + } + } + + @Test + public void testLargeEventsQueueLimit() throws InterruptedException, IOException + { + ObjectMapper mapper = new ObjectMapper(); + + HttpEmitterConfig config = new HttpEmitterConfig.Builder("http://foo.bar") + .setFlushMillis(100) + .setFlushCount(4) + .setBatchingStrategy(BatchingStrategy.ONLY_EVENTS) + .setMaxBatchSize(1024 * 1024) + .setBatchQueueSizeLimit(10) + .build(); + final HttpPostEmitter emitter = new HttpPostEmitter(config, httpClient, new ObjectMapper()); + + emitter.start(); + + httpClient.setGoHandler(new GoHandler() { + @Override + protected ListenableFuture go(Request request) throws X + { + return GoHandlers.immediateFuture(EmitterTest.BAD_RESPONSE); + } + }); + + char[] chars = new char[600000]; + Arrays.fill(chars, '*'); + String bigString = new String(chars); + + Event bigEvent = ServiceMetricEvent.builder() + .setFeed("bigEvents") + .setDimension("test", bigString) + .build("metric", 10) + .build("qwerty", "asdfgh"); + + for (int i = 0; i < 1000; i++) { + emitter.emit(bigEvent); 
+ Assert.assertTrue(emitter.getLargeEventsToEmit() <= 11); + } + + emitter.flush(); + } + + @Test + public void testLargeAndSmallEventsQueueLimit() throws InterruptedException, IOException + { + HttpEmitterConfig config = new HttpEmitterConfig.Builder("http://foo.bar") + .setFlushMillis(100) + .setFlushCount(4) + .setBatchingStrategy(BatchingStrategy.ONLY_EVENTS) + .setMaxBatchSize(1024 * 1024) + .setBatchQueueSizeLimit(10) + .build(); + final HttpPostEmitter emitter = new HttpPostEmitter(config, httpClient, new ObjectMapper()); + + emitter.start(); + + httpClient.setGoHandler(new GoHandler() { + @Override + protected ListenableFuture go(Request request) throws X + { + return GoHandlers.immediateFuture(EmitterTest.BAD_RESPONSE); + } + }); + + char[] chars = new char[600000]; + Arrays.fill(chars, '*'); + String bigString = new String(chars); + + Event smallEvent = ServiceMetricEvent.builder() + .setFeed("smallEvents") + .setDimension("test", "hi") + .build("metric", 10) + .build("qwerty", "asdfgh"); + + Event bigEvent = ServiceMetricEvent.builder() + .setFeed("bigEvents") + .setDimension("test", bigString) + .build("metric", 10) + .build("qwerty", "asdfgh"); + + final CountDownLatch threadsCompleted = new CountDownLatch(2); + new Thread() { + @Override + public void run() + { + for (int i = 0; i < 1000; i++) { + + emitter.emit(smallEvent); + + Assert.assertTrue(emitter.getFailedBuffers() <= 10); + Assert.assertTrue(emitter.getBuffersToEmit() <= 12); + } + threadsCompleted.countDown(); + } + }.start(); + new Thread() { + @Override + public void run() + { + for (int i = 0; i < 1000; i++) { + + emitter.emit(bigEvent); + + Assert.assertTrue(emitter.getFailedBuffers() <= 10); + Assert.assertTrue(emitter.getBuffersToEmit() <= 12); + } + threadsCompleted.countDown(); + } + }.start(); + threadsCompleted.await(); + emitter.flush(); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/IntEvent.java b/java-util/src/test/java/io/druid/java/util/emitter/core/IntEvent.java new file mode 100644 index 000000000000..e58ae2f5dbca --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/IntEvent.java @@ -0,0 +1,57 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import org.joda.time.DateTime; + +import java.util.Map; + +class IntEvent implements Event +{ + int index; + + IntEvent() + { + } + + @Override + public Map toMap() + { + return null; + } + + @Override + public String getFeed() + { + return null; + } + + @Override + public DateTime getCreatedTime() + { + return null; + } + + @Override + public boolean isSafeToBuffer() + { + return false; + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/LoggingEmitterConfigTest.java b/java-util/src/test/java/io/druid/java/util/emitter/core/LoggingEmitterConfigTest.java new file mode 100644 index 000000000000..2a7898a10529 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/LoggingEmitterConfigTest.java @@ -0,0 +1,90 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Properties; + +public class LoggingEmitterConfigTest +{ + @Test + public void testDefaults() + { + final Properties props = new Properties(); + final ObjectMapper objectMapper = new ObjectMapper(); + final LoggingEmitterConfig config = objectMapper.convertValue( + Emitters.makeCustomFactoryMap(props), + LoggingEmitterConfig.class + ); + Assert.assertEquals("getLoggerClass", LoggingEmitter.class.getName(), config.getLoggerClass()); + Assert.assertEquals("getLogLevel", "info", config.getLogLevel()); + } + + @Test + public void testDefaultsLegacy() + { + final Properties props = new Properties(); + final ObjectMapper objectMapper = new ObjectMapper(); + final LoggingEmitterConfig config = objectMapper.convertValue( + Emitters.makeLoggingMap(props), + LoggingEmitterConfig.class + ); + + Assert.assertEquals("getLoggerClass", LoggingEmitter.class.getName(), config.getLoggerClass()); + Assert.assertEquals("getLogLevel", "debug", config.getLogLevel()); + } + + @Test + public void testSettingEverything() + { + final Properties props = new Properties(); + props.setProperty("io.druid.java.util.emitter.loggerClass", "Foo"); + props.setProperty("io.druid.java.util.emitter.logLevel", "INFO"); + + final ObjectMapper objectMapper = new ObjectMapper(); + final LoggingEmitterConfig config = objectMapper.convertValue( + Emitters.makeCustomFactoryMap(props), + LoggingEmitterConfig.class + ); + + Assert.assertEquals("getLoggerClass", "Foo", config.getLoggerClass()); + Assert.assertEquals("getLogLevel", "INFO", config.getLogLevel()); + } + + @Test + public void testSettingEverythingLegacy() + { + final Properties props = new Properties(); + props.setProperty("io.druid.java.util.emitter.logging.class", "Foo"); + 
props.setProperty("io.druid.java.util.emitter.logging.level", "INFO"); + + final ObjectMapper objectMapper = new ObjectMapper(); + final LoggingEmitterConfig config = objectMapper.convertValue( + Emitters.makeLoggingMap(props), + LoggingEmitterConfig.class + ); + + Assert.assertEquals("getLoggerClass", "Foo", config.getLoggerClass()); + Assert.assertEquals("getLogLevel", "INFO", config.getLogLevel()); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/MockHttpClient.java b/java-util/src/test/java/io/druid/java/util/emitter/core/MockHttpClient.java new file mode 100644 index 000000000000..06a3b4fee290 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/MockHttpClient.java @@ -0,0 +1,57 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import org.asynchttpclient.DefaultAsyncHttpClient; +import org.asynchttpclient.ListenableFuture; +import org.asynchttpclient.Request; +import org.asynchttpclient.Response; + +/** + */ +public class MockHttpClient extends DefaultAsyncHttpClient +{ + private volatile GoHandler goHandler; + + public MockHttpClient() + { + } + + public GoHandler getGoHandler() + { + return goHandler; + } + + public void setGoHandler(GoHandler goHandler) + { + this.goHandler = goHandler; + } + + public boolean succeeded() + { + return goHandler.succeeded(); + } + + @Override + public ListenableFuture executeRequest(Request request) + { + return goHandler.run(request); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterConfigTest.java b/java-util/src/test/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterConfigTest.java new file mode 100644 index 000000000000..c860483a1578 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterConfigTest.java @@ -0,0 +1,76 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.java.util.common.Pair; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Properties; + +public class ParametrizedUriEmitterConfigTest +{ + @Test + public void testDefaults() + { + final Properties props = new Properties(); + + final ObjectMapper objectMapper = new ObjectMapper(); + final ParametrizedUriEmitterConfig paramConfig = objectMapper.convertValue(Emitters.makeCustomFactoryMap(props), ParametrizedUriEmitterConfig.class); + final HttpEmitterConfig config = paramConfig.buildHttpEmitterConfig("http://example.com/topic"); + + Assert.assertEquals(60000, config.getFlushMillis()); + Assert.assertEquals(500, config.getFlushCount()); + Assert.assertEquals("http://example.com/topic", config.getRecipientBaseUrl()); + Assert.assertEquals(null, config.getBasicAuthentication()); + Assert.assertEquals(BatchingStrategy.ARRAY, config.getBatchingStrategy()); + Pair batchConfigPair = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit( + Runtime.getRuntime().maxMemory() + ); + Assert.assertEquals(batchConfigPair.lhs.intValue(), config.getMaxBatchSize()); + Assert.assertEquals(batchConfigPair.rhs.intValue(), config.getBatchQueueSizeLimit()); + Assert.assertEquals(Long.MAX_VALUE, config.getFlushTimeOut()); + } + + @Test + public void testSettingEverything() + { + final Properties props = new Properties(); + props.setProperty("io.druid.java.util.emitter.httpEmitting.flushMillis", "1"); + props.setProperty("io.druid.java.util.emitter.httpEmitting.flushCount", "2"); + props.setProperty("io.druid.java.util.emitter.httpEmitting.basicAuthentication", "a:b"); + props.setProperty("io.druid.java.util.emitter.httpEmitting.batchingStrategy", "NEWLINES"); + props.setProperty("io.druid.java.util.emitter.httpEmitting.maxBatchSize", "4"); + props.setProperty("io.druid.java.util.emitter.httpEmitting.flushTimeOut", "1000"); + + final ObjectMapper objectMapper = new ObjectMapper(); + final ParametrizedUriEmitterConfig paramConfig = objectMapper.convertValue(Emitters.makeCustomFactoryMap(props), ParametrizedUriEmitterConfig.class); + final HttpEmitterConfig config = paramConfig.buildHttpEmitterConfig("http://example.com/topic"); + + Assert.assertEquals(1, config.getFlushMillis()); + Assert.assertEquals(2, config.getFlushCount()); + Assert.assertEquals("http://example.com/topic", config.getRecipientBaseUrl()); + Assert.assertEquals("a:b", config.getBasicAuthentication()); + Assert.assertEquals(BatchingStrategy.NEWLINES, config.getBatchingStrategy()); + Assert.assertEquals(4, config.getMaxBatchSize()); + Assert.assertEquals(1000, config.getFlushTimeOut()); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterTest.java b/java-util/src/test/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterTest.java new file mode 100644 index 000000000000..0f794e15f72f --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/core/ParametrizedUriEmitterTest.java @@ -0,0 +1,216 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.emitter.core; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.emitter.service.UnitEvent; +import org.asynchttpclient.ListenableFuture; +import org.asynchttpclient.Request; +import org.asynchttpclient.Response; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import static io.druid.java.util.emitter.core.EmitterTest.okResponse; +import static org.junit.Assert.assertEquals; + +public class ParametrizedUriEmitterTest +{ + private static final ObjectMapper jsonMapper = new ObjectMapper(); + + private MockHttpClient httpClient; + private Lifecycle lifecycle; + + @Before + public void setUp() throws Exception + { + httpClient = new MockHttpClient(); + } + + @After + public void tearDown() throws Exception + { + if (lifecycle != null) { + lifecycle.stop(); + } + } + + private Emitter parametrizedEmmiter(String uriPattern) throws Exception + { + final Properties props = new Properties(); + props.setProperty("io.druid.java.util.emitter.type", "parametrized"); + props.setProperty("io.druid.java.util.emitter.recipientBaseUrlPattern", uriPattern); + lifecycle = new Lifecycle(); + Emitter emitter = Emitters.create(props, httpClient, lifecycle); + assertEquals(ParametrizedUriEmitter.class, emitter.getClass()); + lifecycle.start(); + return emitter; + } + + @Test + public void testParametrizedEmitterCreated() throws Exception + { + parametrizedEmmiter("http://example.com/"); + } + + @Test + public void testEmitterWithFeedUriExtractor() throws Exception + { + Emitter emitter = parametrizedEmmiter("http://example.com/{feed}"); + final List events = Arrays.asList( + new UnitEvent("test", 1), + new UnitEvent("test", 2) + ); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + public ListenableFuture go(Request request) throws JsonProcessingException + { + Assert.assertEquals("http://example.com/test", request.getUrl()); + Assert.assertEquals( + StringUtils.format( + "[%s,%s]\n", + jsonMapper.writeValueAsString(events.get(0)), + jsonMapper.writeValueAsString(events.get(1)) + ), + Charsets.UTF_8.decode(request.getByteBufferData().slice()).toString() + ); + + return GoHandlers.immediateFuture(okResponse()); + } + }.times(1) + ); + + for (UnitEvent event : events) { + emitter.emit(event); + } + emitter.flush(); + Assert.assertTrue(httpClient.succeeded()); + } + + @Test + public void testEmitterWithMultipleFeeds() throws Exception + { + Emitter emitter = parametrizedEmmiter("http://example.com/{feed}"); + final List events = Arrays.asList( + new UnitEvent("test1", 1), + new UnitEvent("test2", 2) + ); + + final Map results = new HashMap<>(); + + httpClient.setGoHandler( + new GoHandler() 
+ { + @Override + protected ListenableFuture go(Request request) + { + results.put( + request.getUrl().toString(), + Charsets.UTF_8.decode(request.getByteBufferData().slice()).toString() + ); + return GoHandlers.immediateFuture(okResponse()); + } + }.times(2) + ); + + for (UnitEvent event : events) { + emitter.emit(event); + } + emitter.flush(); + Assert.assertTrue(httpClient.succeeded()); + Map expected = ImmutableMap.of( + "http://example.com/test1", StringUtils.format("[%s]\n", jsonMapper.writeValueAsString(events.get(0))), + "http://example.com/test2", StringUtils.format("[%s]\n", jsonMapper.writeValueAsString(events.get(1)))); + Assert.assertEquals(expected, results); + } + + @Test + public void testEmitterWithParametrizedUriExtractor() throws Exception + { + Emitter emitter = parametrizedEmmiter("http://example.com/{key1}/{key2}"); + final List events = Arrays.asList( + new UnitEvent("test", 1, ImmutableMap.of("key1", "val1", "key2", "val2")), + new UnitEvent("test", 2, ImmutableMap.of("key1", "val1", "key2", "val2")) + ); + + httpClient.setGoHandler( + new GoHandler() + { + @Override + protected ListenableFuture go(Request request) throws JsonProcessingException + { + Assert.assertEquals("http://example.com/val1/val2", request.getUrl()); + Assert.assertEquals( + StringUtils.format( + "[%s,%s]\n", + jsonMapper.writeValueAsString(events.get(0)), + jsonMapper.writeValueAsString(events.get(1)) + ), + Charsets.UTF_8.decode(request.getByteBufferData().slice()).toString() + ); + + return GoHandlers.immediateFuture(okResponse()); + } + }.times(1) + ); + + for (UnitEvent event : events) { + emitter.emit(event); + } + emitter.flush(); + Assert.assertTrue(httpClient.succeeded()); + } + + @Test + public void failEmitMalformedEvent() throws Exception + { + Emitter emitter = parametrizedEmmiter("http://example.com/{keyNotSetInEvents}"); + Event event = new UnitEvent("test", 1); + + httpClient.setGoHandler(GoHandlers.failingHandler()); + + try { + emitter.emit(event); + emitter.flush(); + Assert.fail("expected IllegalArgumentException for the unresolved {keyNotSetInEvents} key"); + } + catch (IllegalArgumentException e) { + Assert.assertEquals( + StringUtils.format( + "ParametrizedUriExtractor with pattern http://example.com/{keyNotSetInEvents} requires keyNotSetInEvents to be set in event, but found %s", event.toMap()), + e.getMessage() + ); + } + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/service/AlertEventTest.java b/java-util/src/test/java/io/druid/java/util/emitter/service/AlertEventTest.java new file mode 100644 index 000000000000..a2c5853ef069 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/service/AlertEventTest.java @@ -0,0 +1,181 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package io.druid.java.util.emitter.service; + +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import io.druid.java.util.emitter.service.AlertEvent.Severity; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Map; + +/** + */ +public class AlertEventTest +{ + @Test + public void testStupid() throws Exception + { + AlertEvent event = AlertBuilder.create("blargy") + .addData("something1", "a") + .addData("something2", "b") + .build("test", "localhost"); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "alerts") + .put("timestamp", event.getCreatedTime().toString()) + .put("service", "test") + .put("host", "localhost") + .put("severity", "component-failure") + .put("description", "blargy") + .put("data", ImmutableMap.of("something1", "a", "something2", "b")) + .build(), + event.toMap() + ); + } + + @Test + public void testAnomaly() throws Exception + { + AlertEvent event = AlertBuilder.create("blargy") + .severity(Severity.ANOMALY) + .addData("something1", "a") + .addData("something2", "b") + .build("test", "localhost"); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "alerts") + .put("timestamp", event.getCreatedTime().toString()) + .put("service", "test") + .put("host", "localhost") + .put("severity", "anomaly") + .put("description", "blargy") + .put("data", ImmutableMap.of("something1", "a", "something2", "b")) + .build(), + event.toMap() + ); + } + + @Test + public void testComponentFailure() throws Exception + { + AlertEvent event = AlertBuilder.create("blargy") + .severity(Severity.COMPONENT_FAILURE) + .addData("something1", "a") + .addData("something2", "b") + .build("test", "localhost"); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "alerts") + .put("timestamp", event.getCreatedTime().toString()) + .put("service", "test") + .put("host", "localhost") + .put("severity", "component-failure") + .put("description", "blargy") + .put("data", ImmutableMap.of("something1", "a", "something2", "b")) + .build(), + event.toMap() + ); + } + + @Test + public void testServiceFailure() throws Exception + { + AlertEvent event = AlertBuilder.create("blargy") + .severity(Severity.SERVICE_FAILURE) + .addData("something1", "a") + .addData("something2", "b") + .build("test", "localhost"); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "alerts") + .put("timestamp", event.getCreatedTime().toString()) + .put("service", "test") + .put("host", "localhost") + .put("severity", "service-failure") + .put("description", "blargy") + .put("data", ImmutableMap.of("something1", "a", "something2", "b")) + .build(), + event.toMap() + ); + } + + @Test + public void testDefaulting() throws Exception + { + final String service = "some service"; + final String host = "some host"; + final String desc = "some description"; + final Map data = ImmutableMap.builder().put("a", "1").put("b", "2").build(); + for (Severity severity : new Severity[]{Severity.ANOMALY, Severity.COMPONENT_FAILURE, Severity.SERVICE_FAILURE}) { + Assert.assertEquals( + contents(new AlertEvent(service, host, desc, data)), + contents(new AlertEvent(service, host, Severity.COMPONENT_FAILURE, desc, data)) + ); + + Assert.assertEquals( + contents(new AlertEvent(service, host, desc)), + contents(new AlertEvent(service, host, Severity.COMPONENT_FAILURE, desc, ImmutableMap.of())) + ); + + Assert.assertEquals( + contents(AlertBuilder.create(desc).addData("a", "1").addData("b", 
"2").build(service, host)), + contents(new AlertEvent(service, host, Severity.COMPONENT_FAILURE, desc, data)) + ); + + Assert.assertEquals( + contents(AlertBuilder.create(desc).addData(data).build(service, host)), + contents(new AlertEvent(service, host, Severity.COMPONENT_FAILURE, desc, data)) + ); + + Assert.assertEquals( + contents(AlertBuilder.create(desc) + .severity(severity) + .addData("a", "1") + .addData("b", "2") + .build(service, host)), + contents(new AlertEvent(service, host, severity, desc, data)) + ); + + Assert.assertEquals( + contents(AlertBuilder.create(desc).severity(severity).addData(data).build(service, host)), + contents(new AlertEvent(service, host, severity, desc, data)) + ); + } + } + + public Map contents(AlertEvent a) + { + return Maps.filterKeys(a.toMap(), new Predicate() + { + @Override + public boolean apply(String k) + { + return !k.equals("timestamp"); + } + }); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/service/ServiceMetricEventTest.java b/java-util/src/test/java/io/druid/java/util/emitter/service/ServiceMetricEventTest.java new file mode 100644 index 000000000000..ba64100ee719 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/service/ServiceMetricEventTest.java @@ -0,0 +1,292 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + + +package io.druid.java.util.emitter.service; + +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.DateTimes; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Arrays; + +/** + */ +public class ServiceMetricEventTest +{ + @Test + public void testStupidTest() throws Exception + { + ServiceMetricEvent builderEvent = new ServiceMetricEvent.Builder() + .setDimension("user1", "a") + .setDimension("user2", "b") + .setDimension("user3", "c") + .setDimension("user4", "d") + .setDimension("user5", "e") + .setDimension("user6", "f") + .setDimension("user7", "g") + .setDimension("user8", "h") + .setDimension("user9", "i") + .setDimension("user10", "j") + .build("test-metric", 1234) + .build("test", "localhost"); + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "metrics") + .put("timestamp", builderEvent.getCreatedTime().toString()) + .put("service", "test") + .put("host", "localhost") + .put("metric", "test-metric") + .put("user1", "a") + .put("user2", "b") + .put("user3", "c") + .put("user4", "d") + .put("user5", "e") + .put("user6", "f") + .put("user7", "g") + .put("user8", "h") + .put("user9", "i") + .put("user10", "j") + .put("value", 1234) + .build(), + builderEvent.toMap() + ); + + ServiceMetricEvent constructorEvent = ServiceMetricEvent + .builder() + .setDimension("user1", "a") + .setDimension("user2", "b") + .setDimension("user3", "c") + .setDimension("user4", "d") + .setDimension("user5", "e") + .setDimension("user6", "f") + .setDimension("user7", "g") + .setDimension("user8", "h") + .setDimension("user9", "i") + .setDimension("user10", "j") + .build("test-metric", 1234) + .build("test", "localhost"); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "metrics") + .put("timestamp", constructorEvent.getCreatedTime().toString()) + .put("service", "test") + .put("host", "localhost") + .put("metric", "test-metric") + .put("user1", "a") + .put("user2", "b") + .put("user3", "c") + .put("user4", "d") + .put("user5", "e") + .put("user6", "f") + .put("user7", "g") + .put("user8", "h") + .put("user9", "i") + .put("user10", "j") + .put("value", 1234) + .build(), constructorEvent.toMap() + ); + + ServiceMetricEvent arrayConstructorEvent = ServiceMetricEvent + .builder() + .setDimension("user1", new String[]{"a"}) + .setDimension("user2", new String[]{"b"}) + .setDimension("user3", new String[]{"c"}) + .setDimension("user4", new String[]{"d"}) + .setDimension("user5", new String[]{"e"}) + .setDimension("user6", new String[]{"f"}) + .setDimension("user7", new String[]{"g"}) + .setDimension("user8", new String[]{"h"}) + .setDimension("user9", new String[]{"i"}) + .setDimension("user10", new String[]{"j"}) + .build("test-metric", 1234) + .build("test", "localhost"); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "metrics") + .put("timestamp", arrayConstructorEvent.getCreatedTime().toString()) + .put("service", "test") + .put("host", "localhost") + .put("metric", "test-metric") + .put("user1", Arrays.asList("a")) + .put("user2", Arrays.asList("b")) + .put("user3", Arrays.asList("c")) + .put("user4", Arrays.asList("d")) + .put("user5", Arrays.asList("e")) + .put("user6", Arrays.asList("f")) + .put("user7", Arrays.asList("g")) + .put("user8", Arrays.asList("h")) + .put("user9", Arrays.asList("i")) + .put("user10", Arrays.asList("j")) + .put("value", 1234) + .build(), arrayConstructorEvent.toMap() + ); + + Assert.assertNotNull( + new ServiceMetricEvent.Builder() + .setDimension("user1", "a") + 
.setDimension("user2", "b") + .setDimension("user3", "c") + .setDimension("user4", "d") + .setDimension("user5", "e") + .setDimension("user6", "f") + .setDimension("user7", "g") + .setDimension("user8", "h") + .setDimension("user9", "i") + .setDimension("user10", "j") + .build(null, "test-metric", 1234) + .build("test", "localhost") + .getCreatedTime() + ); + + Assert.assertNotNull( + ServiceMetricEvent.builder() + .setDimension("user1", new String[]{"a"}) + .setDimension("user2", new String[]{"b"}) + .setDimension("user3", new String[]{"c"}) + .setDimension("user4", new String[]{"d"}) + .setDimension("user5", new String[]{"e"}) + .setDimension("user6", new String[]{"f"}) + .setDimension("user7", new String[]{"g"}) + .setDimension("user8", new String[]{"h"}) + .setDimension("user9", new String[]{"i"}) + .setDimension("user10", new String[]{"j"}) + .build("test-metric", 1234) + .build("test", "localhost") + .getCreatedTime() + ); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "metrics") + .put("timestamp", DateTimes.utc(42).toString()) + .put("service", "test") + .put("host", "localhost") + .put("metric", "test-metric") + .put("user1", "a") + .put("user2", "b") + .put("user3", "c") + .put("user4", "d") + .put("user5", "e") + .put("user6", "f") + .put("user7", "g") + .put("user8", "h") + .put("user9", "i") + .put("user10", "j") + .put("value", 1234) + .build(), + new ServiceMetricEvent.Builder() + .setDimension("user1", "a") + .setDimension("user2", "b") + .setDimension("user3", "c") + .setDimension("user4", "d") + .setDimension("user5", "e") + .setDimension("user6", "f") + .setDimension("user7", "g") + .setDimension("user8", "h") + .setDimension("user9", "i") + .setDimension("user10", "j") + .build(DateTimes.utc(42), "test-metric", 1234) + .build("test", "localhost") + .toMap() + ); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "metrics") + .put("timestamp", DateTimes.utc(42).toString()) + .put("service", "test") + .put("host", "localhost") + .put("metric", "test-metric") + .put("user1", Arrays.asList("a")) + .put("user2", Arrays.asList("b")) + .put("user3", Arrays.asList("c")) + .put("user4", Arrays.asList("d")) + .put("user5", Arrays.asList("e")) + .put("user6", Arrays.asList("f")) + .put("user7", Arrays.asList("g")) + .put("user8", Arrays.asList("h")) + .put("user9", Arrays.asList("i")) + .put("user10", Arrays.asList("j")) + .put("value", 1234) + .build(), + ServiceMetricEvent.builder() + .setDimension("user1", new String[]{"a"}) + .setDimension("user2", new String[]{"b"}) + .setDimension("user3", new String[]{"c"}) + .setDimension("user4", new String[]{"d"}) + .setDimension("user5", new String[]{"e"}) + .setDimension("user6", new String[]{"f"}) + .setDimension("user7", new String[]{"g"}) + .setDimension("user8", new String[]{"h"}) + .setDimension("user9", new String[]{"i"}) + .setDimension("user10", new String[]{"j"}) + .build(DateTimes.utc(42), "test-metric", 1234) + .build("test", "localhost") + .toMap() + ); + + Assert.assertEquals( + ImmutableMap.builder() + .put("feed", "metrics") + .put("timestamp", DateTimes.utc(42).toString()) + .put("service", "test") + .put("host", "localhost") + .put("metric", "test-metric") + .put("foo", "bar") + .put("baz", Arrays.asList("foo", "qux")) + .put("value", 1234) + .build(), + ServiceMetricEvent.builder() + .setDimension("foo", "bar") + .setDimension("baz", new String[]{"foo", "qux"}) + .build(DateTimes.utc(42), "test-metric", 1234) + .build("test", "localhost") + .toMap() + ); + } + + @Test(expected = 
IllegalStateException.class) + public void testInfinite() throws Exception + { + ServiceMetricEvent.builder().build("foo", 1 / 0d); + } + + @Test(expected = IllegalStateException.class) + public void testInfinite2() throws Exception + { + ServiceMetricEvent.builder().build("foo", 1 / 0f); + } + + + @Test(expected = IllegalStateException.class) + public void testNaN() throws Exception + { + ServiceMetricEvent.builder().build("foo", 0 / 0d); + } + + @Test(expected = IllegalStateException.class) + public void testNaN2() throws Exception + { + ServiceMetricEvent.builder().build("foo", 0 / 0f); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/emitter/service/UnitEvent.java b/java-util/src/test/java/io/druid/java/util/emitter/service/UnitEvent.java new file mode 100644 index 000000000000..91385961309c --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/emitter/service/UnitEvent.java @@ -0,0 +1,84 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package io.druid.java.util.emitter.service; + +import com.fasterxml.jackson.annotation.JsonValue; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.DateTimes; +import io.druid.java.util.emitter.core.Event; +import org.joda.time.DateTime; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + */ +public class UnitEvent implements Event +{ + private final String feed; + private final Number value; + private final Map dimensions; + private final DateTime createdTime; + + public UnitEvent(String feed, Number value) + { + this(feed, value, Collections.emptyMap()); + } + + public UnitEvent(String feed, Number value, Map dimensions) + { + this.feed = feed; + this.value = value; + this.dimensions = dimensions; + + createdTime = DateTimes.nowUtc(); + } + + @Override + @JsonValue + public Map toMap() + { + Map result = new HashMap<>(); + result.putAll(dimensions); + result.put("feed", feed); + result.put("metrics", ImmutableMap.of("value", value)); + return ImmutableMap.copyOf(result); + } + + @Override + public DateTime getCreatedTime() + { + return createdTime; + } + + @Override + public String getFeed() + { + return feed; + } + + @Override + public boolean isSafeToBuffer() + { + return true; + } +} diff --git a/java-util/src/test/java/io/druid/java/util/http/client/AsyncHttpClientTest.java b/java-util/src/test/java/io/druid/java/util/http/client/AsyncHttpClientTest.java new file mode 100644 index 000000000000..5650fc61976c --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/http/client/AsyncHttpClientTest.java @@ -0,0 +1,115 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import com.google.common.base.Charsets; +import io.druid.java.util.common.StringUtils; +import org.asynchttpclient.DefaultAsyncHttpClient; +import org.junit.Assert; +import org.junit.Test; + +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.ServerSocket; +import java.net.Socket; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class AsyncHttpClientTest +{ + + @Test + public void testRequestTimeout() throws Exception + { + final ExecutorService exec = Executors.newSingleThreadExecutor(); + final ServerSocket serverSocket = new ServerSocket(0); + exec.submit( + new Runnable() + { + @Override + public void run() + { + while (!Thread.currentThread().isInterrupted()) { + try ( + Socket clientSocket = serverSocket.accept(); + BufferedReader in = new BufferedReader( + new InputStreamReader(clientSocket.getInputStream(), StandardCharsets.UTF_8) + ); + OutputStream out = clientSocket.getOutputStream() + ) { + while (!in.readLine().equals("")) { + // skip lines + } + Thread.sleep(5000); // sleep longer than the client's 2000 ms request timeout, forcing a timeout + out.write("HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\nhello!".getBytes(Charsets.UTF_8)); + } + catch (Exception e) { + // Suppress + } + } + } + } + ); + + long requestStart = 0; + DefaultAsyncHttpClient client = new DefaultAsyncHttpClient(); + // Creating a connection for the first time takes a long time, probably because of DNS resolution or + // caching, so warm up with a throwaway request before the timed one + warmUp(serverSocket, client); + try { + requestStart = System.currentTimeMillis(); + Future future = client + .prepareGet(StringUtils.format("http://localhost:%d/", serverSocket.getLocalPort())) + .setRequestTimeout(2000) + .execute(); + System.out.println("created future in: " + (System.currentTimeMillis() - requestStart)); + future.get(3000, TimeUnit.MILLISECONDS); + Assert.fail("Expected timeout"); + } + catch (ExecutionException | TimeoutException e) { + long elapsed = System.currentTimeMillis() - requestStart; + // Should fail within roughly 10% of the 2000 ms request timeout + Assert.assertTrue("elapsed: " + elapsed, elapsed < 2200); + } + finally { + exec.shutdownNow(); + serverSocket.close(); + } + } + + private void warmUp(ServerSocket serverSocket, DefaultAsyncHttpClient client) + { + try { + Future future = client + .prepareGet(StringUtils.format("http://localhost:%d/", serverSocket.getLocalPort())) + .setRequestTimeout(100) + .execute(); + future.get(); + } + catch (Exception e) { + // ignore + } + } +} diff --git a/java-util/src/test/java/io/druid/java/util/http/client/FriendlyServersTest.java
b/java-util/src/test/java/io/druid/java/util/http/client/FriendlyServersTest.java new file mode 100644 index 000000000000..1496c904bd9c --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/http/client/FriendlyServersTest.java @@ -0,0 +1,309 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import com.google.common.base.Charsets; +import com.google.common.util.concurrent.ListenableFuture; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; +import org.eclipse.jetty.server.Connector; +import org.eclipse.jetty.server.HttpConfiguration; +import org.eclipse.jetty.server.HttpConnectionFactory; +import org.eclipse.jetty.server.SecureRequestCustomizer; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.server.SslConnectionFactory; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.jboss.netty.channel.ChannelException; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Test; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Tests with servers that are at least moderately well-behaving. 
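+ * Each test hand-rolls its server: either a raw ServerSocket loop that drains the request headers and + * writes a fixed "HTTP/1.1 200 OK" response, or, for the TLS cases, an embedded Jetty server backed by + * a self-signed keystore; one @Ignore'd test goes out to httpbin.org.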
+ */ +public class FriendlyServersTest +{ + @Test + public void testFriendlyHttpServer() throws Exception + { + final ExecutorService exec = Executors.newSingleThreadExecutor(); + final ServerSocket serverSocket = new ServerSocket(0); + exec.submit( + new Runnable() + { + @Override + public void run() + { + while (!Thread.currentThread().isInterrupted()) { + try ( + Socket clientSocket = serverSocket.accept(); + BufferedReader in = new BufferedReader( + new InputStreamReader(clientSocket.getInputStream(), StandardCharsets.UTF_8) + ); + OutputStream out = clientSocket.getOutputStream() + ) { + while (!in.readLine().equals("")) { + // skip lines + } + out.write("HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\nhello!".getBytes(Charsets.UTF_8)); + } + catch (Exception e) { + // Suppress + } + } + } + } + ); + + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder().build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + final StatusResponseHolder response = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("http://localhost:%d/", serverSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8) + ).get(); + + Assert.assertEquals(200, response.getStatus().getCode()); + Assert.assertEquals("hello!", response.getContent()); + } + finally { + exec.shutdownNow(); + serverSocket.close(); + lifecycle.stop(); + } + } + + @Test + public void testCompressionCodecConfig() throws Exception + { + final ExecutorService exec = Executors.newSingleThreadExecutor(); + final ServerSocket serverSocket = new ServerSocket(0); + final AtomicBoolean foundAcceptEncoding = new AtomicBoolean(); + exec.submit( + new Runnable() + { + @Override + public void run() + { + while (!Thread.currentThread().isInterrupted()) { + try ( + Socket clientSocket = serverSocket.accept(); + BufferedReader in = new BufferedReader( + new InputStreamReader(clientSocket.getInputStream(), StandardCharsets.UTF_8) + ); + OutputStream out = clientSocket.getOutputStream() + ) { + // Read headers + String header; + while (!(header = in.readLine()).equals("")) { + if (header.equals("Accept-Encoding: identity")) { + foundAcceptEncoding.set(true); + } + } + out.write("HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\nhello!".getBytes(Charsets.UTF_8)); + } + catch (Exception e) { + // Suppress + } + } + } + } + ); + + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder() + .withCompressionCodec(HttpClientConfig.CompressionCodec.IDENTITY) + .build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + final StatusResponseHolder response = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("http://localhost:%d/", serverSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8) + ).get(); + + Assert.assertEquals(200, response.getStatus().getCode()); + Assert.assertEquals("hello!", response.getContent()); + Assert.assertTrue(foundAcceptEncoding.get()); + } + finally { + exec.shutdownNow(); + serverSocket.close(); + lifecycle.stop(); + } + } + + @Test + public void testFriendlySelfSignedHttpsServer() throws Exception + { + final Lifecycle lifecycle = new Lifecycle(); + final String keyStorePath = getClass().getClassLoader().getResource("keystore.jks").getFile(); + Server server = new Server(); + + HttpConfiguration https = new HttpConfiguration(); + https.addCustomizer(new SecureRequestCustomizer()); + + SslContextFactory 
sslContextFactory = new SslContextFactory(); + sslContextFactory.setKeyStorePath(keyStorePath); + sslContextFactory.setKeyStorePassword("abc123"); + sslContextFactory.setKeyManagerPassword("abc123"); + + ServerConnector sslConnector = new ServerConnector( + server, + new SslConnectionFactory(sslContextFactory, "http/1.1"), + new HttpConnectionFactory(https) + ); + + sslConnector.setPort(0); + server.setConnectors(new Connector[]{sslConnector}); + server.start(); + + try { + final SSLContext mySsl = HttpClientInit.sslContextWithTrustedKeyStore(keyStorePath, "abc123"); + final HttpClientConfig trustingConfig = HttpClientConfig.builder().withSslContext(mySsl).build(); + final HttpClient trustingClient = HttpClientInit.createClient(trustingConfig, lifecycle); + + final HttpClientConfig skepticalConfig = HttpClientConfig.builder() + .withSslContext(SSLContext.getDefault()) + .build(); + final HttpClient skepticalClient = HttpClientInit.createClient(skepticalConfig, lifecycle); + + // Correct name ("localhost") + { + final HttpResponseStatus status = trustingClient + .go( + new Request( + HttpMethod.GET, + new URL(StringUtils.format("https://localhost:%d/", sslConnector.getLocalPort())) + ), + new StatusResponseHandler(Charsets.UTF_8) + ).get().getStatus(); + Assert.assertEquals(404, status.getCode()); + } + + // Incorrect name ("127.0.0.1") + { + final ListenableFuture response1 = trustingClient + .go( + new Request( + HttpMethod.GET, + new URL(StringUtils.format("https://127.0.0.1:%d/", sslConnector.getLocalPort())) + ), + new StatusResponseHandler(Charsets.UTF_8) + ); + + Throwable ea = null; + try { + response1.get(); + } + catch (ExecutionException e) { + ea = e.getCause(); + } + + Assert.assertTrue("ChannelException thrown by 'get'", ea instanceof ChannelException); + Assert.assertTrue("Expected error message", ea.getCause().getMessage().matches(".*Failed to handshake.*")); + } + + { + // Untrusting client + final ListenableFuture response2 = skepticalClient + .go( + new Request( + HttpMethod.GET, new URL(StringUtils.format("https://localhost:%d/", sslConnector.getLocalPort())) + ), + new StatusResponseHandler(Charsets.UTF_8) + ); + + Throwable eb = null; + try { + response2.get(); + } + catch (ExecutionException e) { + eb = e.getCause(); + } + Assert.assertNotNull("ChannelException thrown by 'get'", eb); + Assert.assertTrue( + "Root cause is SSLHandshakeException", + eb.getCause().getCause() instanceof SSLHandshakeException + ); + } + } + finally { + lifecycle.stop(); + server.stop(); + } + } + + @Test + @Ignore + public void testHttpBin() throws Throwable + { + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder().withSslContext(SSLContext.getDefault()).build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + + { + final HttpResponseStatus status = client + .go( + new Request(HttpMethod.GET, new URL("https://httpbin.org/get")), + new StatusResponseHandler(Charsets.UTF_8) + ).get().getStatus(); + + Assert.assertEquals(200, status.getCode()); + } + + { + final HttpResponseStatus status = client + .go( + new Request(HttpMethod.POST, new URL("https://httpbin.org/post")) + .setContent(new byte[]{'a', 'b', 'c', 1, 2, 3}), + new StatusResponseHandler(Charsets.UTF_8) + ).get().getStatus(); + + Assert.assertEquals(200, status.getCode()); + } + } + finally { + lifecycle.stop(); + } + } +} diff --git a/java-util/src/test/java/io/druid/java/util/http/client/JankyServersTest.java 
b/java-util/src/test/java/io/druid/java/util/http/client/JankyServersTest.java new file mode 100644 index 000000000000..1923ba9fb6bd --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/http/client/JankyServersTest.java @@ -0,0 +1,356 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client; + +import com.google.common.base.Charsets; +import com.google.common.util.concurrent.ListenableFuture; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.lifecycle.Lifecycle; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; +import org.jboss.netty.channel.ChannelException; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.timeout.ReadTimeoutException; +import org.joda.time.Duration; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import javax.net.ssl.SSLContext; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.URL; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +/** + * Tests with a bunch of goofy not-actually-http servers. 
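+ * Each server here is a raw ServerSocket rather than a real HTTP server: one accepts and never answers, one
+ * closes as soon as a byte arrives, and one echoes raw bytes straight back, so each test can pin down the
+ * exact exception the HTTP client surfaces for that failure mode.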
+ */ +public class JankyServersTest +{ + static ExecutorService exec; + static ServerSocket silentServerSocket; + static ServerSocket echoServerSocket; + static ServerSocket closingServerSocket; + + @BeforeClass + public static void setUp() throws Exception + { + exec = Executors.newCachedThreadPool(); + + silentServerSocket = new ServerSocket(0); + echoServerSocket = new ServerSocket(0); + closingServerSocket = new ServerSocket(0); + + // Silent server: accepts connections and reads forever, but never writes a byte back. + exec.submit( + new Runnable() + { + @Override + public void run() + { + while (!Thread.currentThread().isInterrupted()) { + try ( + Socket clientSocket = silentServerSocket.accept(); + InputStream in = clientSocket.getInputStream() + ) { + while (in.read() != -1) { + } + } + catch (Exception e) { + // Suppress + } + } + } + } + ); + + // Closing server: reads a single byte, then slams the connection shut. + exec.submit( + new Runnable() + { + @Override + public void run() + { + while (!Thread.currentThread().isInterrupted()) { + try ( + Socket clientSocket = closingServerSocket.accept(); + InputStream in = clientSocket.getInputStream() + ) { + in.read(); + clientSocket.close(); + } + catch (Exception e) { + // Suppress + } + } + } + } + ); + + // Echo server: writes every byte it receives straight back, so the client sees its own request instead of HTTP. + exec.submit( + new Runnable() + { + @Override + public void run() + { + while (!Thread.currentThread().isInterrupted()) { + try ( + Socket clientSocket = echoServerSocket.accept(); + OutputStream out = clientSocket.getOutputStream(); + InputStream in = clientSocket.getInputStream() + ) { + int b; + while ((b = in.read()) != -1) { + out.write(b); + } + } + catch (Exception e) { + // Suppress + } + } + } + } + ); + } + + @AfterClass + public static void tearDown() throws Exception + { + exec.shutdownNow(); + silentServerSocket.close(); + echoServerSocket.close(); + closingServerSocket.close(); + } + + @Test + public void testHttpSilentServerWithGlobalTimeout() throws Throwable + { + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder().withReadTimeout(new Duration(100)).build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + final ListenableFuture<StatusResponseHolder> future = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("http://localhost:%d/", silentServerSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8) + ); + + Throwable e = null; + try { + future.get(); + } + catch (ExecutionException e1) { + e = e1.getCause(); + } + + Assert.assertTrue("ReadTimeoutException thrown by 'get'", e instanceof ReadTimeoutException); + } + finally { + lifecycle.stop(); + } + } + + @Test + public void testHttpSilentServerWithRequestTimeout() throws Throwable + { + final Lifecycle lifecycle = new Lifecycle(); + try { + // The global read timeout is made enormous so that the 100ms per-request timeout passed to go() is what fires. + final HttpClientConfig config = HttpClientConfig.builder().withReadTimeout(new Duration(86400L * 365)).build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + final ListenableFuture<StatusResponseHolder> future = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("http://localhost:%d/", silentServerSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8), + new Duration(100L) + ); + + Throwable e = null; + try { + future.get(); + } + catch (ExecutionException e1) { + e = e1.getCause(); + } + + Assert.assertTrue("ReadTimeoutException thrown by 'get'", e instanceof ReadTimeoutException); + } + finally { + lifecycle.stop(); + } + } + + @Test + public void testHttpsSilentServer() throws Throwable + { + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder()
.withSslContext(SSLContext.getDefault()) + .withSslHandshakeTimeout(new Duration(100)) + .build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + + final ListenableFuture<StatusResponseHolder> response = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("https://localhost:%d/", silentServerSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8) + ); + + Throwable e = null; + try { + response.get(); + } + catch (ExecutionException e1) { + e = e1.getCause(); + } + + Assert.assertTrue("ChannelException thrown by 'get'", e instanceof ChannelException); + } + finally { + lifecycle.stop(); + } + } + + @Test + public void testHttpConnectionClosingServer() throws Throwable + { + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder().build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + final ListenableFuture<StatusResponseHolder> response = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("http://localhost:%d/", closingServerSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8) + ); + Throwable e = null; + try { + response.get(); + } + catch (ExecutionException e1) { + e = e1.getCause(); + e1.printStackTrace(); + } + + Assert.assertTrue("ChannelException thrown by 'get'", isChannelClosedException(e)); + } + finally { + lifecycle.stop(); + } + } + + @Test + public void testHttpsConnectionClosingServer() throws Throwable + { + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder().withSslContext(SSLContext.getDefault()).build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + + final ListenableFuture<StatusResponseHolder> response = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("https://localhost:%d/", closingServerSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8) + ); + + Throwable e = null; + try { + response.get(); + } + catch (ExecutionException e1) { + e = e1.getCause(); + e1.printStackTrace(); + } + + Assert.assertTrue("ChannelException thrown by 'get'", isChannelClosedException(e)); + } + finally { + lifecycle.stop(); + } + } + + public boolean isChannelClosedException(Throwable e) + { + return e instanceof ChannelException || + (e instanceof IOException && e.getMessage().matches(".*Connection reset by peer.*")); + } + + @Test + public void testHttpEchoServer() throws Throwable + { + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder().build(); + final HttpClient client = HttpClientInit.createClient(config, lifecycle); + final ListenableFuture<StatusResponseHolder> response = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("http://localhost:%d/", echoServerSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8) + ); + + Throwable e = null; + try { + response.get(); + } + catch (ExecutionException e1) { + e = e1.getCause(); + } + + Assert.assertTrue("IllegalArgumentException thrown by 'get'", e instanceof IllegalArgumentException); + Assert.assertTrue("Expected error message", e.getMessage().matches(".*invalid version format:.*")); + } + finally { + lifecycle.stop(); + } + } + + @Test + public void testHttpsEchoServer() throws Throwable + { + final Lifecycle lifecycle = new Lifecycle(); + try { + final HttpClientConfig config = HttpClientConfig.builder().withSslContext(SSLContext.getDefault()).build(); + final HttpClient client =
HttpClientInit.createClient(config, lifecycle); + + final ListenableFuture<StatusResponseHolder> response = client + .go( + new Request(HttpMethod.GET, new URL(StringUtils.format("https://localhost:%d/", echoServerSocket.getLocalPort()))), + new StatusResponseHandler(Charsets.UTF_8) + ); + + Throwable e = null; + try { + response.get(); + } + catch (ExecutionException e1) { + e = e1.getCause(); + } + + Assert.assertNotNull("ChannelException thrown by 'get'", e); + } + finally { + lifecycle.stop(); + } + } +} diff --git a/java-util/src/test/java/io/druid/java/util/http/client/io/AppendableByteArrayInputStreamTest.java b/java-util/src/test/java/io/druid/java/util/http/client/io/AppendableByteArrayInputStreamTest.java new file mode 100644 index 000000000000..4289e8104686 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/http/client/io/AppendableByteArrayInputStreamTest.java @@ -0,0 +1,261 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.http.client.io; + +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.logger.Logger; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; + +/** + */ +public class AppendableByteArrayInputStreamTest +{ + private static final Logger log = new Logger(AppendableByteArrayInputStreamTest.class); + + @Test + public void testSingleByteArray() throws Exception + { + byte[][] bytesToWrite = new byte[][]{{0, 1, 2, 3, 4, 5, 6}}; + + testAll(bytesToWrite, bytesToWrite[0]); + } + + @Test + public void testMultiByteArray() throws Exception + { + byte[] expectedBytes = new byte[]{0, 1, 2, 3, 4, 5, 6}; + + testAll(new byte[][]{{0, 1, 2, 3}, {4, 5, 6}}, expectedBytes); + testAll(new byte[][]{{0, 1}, {2, 3}, {4, 5, 6}}, expectedBytes); + testAll(new byte[][]{{0}, {1}, {2}, {3}, {4}, {5}, {6}}, expectedBytes); + } + + public void testAll(byte[][] writtenBytes, byte[] expectedBytes) throws Exception + { + testFullRead(writtenBytes, expectedBytes); + testIndividualRead(writtenBytes, expectedBytes); + } + + public void testIndividualRead(byte[][] writtenBytes, byte[] expectedBytes) throws IOException + { + AppendableByteArrayInputStream in = new AppendableByteArrayInputStream(); + + for (byte[] writtenByte : writtenBytes) { + in.add(writtenByte); + } + + for (int i = 0; i < expectedBytes.length; i++) { + final int readByte = in.read(); + if (expectedBytes[i] != (byte) readByte) { + Assert.assertEquals(StringUtils.format("%s[%d]", Arrays.toString(expectedBytes), i), expectedBytes[i], readByte); + } + } + }
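+ // Bulk-read counterpart of testIndividualRead: a single read(byte[]) sized to the whole payload is
+ // expected to stitch together every appended chunk and report the full length in one call.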
+ public void testFullRead(byte[][] writtenBytes, byte[] expectedBytes) throws IOException + { + AppendableByteArrayInputStream in = new AppendableByteArrayInputStream(); + byte[] readBytes = new byte[expectedBytes.length]; + + for (byte[] writtenByte : writtenBytes) { + in.add(writtenByte); + } + Assert.assertEquals(readBytes.length, in.read(readBytes)); + Assert.assertArrayEquals(expectedBytes, readBytes); + } + + @Test + public void testReadsAndWritesInterspersed() throws Exception + { + AppendableByteArrayInputStream in = new AppendableByteArrayInputStream(); + + in.add(new byte[]{0, 1, 2}); + + byte[] readBytes = new byte[3]; + Assert.assertEquals(3, in.read(readBytes)); + Assert.assertArrayEquals(new byte[]{0, 1, 2}, readBytes); + + in.add(new byte[]{3, 4}); + in.add(new byte[]{5, 6, 7}); + + readBytes = new byte[5]; + Assert.assertEquals(5, in.read(readBytes)); + Assert.assertArrayEquals(new byte[]{3, 4, 5, 6, 7}, readBytes); + } + + @Test + public void testReadLessThanWritten() throws Exception + { + AppendableByteArrayInputStream in = new AppendableByteArrayInputStream(); + + in.add(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + + byte[] readBytes = new byte[4]; + + Assert.assertEquals(4, in.read(readBytes)); + Assert.assertArrayEquals(new byte[]{0, 1, 2, 3}, readBytes); + + Assert.assertEquals(4, in.read(readBytes)); + Assert.assertArrayEquals(new byte[]{4, 5, 6, 7}, readBytes); + + Assert.assertEquals(2, in.read(readBytes, 0, 2)); + Assert.assertArrayEquals(new byte[]{8, 9, 6, 7}, readBytes); + } + + @Test + public void testReadLessThanWrittenMultiple() throws Exception + { + AppendableByteArrayInputStream in = new AppendableByteArrayInputStream(); + + in.add(new byte[]{0, 1, 2}); + in.add(new byte[]{3, 4, 5}); + in.add(new byte[]{6, 7}); + in.add(new byte[]{8, 9}); + + byte[] readBytes = new byte[4]; + + Assert.assertEquals(4, in.read(readBytes)); + Assert.assertArrayEquals(new byte[]{0, 1, 2, 3}, readBytes); + + Assert.assertEquals(4, in.read(readBytes)); + Assert.assertArrayEquals(new byte[]{4, 5, 6, 7}, readBytes); + + Assert.assertEquals(2, in.read(readBytes, 0, 2)); + Assert.assertArrayEquals(new byte[]{8, 9, 6, 7}, readBytes); + } + + @Test + public void testBlockingRead() throws Exception + { + final AppendableByteArrayInputStream in = new AppendableByteArrayInputStream(); + + in.add(new byte[]{0, 1, 2, 3, 4}); + + Assert.assertEquals(5, in.available()); + + Future<byte[]> bytesFuture = Executors.newSingleThreadExecutor().submit( + new Callable<byte[]>() + { + @Override + public byte[] call() throws Exception + { + byte[] readBytes = new byte[10]; + in.read(readBytes); + return readBytes; + } + } + ); + + int count = 0; + while (in.available() != 0) { + if (count >= 100) { + Assert.fail("available didn't become 0 fast enough."); + } + count++; + Thread.sleep(10); + } + + in.add(new byte[]{5, 6, 7, 8, 9, 10}); + + count = 0; + while (in.available() != 1) { + if (count >= 100) { + Assert.fail("available didn't become 1 fast enough."); + } + count++; + Thread.sleep(10); + } + + Assert.assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, bytesFuture.get()); + Assert.assertEquals(10, in.read()); + Assert.assertEquals(0, in.available()); + } + + @Test + public void testAddEmptyByteArray() throws Exception + { + AppendableByteArrayInputStream in = new AppendableByteArrayInputStream(); + + in.add(new byte[]{}); + in.add(new byte[]{1}); + in.add(new byte[]{}); + in.done(); + + Assert.assertEquals(1, in.available()); + Assert.assertEquals(1, in.read()); + Assert.assertEquals(0,
in.available()); + Assert.assertEquals(-1, in.read()); + } + + @Test + public void testExceptionUnblocks() throws InterruptedException + { + final AppendableByteArrayInputStream in = new AppendableByteArrayInputStream(); + in.add(new byte[]{}); + in.add(new byte[]{1}); + in.add(new byte[]{}); + final AtomicReference<IOException> exceptionThrown = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + Executors.newSingleThreadExecutor().submit( + new Callable<byte[]>() + { + @Override + public byte[] call() throws Exception + { + try { + byte[] readBytes = new byte[10]; + while (in.read(readBytes) != -1) { + } + return readBytes; + } + catch (IOException e) { + exceptionThrown.set(e); + latch.countDown(); + } + return null; + } + } + ); + + // Injecting a failure should wake the reader blocked in read() with an IOException wrapping it. + Exception expected = new Exception(); + in.exceptionCaught(expected); + + latch.await(); + Assert.assertEquals(expected, exceptionThrown.get().getCause()); + + try { + in.read(); + Assert.fail("expected read() to keep throwing after exceptionCaught"); + } + catch (IOException thrown) { + Assert.assertEquals(expected, thrown.getCause()); + } + + } +} diff --git a/java-util/src/test/java/io/druid/java/util/http/client/pool/ResourcePoolTest.java b/java-util/src/test/java/io/druid/java/util/http/client/pool/ResourcePoolTest.java new file mode 100644 index 000000000000..b6167dcd831e --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/http/client/pool/ResourcePoolTest.java @@ -0,0 +1,328 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package io.druid.java.util.http.client.pool; + +import io.druid.java.util.common.ISE; +import org.easymock.EasyMock; +import org.easymock.IAnswer; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + */ +public class ResourcePoolTest +{ + ResourceFactory<String, String> resourceFactory; + ResourcePool<String, String> pool; + + @Before + public void setUp() throws Exception + { + resourceFactory = (ResourceFactory<String, String>) EasyMock.createMock(ResourceFactory.class); + + EasyMock.replay(resourceFactory); + pool = new ResourcePool<>( + resourceFactory, + new ResourcePoolConfig(2, TimeUnit.MINUTES.toMillis(4)) + ); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + } + + @Test + public void testSanity() throws Exception + { + primePool(); + EasyMock.replay(resourceFactory); + } + + private void primePool() + { + EasyMock.expect(resourceFactory.generate("billy")).andAnswer(new StringIncrementingAnswer("billy")).times(2); + EasyMock.expect(resourceFactory.generate("sally")).andAnswer(new StringIncrementingAnswer("sally")).times(2); + EasyMock.expect(resourceFactory.isGood("billy0")).andReturn(true).times(1); + EasyMock.expect(resourceFactory.isGood("sally0")).andReturn(true).times(1); + EasyMock.replay(resourceFactory); + + ResourceContainer<String> billyString = pool.take("billy"); + ResourceContainer<String> sallyString = pool.take("sally"); + Assert.assertEquals("billy0", billyString.get()); + Assert.assertEquals("sally0", sallyString.get()); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + + billyString.returnResource(); + sallyString.returnResource(); + } + + @Test + public void testFailedResource() throws Exception + { + primePool(); + + EasyMock.expect(resourceFactory.isGood("billy1")).andReturn(false).times(1); + resourceFactory.close("billy1"); + EasyMock.expectLastCall(); + EasyMock.expect(resourceFactory.generate("billy")).andReturn("billy2").times(1); + EasyMock.replay(resourceFactory); + + ResourceContainer<String> billy = pool.take("billy"); + Assert.assertEquals("billy2", billy.get()); + billy.returnResource(); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + } + + @Test + public void testFaultyFailedResourceReplacement() throws Exception + { + primePool(); + + EasyMock.expect(resourceFactory.isGood("billy1")).andReturn(false).times(1); + resourceFactory.close("billy1"); + EasyMock.expectLastCall(); + EasyMock.expect(resourceFactory.generate("billy")).andThrow(new ISE("where's billy?")).times(1); + EasyMock.expect(resourceFactory.isGood("billy0")).andReturn(false).times(1); + resourceFactory.close("billy0"); + EasyMock.expectLastCall(); + EasyMock.expect(resourceFactory.generate("billy")).andThrow(new ISE("where's billy?")).times(1); + EasyMock.expect(resourceFactory.generate("billy")).andReturn("billy2").times(1); + EasyMock.replay(resourceFactory); + + IllegalStateException e1 = null; + try { + pool.take("billy"); + } + catch (IllegalStateException e) { + e1 = e; + } + Assert.assertNotNull("exception", e1); + Assert.assertEquals("where's billy?", e1.getMessage()); + + IllegalStateException e2 = null; + try { + pool.take("billy"); + } + catch (IllegalStateException e) { + e2 = e; + } + Assert.assertNotNull("exception", e2); + Assert.assertEquals("where's billy?", e2.getMessage()); + + ResourceContainer<String> billy = pool.take("billy"); + Assert.assertEquals("billy2", billy.get()); + billy.returnResource(); + + EasyMock.verify(resourceFactory); +
EasyMock.reset(resourceFactory); + } + + @Test + public void testTakeMoreThanAllowed() throws Exception + { + primePool(); + EasyMock.expect(resourceFactory.isGood("billy1")).andReturn(true).times(1); + EasyMock.expect(resourceFactory.isGood("billy0")).andReturn(true).times(1); + EasyMock.replay(resourceFactory); + + CountDownLatch latch1 = new CountDownLatch(1); + CountDownLatch latch2 = new CountDownLatch(1); + CountDownLatch latch3 = new CountDownLatch(1); + + MyThread billy1Thread = new MyThread(latch1, "billy"); + billy1Thread.start(); + billy1Thread.waitForValueToBeGotten(1, TimeUnit.SECONDS); + MyThread billy0Thread = new MyThread(latch2, "billy"); + billy0Thread.start(); + billy0Thread.waitForValueToBeGotten(1, TimeUnit.SECONDS); + + // The pool is sized at 2, so this third take() must block until one of the others returns its resource. + MyThread blockedThread = new MyThread(latch3, "billy"); + blockedThread.start(); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + EasyMock.expect(resourceFactory.isGood("billy0")).andReturn(true).times(1); + EasyMock.replay(resourceFactory); + + latch2.countDown(); + blockedThread.waitForValueToBeGotten(1, TimeUnit.SECONDS); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + + latch1.countDown(); + latch3.countDown(); + + Assert.assertEquals("billy1", billy1Thread.getValue()); + Assert.assertEquals("billy0", billy0Thread.getValue()); + Assert.assertEquals("billy0", blockedThread.getValue()); + } + + @Test + public void testCloseUnblocks() throws InterruptedException + { + primePool(); + EasyMock.expect(resourceFactory.isGood("billy1")).andReturn(true).times(1); + EasyMock.expect(resourceFactory.isGood("billy0")).andReturn(true).times(1); + resourceFactory.close("sally1"); + EasyMock.expectLastCall().times(1); + resourceFactory.close("sally0"); + EasyMock.expectLastCall().times(1); + EasyMock.replay(resourceFactory); + CountDownLatch latch1 = new CountDownLatch(1); + CountDownLatch latch2 = new CountDownLatch(1); + CountDownLatch latch3 = new CountDownLatch(1); + + MyThread billy1Thread = new MyThread(latch1, "billy"); + billy1Thread.start(); + billy1Thread.waitForValueToBeGotten(1, TimeUnit.SECONDS); + MyThread billy0Thread = new MyThread(latch2, "billy"); + billy0Thread.start(); + billy0Thread.waitForValueToBeGotten(1, TimeUnit.SECONDS); + + MyThread blockedThread = new MyThread(latch3, "billy"); + blockedThread.start(); + blockedThread.waitForValueToBeGotten(1, TimeUnit.SECONDS); + pool.close(); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + EasyMock.replay(resourceFactory); + + latch2.countDown(); + blockedThread.waitForValueToBeGotten(1, TimeUnit.SECONDS); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + + latch1.countDown(); + latch3.countDown(); + + Assert.assertEquals("billy1", billy1Thread.getValue()); + Assert.assertEquals("billy0", billy0Thread.getValue()); + blockedThread.join(); + // pool returns null after close + Assert.assertNull(blockedThread.getValue()); + } + + @Test + public void testTimedOutResource() throws Exception + { + resourceFactory = (ResourceFactory<String, String>) EasyMock.createMock(ResourceFactory.class); + + pool = new ResourcePool<>( + resourceFactory, + new ResourcePoolConfig(2, TimeUnit.MILLISECONDS.toMillis(10)) + ); + + EasyMock.expect(resourceFactory.generate("billy")).andAnswer(new StringIncrementingAnswer("billy")).times(2); + EasyMock.expect(resourceFactory.isGood("billy0")).andReturn(true).times(1); + EasyMock.replay(resourceFactory); + + ResourceContainer<String> billyString = pool.take("billy"); +
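+ // The mocked factory answers generate("billy") twice up front, so this first take() should come back
+ // with "billy0" before the 10ms timeout has a chance to expire.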
Assert.assertEquals("billy0", billyString.get()); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + + billyString.returnResource(); + + // make sure resources have been timed out. + Thread.sleep(100); + + EasyMock.expect(resourceFactory.generate("billy")).andReturn("billy1").times(1); + resourceFactory.close("billy1"); + EasyMock.expect(resourceFactory.isGood("billy1")).andReturn(true).times(1); + EasyMock.replay(resourceFactory); + + ResourceContainer<String> billy = pool.take("billy"); + Assert.assertEquals("billy1", billy.get()); + billy.returnResource(); + + EasyMock.verify(resourceFactory); + EasyMock.reset(resourceFactory); + } + + private static class StringIncrementingAnswer implements IAnswer<String> + { + int count = 0; + private String string; + + public StringIncrementingAnswer(String string) + { + this.string = string; + } + + @Override + public String answer() throws Throwable + { + return string + count++; + } + } + + private class MyThread extends Thread + { + private final CountDownLatch gotValueLatch = new CountDownLatch(1); + + private final CountDownLatch latch1; + private String resourceName; + + volatile String value = null; + + public MyThread(CountDownLatch latch1, String resourceName) + { + this.latch1 = latch1; + this.resourceName = resourceName; + } + + @Override + public void run() + { + ResourceContainer<String> resourceContainer = pool.take(resourceName); + value = resourceContainer.get(); + gotValueLatch.countDown(); + try { + latch1.await(); + } + catch (InterruptedException e) { + // just fall through and return the resource + } + resourceContainer.returnResource(); + } + + public String getValue() + { + return value; + } + + public void waitForValueToBeGotten(long length, TimeUnit timeUnit) throws InterruptedException + { + gotValueLatch.await(length, timeUnit); + } + } +} diff --git a/java-util/src/test/java/io/druid/java/util/http/client/response/SequenceInputStreamResponseHandlerTest.java b/java-util/src/test/java/io/druid/java/util/http/client/response/SequenceInputStreamResponseHandlerTest.java new file mode 100644 index 000000000000..22554743d229 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/http/client/response/SequenceInputStreamResponseHandlerTest.java @@ -0,0 +1,226 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package io.druid.java.util.http.client.response; + +import org.jboss.netty.buffer.BigEndianHeapChannelBuffer; +import org.jboss.netty.handler.codec.http.DefaultHttpChunk; +import org.jboss.netty.handler.codec.http.DefaultHttpResponse; +import org.jboss.netty.handler.codec.http.HttpResponse; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.jboss.netty.handler.codec.http.HttpVersion; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Random; + +public class SequenceInputStreamResponseHandlerTest +{ + private static final int TOTAL_BYTES = 1 << 10; + private static final ArrayList<byte[]> BYTE_LIST = new ArrayList<>(); + private static final Random RANDOM = new Random(378134789L); + private static byte[] allBytes = new byte[TOTAL_BYTES]; + + @BeforeClass + public static void setUp() + { + // Carve allBytes into randomly sized chunks, remembering each chunk for later comparisons. + final ByteBuffer buffer = ByteBuffer.wrap(allBytes); + while (buffer.hasRemaining()) { + final byte[] bytes = new byte[Math.min(Math.abs(RANDOM.nextInt()) % 128, buffer.remaining())]; + RANDOM.nextBytes(bytes); + buffer.put(bytes); + BYTE_LIST.add(bytes); + } + } + + @AfterClass + public static void tearDown() + { + BYTE_LIST.clear(); + allBytes = null; + } + + private static void fillBuff(InputStream inputStream, byte[] buff) throws IOException + { + int off = 0; + while (off < buff.length) { + final int read = inputStream.read(buff, off, buff.length - off); + if (read < 0) { + throw new IOException("Unexpected end of stream"); + } + off += read; + } + } + + @Test(expected = TesterException.class) + public void testExceptionalChunkedStream() throws IOException + { + Iterator<byte[]> it = BYTE_LIST.iterator(); + + SequenceInputStreamResponseHandler responseHandler = new SequenceInputStreamResponseHandler(); + final HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + response.setChunked(true); + ClientResponse<InputStream> clientResponse = responseHandler.handleResponse(response); + final int failAt = Math.abs(RANDOM.nextInt()) % allBytes.length; + while (it.hasNext()) { + // Chunk whose backing buffer throws once a copy crosses the chosen failure offset. + final DefaultHttpChunk chunk = new DefaultHttpChunk( + new BigEndianHeapChannelBuffer(it.next()) + { + @Override + public void getBytes(int index, byte[] dst, int dstIndex, int length) + { + if (dstIndex + length >= failAt) { + throw new TesterException(); + } + super.getBytes(index, dst, dstIndex, length); + } + } + ); + clientResponse = responseHandler.handleChunk(clientResponse, chunk); + } + clientResponse = responseHandler.done(clientResponse); + + final InputStream stream = clientResponse.getObj(); + final byte[] buff = new byte[allBytes.length]; + fillBuff(stream, buff); + } + + public static class TesterException extends RuntimeException + { + } + + @Test(expected = TesterException.class) + public void testExceptionalSingleStream() throws IOException + { + SequenceInputStreamResponseHandler responseHandler = new SequenceInputStreamResponseHandler(); + final HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + response.setChunked(false); + response.setContent( + new BigEndianHeapChannelBuffer(allBytes) + { + @Override + public void getBytes(int index, byte[] dst, int dstIndex, int length) + { + if (dstIndex + length >= allBytes.length) { + throw new TesterException(); + } +
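+ // Still short of the failure offset, so delegate the copy to the real buffer.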
super.getBytes(index, dst, dstIndex, length); + } + } + ); + ClientResponse<InputStream> clientResponse = responseHandler.handleResponse(response); + clientResponse = responseHandler.done(clientResponse); + + final InputStream stream = clientResponse.getObj(); + final byte[] buff = new byte[allBytes.length]; + fillBuff(stream, buff); + } + + @Test + public void simpleMultiStreamTest() throws IOException + { + Iterator<byte[]> it = BYTE_LIST.iterator(); + + SequenceInputStreamResponseHandler responseHandler = new SequenceInputStreamResponseHandler(); + final HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + response.setChunked(true); + ClientResponse<InputStream> clientResponse = responseHandler.handleResponse(response); + while (it.hasNext()) { + final DefaultHttpChunk chunk = new DefaultHttpChunk(new BigEndianHeapChannelBuffer(it.next())); + clientResponse = responseHandler.handleChunk(clientResponse, chunk); + } + clientResponse = responseHandler.done(clientResponse); + + final InputStream stream = clientResponse.getObj(); + final InputStream expectedStream = new ByteArrayInputStream(allBytes); + int read = 0; + while (read < allBytes.length) { + final byte[] expectedBytes = new byte[Math.min(Math.abs(RANDOM.nextInt()) % 128, allBytes.length - read)]; + final byte[] actualBytes = new byte[expectedBytes.length]; + fillBuff(stream, actualBytes); + fillBuff(expectedStream, expectedBytes); + Assert.assertArrayEquals(expectedBytes, actualBytes); + read += expectedBytes.length; + } + Assert.assertEquals(allBytes.length, responseHandler.getByteCount()); + } + + @Test + public void alignedMultiStreamTest() throws IOException + { + Iterator<byte[]> it = BYTE_LIST.iterator(); + + SequenceInputStreamResponseHandler responseHandler = new SequenceInputStreamResponseHandler(); + final HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + response.setChunked(true); + ClientResponse<InputStream> clientResponse = responseHandler.handleResponse(response); + while (it.hasNext()) { + final DefaultHttpChunk chunk = new DefaultHttpChunk(new BigEndianHeapChannelBuffer(it.next())); + clientResponse = responseHandler.handleChunk(clientResponse, chunk); + } + clientResponse = responseHandler.done(clientResponse); + + final InputStream stream = clientResponse.getObj(); + final InputStream expectedStream = new ByteArrayInputStream(allBytes); + + for (byte[] bytes : BYTE_LIST) { + final byte[] expectedBytes = new byte[bytes.length]; + final byte[] actualBytes = new byte[expectedBytes.length]; + fillBuff(stream, actualBytes); + fillBuff(expectedStream, expectedBytes); + Assert.assertArrayEquals(expectedBytes, actualBytes); + Assert.assertArrayEquals(expectedBytes, bytes); + } + Assert.assertEquals(allBytes.length, responseHandler.getByteCount()); + } + + @Test + public void simpleSingleStreamTest() throws IOException + { + SequenceInputStreamResponseHandler responseHandler = new SequenceInputStreamResponseHandler(); + final HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + response.setChunked(false); + response.setContent(new BigEndianHeapChannelBuffer(allBytes)); + ClientResponse<InputStream> clientResponse = responseHandler.handleResponse(response); + clientResponse = responseHandler.done(clientResponse); + + final InputStream stream = clientResponse.getObj(); + final InputStream expectedStream = new ByteArrayInputStream(allBytes); + int read = 0; + while (read < allBytes.length) { + final byte[] expectedBytes = new
byte[Math.min(Math.abs(RANDOM.nextInt()) % 128, allBytes.length - read)]; + final byte[] actualBytes = new byte[expectedBytes.length]; + fillBuff(stream, actualBytes); + fillBuff(expectedStream, expectedBytes); + Assert.assertArrayEquals(expectedBytes, actualBytes); + read += expectedBytes.length; + } + Assert.assertEquals(allBytes.length, responseHandler.getByteCount()); + } + +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/CpuAcctDeltaMonitorTest.java b/java-util/src/test/java/io/druid/java/util/metrics/CpuAcctDeltaMonitorTest.java new file mode 100644 index 000000000000..81273ccfec7f --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/CpuAcctDeltaMonitorTest.java @@ -0,0 +1,105 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.metrics.cgroups.CgroupDiscoverer; +import io.druid.java.util.metrics.cgroups.ProcCgroupDiscoverer; +import io.druid.java.util.metrics.cgroups.TestUtils; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; + +public class CpuAcctDeltaMonitorTest +{ + @Rule + public ExpectedException expectedException = ExpectedException.none(); + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + private File procDir; + private File cgroupDir; + private File cpuacctDir; + private CgroupDiscoverer discoverer; + + @Before + public void setUp() throws IOException + { + cgroupDir = temporaryFolder.newFolder(); + procDir = temporaryFolder.newFolder(); + discoverer = new ProcCgroupDiscoverer(procDir.toPath()); + TestUtils.setUpCgroups(procDir, cgroupDir); + cpuacctDir = new File( + cgroupDir, + "cpu,cpuacct/system.slice/mesos-agent-druid.service/f12ba7e0-fa16-462e-bb9d-652ccc27f0ee" + ); + Assert.assertTrue((cpuacctDir.isDirectory() && cpuacctDir.exists()) || cpuacctDir.mkdirs()); + TestUtils.copyResource("/cpuacct.usage_all", new File(cpuacctDir, "cpuacct.usage_all")); + } + + @Test + public void testMonitorWontCrash() throws Exception + { + final CpuAcctDeltaMonitor monitor = new CpuAcctDeltaMonitor( + "some_feed", + ImmutableMap.of(), + cgroup -> { + throw new RuntimeException("Should continue"); + } + ); + final StubServiceEmitter emitter = new StubServiceEmitter("service", "host"); + monitor.doMonitor(emitter); + monitor.doMonitor(emitter); + monitor.doMonitor(emitter); + Assert.assertTrue(emitter.getEvents().isEmpty()); + } + + @Test + public void testSimpleMonitor() throws Exception + 
{ + final File cpuacct = new File(cpuacctDir, "cpuacct.usage_all"); + try (final FileOutputStream fos = new FileOutputStream(cpuacct)) { + fos.write(StringUtils.toUtf8("cpu user system\n")); + for (int i = 0; i < 128; ++i) { + fos.write(StringUtils.toUtf8(StringUtils.format("%d 0 0\n", i))); + } + } + final CpuAcctDeltaMonitor monitor = new CpuAcctDeltaMonitor( + "some_feed", + ImmutableMap.of(), + (cgroup) -> cpuacctDir.toPath() + ); + final StubServiceEmitter emitter = new StubServiceEmitter("service", "host"); + Assert.assertFalse(monitor.doMonitor(emitter)); + // First should just cache + Assert.assertEquals(0, emitter.getEvents().size()); + Assert.assertTrue(cpuacct.delete()); + TestUtils.copyResource("/cpuacct.usage_all", cpuacct); + Assert.assertTrue(monitor.doMonitor(emitter)); + Assert.assertEquals(2 * 128 + 1, emitter.getEvents().size()); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/GcNameTest.java b/java-util/src/test/java/io/druid/java/util/metrics/GcNameTest.java new file mode 100644 index 000000000000..21a84277718d --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/GcNameTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import org.gridkit.lab.jvm.perfdata.JStatData; + +public class GcNameTest +{ + + public static void main(String[] args) + { + JStatData jStatData = JStatData.connect(SigarUtil.getCurrentProcessId()); + System.out.println(jStatData.getAllCounters().get("sun.gc.collector.0.name").getValue()); + System.out.println(jStatData.getAllCounters().get("sun.gc.collector.1.name").getValue()); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/JvmMonitorTest.java b/java-util/src/test/java/io/druid/java/util/metrics/JvmMonitorTest.java new file mode 100644 index 000000000000..ac04f9d2f06c --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/JvmMonitorTest.java @@ -0,0 +1,148 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.util.List; + +public class JvmMonitorTest +{ + + @Test(timeout = 60000) + public void testGcCounts() throws InterruptedException + { + GcTrackingEmitter emitter = new GcTrackingEmitter(); + + final ServiceEmitter serviceEmitter = new ServiceEmitter("test", "localhost", emitter); + serviceEmitter.start(); + final JvmMonitor jvmMonitor = new JvmMonitor(); + + while (true) { + // generate some garbage to see gc counters incremented + @SuppressWarnings("unused") + byte[] b = new byte[1024 * 1024 * 50]; + emitter.reset(); + jvmMonitor.doMonitor(serviceEmitter); + if (emitter.gcSeen()) { + return; + } + Thread.sleep(10); + } + } + + private static class GcTrackingEmitter implements Emitter + { + private Number oldGcCount; + private Number oldGcCpu; + private Number youngGcCount; + private Number youngGcCpu; + + @Override + public void start() + { + + } + + void reset() + { + oldGcCount = null; + oldGcCpu = null; + youngGcCount = null; + youngGcCpu = null; + } + + @Override + public void emit(Event e) + { + ServiceMetricEvent event = (ServiceMetricEvent) e; + String gcGen = null; + if (event.toMap().get("gcGen") != null) { + gcGen = ((List) event.toMap().get("gcGen")).get(0).toString(); + } + + switch (event.getMetric() + "/" + gcGen) { + case "jvm/gc/count/old": + oldGcCount = event.getValue(); + break; + case "jvm/gc/cpu/old": + oldGcCpu = event.getValue(); + break; + case "jvm/gc/count/young": + youngGcCount = event.getValue(); + break; + case "jvm/gc/cpu/young": + youngGcCpu = event.getValue(); + break; + } + } + + boolean gcSeen() + { + return oldGcSeen() || youngGcSeen(); + } + + private boolean oldGcSeen() + { + boolean oldGcCountSeen = oldGcCount != null && oldGcCount.longValue() > 0; + boolean oldGcCpuSeen = oldGcCpu != null && oldGcCpu.longValue() > 0; + if (oldGcCountSeen || oldGcCpuSeen) { + System.out.println("old count: " + oldGcCount + ", cpu: " + oldGcCpu); + } + Assert.assertFalse( + "expected to see old gc count and cpu both zero or non-existent or both positive", + oldGcCountSeen ^ oldGcCpuSeen + ); + return oldGcCountSeen; + } + + private boolean youngGcSeen() + { + boolean youngGcCountSeen = youngGcCount != null && youngGcCount.longValue() > 0; + boolean youngGcCpuSeen = youngGcCpu != null && youngGcCpu.longValue() > 0; + if (youngGcCountSeen || youngGcCpuSeen) { + System.out.println("young count: " + youngGcCount + ", cpu: " + youngGcCpu); + } + Assert.assertFalse( + "expected to see young gc count and cpu both zero/non-existent or both positive", + youngGcCountSeen ^ youngGcCpuSeen + ); + return youngGcCountSeen; + } + + @Override + public void flush() throws IOException + { + + } + + @Override + public void close() throws IOException + { + + } + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/JvmPidDiscovererTest.java b/java-util/src/test/java/io/druid/java/util/metrics/JvmPidDiscovererTest.java new file mode 100644 index 000000000000..98ea6ca261b0 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/JvmPidDiscovererTest.java @@ -0,0 +1,32 @@ +/* + * Licensed to Metamarkets 
Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import org.junit.Assert; +import org.junit.Test; + +public class JvmPidDiscovererTest +{ + @Test + public void getPid() throws Exception + { + Assert.assertNotNull(JvmPidDiscoverer.instance().getPid()); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/MonitorUtilsTest.java b/java-util/src/test/java/io/druid/java/util/metrics/MonitorUtilsTest.java new file mode 100644 index 000000000000..adecb0145e0a --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/MonitorUtilsTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Map; + +public class MonitorUtilsTest +{ + @Test + public void testAddDimensionsToBuilder() + { + ServiceMetricEvent.Builder builder = new ServiceMetricEvent.Builder(); + Map<String, String[]> dimensions = ImmutableMap.of( + "dim1", new String[]{"value1"}, + "dim2", new String[]{"value2.1", "value2.2"} + ); + + MonitorUtils.addDimensionsToBuilder(builder, dimensions); + + Assert.assertEquals(ImmutableList.of("value1"), builder.getDimension("dim1")); + Assert.assertEquals(ImmutableList.of("value2.1", "value2.2"), builder.getDimension("dim2")); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/MonitorsTest.java b/java-util/src/test/java/io/druid/java/util/metrics/MonitorsTest.java new file mode 100644 index 000000000000..885118b96703 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/MonitorsTest.java @@ -0,0 +1,66 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import com.google.common.collect.ImmutableMap; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.emitter.core.Event; +import org.junit.Assert; +import org.junit.Test; + +import java.util.List; + +public class MonitorsTest +{ + + @Test + public void testSetFeed() + { + String feed = "testFeed"; + StubServiceEmitter emitter = new StubServiceEmitter("dev/monitor-test", "localhost:0000"); + Monitor m = Monitors.createCompoundJvmMonitor(ImmutableMap.of(), feed); + m.start(); + m.monitor(emitter); + m.stop(); + checkEvents(emitter.getEvents(), feed); + } + + @Test + public void testDefaultFeed() + { + StubServiceEmitter emitter = new StubServiceEmitter("dev/monitor-test", "localhost:0000"); + Monitor m = Monitors.createCompoundJvmMonitor(ImmutableMap.of()); + m.start(); + m.monitor(emitter); + m.stop(); + checkEvents(emitter.getEvents(), "metrics"); + } + + private void checkEvents(List<Event> events, String expectedFeed) + { + Assert.assertFalse("no events emitted", events.isEmpty()); + for (Event e : events) { + if (!expectedFeed.equals(e.getFeed())) { + String message = StringUtils.format("\"feed\" in event: %s", e.toMap().toString()); + Assert.assertEquals(message, expectedFeed, e.getFeed()); + } + } + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/SigarLoadTest.java b/java-util/src/test/java/io/druid/java/util/metrics/SigarLoadTest.java new file mode 100644 index 000000000000..032097579fa0 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/SigarLoadTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package io.druid.java.util.metrics; + +import junit.framework.Assert; +import org.hyperic.sigar.Sigar; +import org.hyperic.sigar.SigarException; +import org.junit.Test; + +public class SigarLoadTest +{ + @Test + public void testSigarLoad() throws SigarException + { + Sigar sigar = SigarUtil.getSigar(); + Assert.assertTrue(sigar.getPid() > 0); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/SigarPidDiscovererTest.java b/java-util/src/test/java/io/druid/java/util/metrics/SigarPidDiscovererTest.java new file mode 100644 index 000000000000..96219b5fa6d0 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/SigarPidDiscovererTest.java @@ -0,0 +1,32 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.java.util.metrics; + +import org.junit.Test; + +public class SigarPidDiscovererTest +{ + @Test + public void simpleTest() + { + // Just make sure we don't crash + SigarPidDiscoverer.instance().getPid(); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/StubServiceEmitter.java b/java-util/src/test/java/io/druid/java/util/metrics/StubServiceEmitter.java new file mode 100644 index 000000000000..ed9aa18b7acc --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/StubServiceEmitter.java @@ -0,0 +1,63 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.metrics; + +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.ServiceEmitter; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +class StubServiceEmitter extends ServiceEmitter +{ + private final List<Event> events = new ArrayList<>(); + + public StubServiceEmitter(String service, String host) + { + super(service, host, null); + } + + @Override + public void emit(Event event) + { + events.add(event); + } + + public List<Event> getEvents() + { + return events; + } + + @Override + public void start() + { + } + + @Override + public void flush() throws IOException + { + } + + @Override + public void close() throws IOException + { + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/cgroups/CpuAcctTest.java b/java-util/src/test/java/io/druid/java/util/metrics/cgroups/CpuAcctTest.java new file mode 100644 index 000000000000..22b8973fbe8c --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/cgroups/CpuAcctTest.java @@ -0,0 +1,390 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package io.druid.java.util.metrics.cgroups; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Random; +import java.util.stream.LongStream; + + +public class CpuAcctTest +{ + @Rule + public ExpectedException expectedException = ExpectedException.none(); + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + private File procDir; + private File cgroupDir; + private CgroupDiscoverer discoverer; + + @Before + public void setUp() throws IOException + { + cgroupDir = temporaryFolder.newFolder(); + procDir = temporaryFolder.newFolder(); + discoverer = new ProcCgroupDiscoverer(procDir.toPath()); + TestUtils.setUpCgroups(procDir, cgroupDir); + final File cpuacctDir = new File( + cgroupDir, + "cpu,cpuacct/system.slice/mesos-agent-druid.service/f12ba7e0-fa16-462e-bb9d-652ccc27f0ee" + ); + Assert.assertTrue((cpuacctDir.isDirectory() && cpuacctDir.exists()) || cpuacctDir.mkdirs()); + TestUtils.copyResource("/cpuacct.usage_all", new File(cpuacctDir, "cpuacct.usage_all")); + } + + @Test + public void testWontCrash() + { + final CpuAcct cpuAcct = new CpuAcct(cgroup -> { + throw new RuntimeException("Should still continue"); + }); + final CpuAcct.CpuAcctMetric metric = cpuAcct.snapshot(); + Assert.assertEquals(0L, metric.cpuCount()); + Assert.assertEquals(0L, metric.usrTime()); + Assert.assertEquals(0L, metric.sysTime()); + } + + @Test + public void testSimpleLoad() + { + final CpuAcct cpuAcct = new CpuAcct(discoverer); + final CpuAcct.CpuAcctMetric snapshot = cpuAcct.snapshot(); + Assert.assertEquals(128, snapshot.cpuCount()); + Assert.assertArrayEquals(new long[]{ + 7344294132655L, + 28183572804378L, + 29552215219002L, + 29478124053329L, + 29829248571038L, + 30290864470719L, + 30561719193413L, + 30638606697446L, + 39251561450889L, + 39082643428276L, + 38829852195583L, + 39158341842449L, + 39490263697181L, + 39363774325162L, + 39569806302164L, + 39410558504372L, + 44907796060505L, + 42522297123640L, + 41920625622542L, + 40593391967420L, + 40350585953295L, + 40139554930678L, + 40019783380923L, + 40182686097717L, + 39778858132385L, + 40252938541440L, + 40476150948365L, + 40277874584618L, + 39938509407084L, + 39914644718371L, + 40010393213659L, + 39938252119551L, + 44958993952996L, + 42967015146867L, + 41742610896758L, + 40751067975683L, + 40390633464986L, + 40143331504478L, + 40486014164571L, + 40565630824649L, + 39976774290845L, + 39942348143441L, + 40149234675554L, + 39895306827546L, + 40062736204343L, + 39208930836306L, + 40098687814379L, + 39803234124100L, + 44894501101599L, + 43470418903266L, + 41844924510711L, + 41137017142223L, + 40958534485692L, + 40996749346830L, + 40722256755299L, + 40715123538100L, + 40756697196452L, + 40388351638364L, + 40607150623932L, + 40799783862688L, + 41085552637672L, + 40406189914954L, + 40723714534227L, + 40594766265305L, + 47966186930606L, + 40950398764685L, + 39773685629470L, + 39799299693868L, + 39962809136735L, + 39621597321912L, + 39576312003193L, + 39306677714061L, + 37450385749152L, + 37262591956707L, + 37867848418162L, + 37583170923549L, + 37565074790371L, + 37490674520644L, + 37627356285158L, + 37841963931932L, + 36467248910690L, + 37168392893625L, + 37299551044970L, + 37765703017416L, + 37799573327332L, + 38049895238765L, + 37985869086888L, + 37696241330128L, + 38292683839783L, + 38120890685615L, + 
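// ... one entry per CPU (128 in total); the values mirror the usr column of the cpuacct.usage_all fixture copied in setUp()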
38045845683675L, + 38182343881607L, + 37729375994055L, + 38074905443126L, + 37912923241296L, + 37937307782462L, + 36325058440018L, + 37157290185847L, + 37403692187351L, + 38153199365119L, + 37880374831086L, + 37651504251556L, + 37739944955714L, + 37627835848111L, + 37903369827551L, + 37981555620129L, + 37848203152449L, + 37990323769817L, + 38347560243684L, + 37887959856632L, + 37937702600487L, + 38221455324656L, + 37035158753494L, + 37519359498531L, + 38185495941617L, + 38947633192125L, + 38497334926906L, + 38621231881393L, + 38817038222494L, + 38911615674430L, + 38384669525324L, + 38597980524270L, + 38477107776771L, + 38483564156449L, + 38471547310020L, + 38827188957783L, + 38554167083817L, + 38870461161179L + }, snapshot.usrTimes()); + Assert.assertArrayEquals(new long[]{ + 4583688852335L, + 385888457233L, + 370465239852L, + 363572894675L, + 334329517808L, + 327800567541L, + 289395249935L, + 302853990791L, + 255558344564L, + 274043522998L, + 256014012773L, + 253276920707L, + 257971375228L, + 244926573383L, + 240692265222L, + 241820386298L, + 74203786299L, + 79965589957L, + 73735621808L, + 74851270413L, + 84689908189L, + 69618323977L, + 76316055513L, + 87434816528L, + 80680588218L, + 70083776283L, + 59931280896L, + 67678277638L, + 85713335794L, + 74594785949L, + 69367790895L, + 68817875729L, + 83346318662L, + 80872459027L, + 69882000641L, + 59230207177L, + 80985355290L, + 88305162767L, + 79610055475L, + 77097429500L, + 72748340407L, + 77647202034L, + 61982641775L, + 63292828955L, + 71501429739L, + 101050648913L, + 67603152691L, + 85242844849L, + 53320735254L, + 59480233446L, + 53738738094L, + 50064771695L, + 49322497528L, + 60437383202L, + 58974386647L, + 57254872107L, + 59214245666L, + 60135823463L, + 53295222550L, + 54850380995L, + 55260978656L, + 58478426264L, + 54870256138L, + 57541909382L, + 563254758724L, + 310547129324L, + 297956013630L, + 314333221636L, + 301425507083L, + 307849203392L, + 302818464346L, + 300708730620L, + 201706674463L, + 237643132530L, + 203202644049L, + 200889116684L, + 211272183842L, + 211346947120L, + 204440036165L, + 214268740497L, + 79712665639L, + 75090730135L, + 78826984017L, + 80223883420L, + 74309961837L, + 73501632933L, + 84726122364L, + 89432904192L, + 86169574674L, + 61631022854L, + 63064271269L, + 71350572902L, + 79603442903L, + 80632248259L, + 73143984225L, + 68368595815L, + 93212419757L, + 79875769874L, + 71939175528L, + 61279926839L, + 80531076242L, + 97919090064L, + 62379356509L, + 73687439569L, + 65237344029L, + 88874846168L, + 73032242506L, + 66715221936L, + 75213471783L, + 85339287415L, + 70516346051L, + 80265423715L, + 55020848133L, + 68103451501L, + 56282008328L, + 57420205172L, + 51517654341L, + 53668755335L, + 58390679981L, + 59254129440L, + 60781018005L, + 60445939750L, + 62771597837L, + 57294449683L, + 57716404007L, + 55643587481L, + 53593212339L, + 52866253864L + }, snapshot.sysTimes()); + Assert.assertEquals(LongStream.of(snapshot.sysTimes()).sum(), snapshot.sysTime()); + Assert.assertEquals(LongStream.of(snapshot.usrTimes()).sum(), snapshot.usrTime()); + Assert.assertEquals( + LongStream.of(snapshot.sysTimes()).sum() + LongStream.of(snapshot.usrTimes()).sum(), + snapshot.time() + ); + } + + @Test + public void testSimpleMetricFunctions() + { + final long[] usrTime = new long[]{1, 2, 3}; + final long[] sysTime = new long[]{4, 5, 6}; + final CpuAcct.CpuAcctMetric metric = new CpuAcct.CpuAcctMetric(usrTime, sysTime); + Assert.assertEquals(6, metric.usrTime()); + Assert.assertEquals(15, metric.sysTime()); + 
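// usrTime()/sysTime() are totals summed across CPUs; the per-CPU values remain
// available via usrTimes()/sysTimes() and the indexed accessors, all checked below.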
Assert.assertArrayEquals(usrTime, metric.usrTimes()); + Assert.assertArrayEquals(sysTime, metric.sysTimes()); + for (int i = 0; i < usrTime.length; ++i) { + Assert.assertEquals(usrTime[i], metric.usrTime(i)); + } + for (int i = 0; i < sysTime.length; ++i) { + Assert.assertEquals(sysTime[i], metric.sysTime(i)); + } + } + + @Test + public void testDiff() + { + final Random random = new Random(364781L); + final long[] zeroes = new long[32]; + Arrays.fill(zeroes, 0); + final long[] usr = new long[zeroes.length]; + final long[] sys = new long[zeroes.length]; + long total = 0L; + for (int i = 0; i < usr.length; ++i) { + int add = random.nextInt(Integer.MAX_VALUE >>> 2); + usr[i] = add; + sys[i] = add << 1; + total += add; + } + final CpuAcct.CpuAcctMetric metric0 = new CpuAcct.CpuAcctMetric(zeroes, zeroes); + final CpuAcct.CpuAcctMetric metric1 = new CpuAcct.CpuAcctMetric(usr, sys); + final CpuAcct.CpuAcctMetric diff = metric1.cumulativeSince(metric0); + Assert.assertEquals(total, diff.usrTime()); + Assert.assertEquals(total << 1, diff.sysTime()); + Assert.assertNotEquals(0, total); + final CpuAcct.CpuAcctMetric zeroDiff = metric1.cumulativeSince(metric1); + Assert.assertEquals(0, zeroDiff.usrTime()); + Assert.assertEquals(0, zeroDiff.sysTime()); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/cgroups/ProcCgroupDiscovererTest.java b/java-util/src/test/java/io/druid/java/util/metrics/cgroups/ProcCgroupDiscovererTest.java new file mode 100644 index 000000000000..195422e59523 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/cgroups/ProcCgroupDiscovererTest.java @@ -0,0 +1,91 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.metrics.cgroups; + +import com.google.common.collect.ImmutableSet; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.nio.file.Paths; + +public class ProcCgroupDiscovererTest +{ + @Rule + public ExpectedException expectedException = ExpectedException.none(); + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + private File procDir; + private File cgroupDir; + private CgroupDiscoverer discoverer; + + @Before + public void setUp() throws Exception + { + cgroupDir = temporaryFolder.newFolder(); + procDir = temporaryFolder.newFolder(); + discoverer = new ProcCgroupDiscoverer(procDir.toPath()); + TestUtils.setUpCgroups(procDir, cgroupDir); + } + + @Test + public void testSimpleProc() throws Exception + { + Assert.assertEquals( + new File( + cgroupDir, + "cpu,cpuacct/system.slice/mesos-agent-druid.service/f12ba7e0-fa16-462e-bb9d-652ccc27f0ee" + ).toPath(), + discoverer.discover("cpu") + ); + } + + @Test + public void testParse() throws Exception + { + final ProcCgroupDiscoverer.ProcMountsEntry entry = ProcCgroupDiscoverer.ProcMountsEntry.parse( + "/dev/md126 /ebs xfs rw,seclabel,noatime,attr2,inode64,sunit=1024,swidth=16384,noquota 0 0" + ); + Assert.assertEquals("/dev/md126", entry.dev); + Assert.assertEquals(Paths.get("/ebs"), entry.path); + Assert.assertEquals("xfs", entry.type); + Assert.assertEquals(ImmutableSet.of( + "rw", + "seclabel", + "noatime", + "attr2", + "inode64", + "sunit=1024", + "swidth=16384", + "noquota" + ), entry.options); + } + + @Test + public void testNullCgroup() + { + expectedException.expect(NullPointerException.class); + Assert.assertNull(new ProcCgroupDiscoverer(procDir.toPath()).discover(null)); + } +} diff --git a/java-util/src/test/java/io/druid/java/util/metrics/cgroups/TestUtils.java b/java-util/src/test/java/io/druid/java/util/metrics/cgroups/TestUtils.java new file mode 100644 index 000000000000..1ddc006ca567 --- /dev/null +++ b/java-util/src/test/java/io/druid/java/util/metrics/cgroups/TestUtils.java @@ -0,0 +1,63 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.java.util.metrics.cgroups; + +import io.druid.java.util.common.StringUtils; +import org.junit.Assert; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.util.regex.Pattern; + +public class TestUtils +{ + public static void setUpCgroups( + File procDir, + File cgroupDir + ) throws IOException + { + final File procMountsTemplate = new File(procDir, "mounts.template"); + final File procMounts = new File(procDir, "mounts"); + copyResource("/proc.mounts", procMountsTemplate); + + final String procMountsString = StringUtils.fromUtf8(Files.readAllBytes(procMountsTemplate.toPath())); + Files.write( + procMounts.toPath(), + StringUtils.toUtf8(procMountsString.replaceAll( + Pattern.quote("/sys/fs/cgroup"), + cgroupDir.getAbsolutePath() + )) + ); + + Assert.assertTrue(new File( + cgroupDir, + "cpu,cpuacct/system.slice/mesos-agent-druid.service/f12ba7e0-fa16-462e-bb9d-652ccc27f0ee" + ).mkdirs()); + copyResource("/proc.pid.cgroup", new File(procDir, "cgroup")); + } + + public static void copyResource(String resource, File out) throws IOException + { + Files.copy(TestUtils.class.getResourceAsStream(resource), out.toPath()); + Assert.assertTrue(out.exists()); + Assert.assertNotEquals(0, out.length()); + } +} diff --git a/java-util/src/test/resources/cpuacct.usage_all b/java-util/src/test/resources/cpuacct.usage_all new file mode 100644 index 000000000000..f3d1c534708a --- /dev/null +++ b/java-util/src/test/resources/cpuacct.usage_all @@ -0,0 +1,129 @@ +cpu user system +0 7344294132655 4583688852335 +1 28183572804378 385888457233 +2 29552215219002 370465239852 +3 29478124053329 363572894675 +4 29829248571038 334329517808 +5 30290864470719 327800567541 +6 30561719193413 289395249935 +7 30638606697446 302853990791 +8 39251561450889 255558344564 +9 39082643428276 274043522998 +10 38829852195583 256014012773 +11 39158341842449 253276920707 +12 39490263697181 257971375228 +13 39363774325162 244926573383 +14 39569806302164 240692265222 +15 39410558504372 241820386298 +16 44907796060505 74203786299 +17 42522297123640 79965589957 +18 41920625622542 73735621808 +19 40593391967420 74851270413 +20 40350585953295 84689908189 +21 40139554930678 69618323977 +22 40019783380923 76316055513 +23 40182686097717 87434816528 +24 39778858132385 80680588218 +25 40252938541440 70083776283 +26 40476150948365 59931280896 +27 40277874584618 67678277638 +28 39938509407084 85713335794 +29 39914644718371 74594785949 +30 40010393213659 69367790895 +31 39938252119551 68817875729 +32 44958993952996 83346318662 +33 42967015146867 80872459027 +34 41742610896758 69882000641 +35 40751067975683 59230207177 +36 40390633464986 80985355290 +37 40143331504478 88305162767 +38 40486014164571 79610055475 +39 40565630824649 77097429500 +40 39976774290845 72748340407 +41 39942348143441 77647202034 +42 40149234675554 61982641775 +43 39895306827546 63292828955 +44 40062736204343 71501429739 +45 39208930836306 101050648913 +46 40098687814379 67603152691 +47 39803234124100 85242844849 +48 44894501101599 53320735254 +49 43470418903266 59480233446 +50 41844924510711 53738738094 +51 41137017142223 50064771695 +52 40958534485692 49322497528 +53 40996749346830 60437383202 +54 40722256755299 58974386647 +55 40715123538100 57254872107 +56 40756697196452 59214245666 +57 40388351638364 60135823463 +58 40607150623932 53295222550 +59 40799783862688 54850380995 +60 41085552637672 55260978656 +61 40406189914954 58478426264 +62 40723714534227 54870256138 +63 40594766265305 57541909382 +64 
47966186930606 563254758724 +65 40950398764685 310547129324 +66 39773685629470 297956013630 +67 39799299693868 314333221636 +68 39962809136735 301425507083 +69 39621597321912 307849203392 +70 39576312003193 302818464346 +71 39306677714061 300708730620 +72 37450385749152 201706674463 +73 37262591956707 237643132530 +74 37867848418162 203202644049 +75 37583170923549 200889116684 +76 37565074790371 211272183842 +77 37490674520644 211346947120 +78 37627356285158 204440036165 +79 37841963931932 214268740497 +80 36467248910690 79712665639 +81 37168392893625 75090730135 +82 37299551044970 78826984017 +83 37765703017416 80223883420 +84 37799573327332 74309961837 +85 38049895238765 73501632933 +86 37985869086888 84726122364 +87 37696241330128 89432904192 +88 38292683839783 86169574674 +89 38120890685615 61631022854 +90 38045845683675 63064271269 +91 38182343881607 71350572902 +92 37729375994055 79603442903 +93 38074905443126 80632248259 +94 37912923241296 73143984225 +95 37937307782462 68368595815 +96 36325058440018 93212419757 +97 37157290185847 79875769874 +98 37403692187351 71939175528 +99 38153199365119 61279926839 +100 37880374831086 80531076242 +101 37651504251556 97919090064 +102 37739944955714 62379356509 +103 37627835848111 73687439569 +104 37903369827551 65237344029 +105 37981555620129 88874846168 +106 37848203152449 73032242506 +107 37990323769817 66715221936 +108 38347560243684 75213471783 +109 37887959856632 85339287415 +110 37937702600487 70516346051 +111 38221455324656 80265423715 +112 37035158753494 55020848133 +113 37519359498531 68103451501 +114 38185495941617 56282008328 +115 38947633192125 57420205172 +116 38497334926906 51517654341 +117 38621231881393 53668755335 +118 38817038222494 58390679981 +119 38911615674430 59254129440 +120 38384669525324 60781018005 +121 38597980524270 60445939750 +122 38477107776771 62771597837 +123 38483564156449 57294449683 +124 38471547310020 57716404007 +125 38827188957783 55643587481 +126 38554167083817 53593212339 +127 38870461161179 52866253864 diff --git a/java-util/src/test/resources/keystore.jks b/java-util/src/test/resources/keystore.jks new file mode 100644 index 000000000000..a98918283982 Binary files /dev/null and b/java-util/src/test/resources/keystore.jks differ diff --git a/java-util/src/test/resources/log4j2.xml b/java-util/src/test/resources/log4j2.xml new file mode 100644 index 000000000000..689cbbf6b1c2 --- /dev/null +++ b/java-util/src/test/resources/log4j2.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + diff --git a/java-util/src/test/resources/proc.cgroups b/java-util/src/test/resources/proc.cgroups new file mode 100644 index 000000000000..09eff7760532 --- /dev/null +++ b/java-util/src/test/resources/proc.cgroups @@ -0,0 +1,13 @@ +#subsys_name hierarchy num_cgroups enabled +cpuset 4 1 1 +cpu 9 108 1 +cpuacct 9 108 1 +blkio 8 60 1 +memory 3 160 1 +devices 7 60 1 +freezer 6 52 1 +net_cls 2 1 1 +perf_event 10 1 1 +net_prio 2 1 1 +hugetlb 5 1 1 +pids 11 63 1 diff --git a/java-util/src/test/resources/proc.mounts b/java-util/src/test/resources/proc.mounts new file mode 100644 index 000000000000..c1b3ee74fe3a --- /dev/null +++ b/java-util/src/test/resources/proc.mounts @@ -0,0 +1,36 @@ +sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0 +proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 +devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=1007354668k,nr_inodes=251838667,mode=755 0 0 +securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0 +tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0 +devpts 
/dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 +tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0 +tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0 +cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 +pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0 +cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0 +cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0 +cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0 +cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0 +cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 +cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0 +cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0 +cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0 +cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0 +cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0 +/dev/xvda9 / ext4 rw,seclabel,relatime,data=ordered 0 0 +/dev/mapper/usr /usr ext4 ro,seclabel,relatime,block_validity,delalloc,barrier,user_xattr,acl 0 0 +selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0 +tmpfs /media tmpfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0 +systemd-1 /boot autofs rw,relatime,fd=34,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=18428 0 0 +xenfs /proc/xen xenfs rw,relatime 0 0 +tmpfs /tmp tmpfs rw,seclabel,nosuid,nodev 0 0 +hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0 +systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=38,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=140296 0 0 +debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0 +mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0 +/dev/xvda6 /usr/share/oem ext4 rw,seclabel,nodev,relatime,commit=600,data=ordered 0 0 +/dev/xvda1 /boot vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,errors=remount-ro 0 0 +/dev/md127 /mnt xfs rw,seclabel,noatime,attr2,inode64,sunit=1024,swidth=2048,noquota 0 0 +/dev/md126 /ebs xfs rw,seclabel,noatime,attr2,inode64,sunit=1024,swidth=16384,noquota 0 0 +/dev/xvda9 /var/lib/docker/overlay ext4 rw,seclabel,relatime,data=ordered 0 0 diff --git a/java-util/src/test/resources/proc.pid.cgroup b/java-util/src/test/resources/proc.pid.cgroup new file mode 100644 index 000000000000..139f81b29c8f --- /dev/null +++ b/java-util/src/test/resources/proc.pid.cgroup @@ -0,0 +1,11 @@ +11:pids:/system.slice/mesos-agent-druid.service +10:perf_event:/ +9:cpu,cpuacct:/system.slice/mesos-agent-druid.service/f12ba7e0-fa16-462e-bb9d-652ccc27f0ee +8:blkio:/system.slice/mesos-agent-druid.service +7:devices:/system.slice/mesos-agent-druid.service +6:freezer:/system.slice/mesos-agent-druid.service/f12ba7e0-fa16-462e-bb9d-652ccc27f0ee +5:hugetlb:/ +4:cpuset:/ +3:memory:/system.slice/mesos-agent-druid.service +2:net_cls,net_prio:/ +1:name=systemd:/mesos_executors.slice diff --git a/pom.xml b/pom.xml index a4c039532dfc..69cfacaa1991 100644 --- a/pom.xml +++ b/pom.xml @@ -16,8 +16,7 @@ ~ limitations under the License. 
--> - + 4.0.0 @@ -27,7 +26,7 @@ druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT pom ${project.groupId}:${project.artifactId} @@ -62,7 +61,8 @@ 4.0.0 2.12.0 1.10.0 - 1.14.0 + 1.15.0 + 8.1.0 16.0.1 4.1.0 9.3.19.v20170502 @@ -70,9 +70,11 @@ 2.4.6 2.5 + + 3.10.6.Final - 4.0.52.Final + 4.0.52.Final 1.7.12 2.7.3 @@ -100,7 +102,6 @@ benchmarks aws-common java-util - bytebuffer-collections extendedset hll @@ -148,11 +149,6 @@ - - com.metamx - java-util - 1.3.2 - commons-codec commons-codec @@ -483,10 +479,15 @@ jetty-security ${jetty.version} + + io.netty + netty + ${netty3.version} + io.netty netty-all - ${netty.version} + ${netty4.version} joda-time @@ -709,6 +710,16 @@ asm-commons 5.2 + + org.asynchttpclient + async-http-client + 2.0.37 + + + org.gridkit.lab + jvm-attach-api + 1.2 + diff --git a/bytebuffer-collections/benchmarks/io.druid.collections.bitmap.RangeBitmapBenchmarkTest.html b/processing/benchmarks/io.druid.collections.bitmap.RangeBitmapBenchmarkTest.html old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/benchmarks/io.druid.collections.bitmap.RangeBitmapBenchmarkTest.html rename to processing/benchmarks/io.druid.collections.bitmap.RangeBitmapBenchmarkTest.html diff --git a/bytebuffer-collections/benchmarks/io.druid.collections.bitmap.RangeBitmapBenchmarkTest.jsonp b/processing/benchmarks/io.druid.collections.bitmap.RangeBitmapBenchmarkTest.jsonp old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/benchmarks/io.druid.collections.bitmap.RangeBitmapBenchmarkTest.jsonp rename to processing/benchmarks/io.druid.collections.bitmap.RangeBitmapBenchmarkTest.jsonp diff --git a/bytebuffer-collections/benchmarks/io.druid.collections.bitmap.UniformBitmapBenchmarkTest.html b/processing/benchmarks/io.druid.collections.bitmap.UniformBitmapBenchmarkTest.html old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/benchmarks/io.druid.collections.bitmap.UniformBitmapBenchmarkTest.html rename to processing/benchmarks/io.druid.collections.bitmap.UniformBitmapBenchmarkTest.html diff --git a/bytebuffer-collections/benchmarks/io.druid.collections.bitmap.UniformBitmapBenchmarkTest.jsonp b/processing/benchmarks/io.druid.collections.bitmap.UniformBitmapBenchmarkTest.jsonp old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/benchmarks/io.druid.collections.bitmap.UniformBitmapBenchmarkTest.jsonp rename to processing/benchmarks/io.druid.collections.bitmap.UniformBitmapBenchmarkTest.jsonp diff --git a/processing/pom.xml b/processing/pom.xml index c5c74ab39d1f..b70814e3d07d 100644 --- a/processing/pom.xml +++ b/processing/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT @@ -35,11 +35,11 @@ druid-common ${project.parent.version} - - io.druid - java-util - ${project.parent.version} - + + io.druid + java-util + ${project.parent.version} + io.druid druid-hll @@ -47,16 +47,28 @@ io.druid - bytebuffer-collections + extendedset ${project.parent.version} - it.unimi.dsi - fastutil + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-core - com.metamx - java-util + com.fasterxml.jackson.core + jackson-databind + + + org.roaringbitmap + RoaringBitmap + + + it.unimi.dsi + fastutil com.ning @@ -125,6 +137,17 @@ JUnitParams test + + com.h2database + h2 + 1.4.182 + test + + + com.google.guava + guava-testlib + test + @@ -156,7 +179,32 @@ + + org.apache.maven.plugins + maven-surefire-plugin + + 
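<!-- editor's note: the surefire configuration below (XML tags stripped in extraction) excludes tests in the io.druid.collections.test.annotation.Benchmark group from the default run; the "benchmark" profile re-enables them with a 3G heap and -Djub.* result collection into H2 -->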
io.druid.collections.test.annotation.Benchmark + + + + + benchmark + + + + maven-surefire-plugin + + -server -Xms3G -Xmx3G -Djub.consumers=CONSOLE,H2 -Djub.db.file=benchmarks/benchmarks + io.druid.collections.test.annotation.Benchmark + io.druid.collections.test.annotation.Dummy + + + + + + + diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/IntegerSet.java b/processing/src/main/java/io/druid/collections/IntegerSet.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/IntegerSet.java rename to processing/src/main/java/io/druid/collections/IntegerSet.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/BitSetBitmapFactory.java b/processing/src/main/java/io/druid/collections/bitmap/BitSetBitmapFactory.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/BitSetBitmapFactory.java rename to processing/src/main/java/io/druid/collections/bitmap/BitSetBitmapFactory.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/BitmapFactory.java b/processing/src/main/java/io/druid/collections/bitmap/BitmapFactory.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/BitmapFactory.java rename to processing/src/main/java/io/druid/collections/bitmap/BitmapFactory.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/ConciseBitmapFactory.java b/processing/src/main/java/io/druid/collections/bitmap/ConciseBitmapFactory.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/ConciseBitmapFactory.java rename to processing/src/main/java/io/druid/collections/bitmap/ConciseBitmapFactory.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/ImmutableBitmap.java b/processing/src/main/java/io/druid/collections/bitmap/ImmutableBitmap.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/ImmutableBitmap.java rename to processing/src/main/java/io/druid/collections/bitmap/ImmutableBitmap.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/MutableBitmap.java b/processing/src/main/java/io/druid/collections/bitmap/MutableBitmap.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/MutableBitmap.java rename to processing/src/main/java/io/druid/collections/bitmap/MutableBitmap.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/RoaringBitmapFactory.java b/processing/src/main/java/io/druid/collections/bitmap/RoaringBitmapFactory.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/RoaringBitmapFactory.java rename to processing/src/main/java/io/druid/collections/bitmap/RoaringBitmapFactory.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedBitSetBitmap.java b/processing/src/main/java/io/druid/collections/bitmap/WrappedBitSetBitmap.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedBitSetBitmap.java rename to 
processing/src/main/java/io/druid/collections/bitmap/WrappedBitSetBitmap.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedConciseBitmap.java b/processing/src/main/java/io/druid/collections/bitmap/WrappedConciseBitmap.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedConciseBitmap.java rename to processing/src/main/java/io/druid/collections/bitmap/WrappedConciseBitmap.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedImmutableBitSetBitmap.java b/processing/src/main/java/io/druid/collections/bitmap/WrappedImmutableBitSetBitmap.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedImmutableBitSetBitmap.java rename to processing/src/main/java/io/druid/collections/bitmap/WrappedImmutableBitSetBitmap.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedImmutableConciseBitmap.java b/processing/src/main/java/io/druid/collections/bitmap/WrappedImmutableConciseBitmap.java old mode 100755 new mode 100644 similarity index 94% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedImmutableConciseBitmap.java rename to processing/src/main/java/io/druid/collections/bitmap/WrappedImmutableConciseBitmap.java index 01768cfecf97..349fb493743e --- a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedImmutableConciseBitmap.java +++ b/processing/src/main/java/io/druid/collections/bitmap/WrappedImmutableConciseBitmap.java @@ -64,11 +64,6 @@ public byte[] toBytes() return bitmap.toBytes(); } - public int compareTo(ImmutableBitmap other) - { - return bitmap.compareTo(((WrappedImmutableConciseBitmap) other).getBitmap()); - } - @Override public String toString() { diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedImmutableRoaringBitmap.java b/processing/src/main/java/io/druid/collections/bitmap/WrappedImmutableRoaringBitmap.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedImmutableRoaringBitmap.java rename to processing/src/main/java/io/druid/collections/bitmap/WrappedImmutableRoaringBitmap.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedRoaringBitmap.java b/processing/src/main/java/io/druid/collections/bitmap/WrappedRoaringBitmap.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/bitmap/WrappedRoaringBitmap.java rename to processing/src/main/java/io/druid/collections/bitmap/WrappedRoaringBitmap.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/ImmutableNode.java b/processing/src/main/java/io/druid/collections/spatial/ImmutableNode.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/ImmutableNode.java rename to processing/src/main/java/io/druid/collections/spatial/ImmutableNode.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/ImmutablePoint.java b/processing/src/main/java/io/druid/collections/spatial/ImmutablePoint.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/ImmutablePoint.java rename 
to processing/src/main/java/io/druid/collections/spatial/ImmutablePoint.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/ImmutableRTree.java b/processing/src/main/java/io/druid/collections/spatial/ImmutableRTree.java old mode 100755 new mode 100644 similarity index 79% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/ImmutableRTree.java rename to processing/src/main/java/io/druid/collections/spatial/ImmutableRTree.java index 2cb3e3d4acf0..9b442d3c5bc8 --- a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/ImmutableRTree.java +++ b/processing/src/main/java/io/druid/collections/spatial/ImmutableRTree.java @@ -27,24 +27,36 @@ import io.druid.collections.spatial.search.Bound; import io.druid.collections.spatial.search.GutmanSearchStrategy; import io.druid.collections.spatial.search.SearchStrategy; +import io.druid.io.Channels; +import io.druid.segment.writeout.WriteOutBytes; +import it.unimi.dsi.fastutil.bytes.ByteArrays; +import java.io.IOException; import java.nio.ByteBuffer; /** * An immutable representation of an {@link RTree} for spatial indexing. */ -public class ImmutableRTree +public final class ImmutableRTree { - private static byte VERSION = 0x0; + private static final byte VERSION = 0x0; + + private static final ImmutableRTree EMPTY = new ImmutableRTree(); + + public static ImmutableRTree empty() + { + return EMPTY; + } + private final int numDims; private final ImmutableNode root; private final ByteBuffer data; private final SearchStrategy defaultSearchStrategy = new GutmanSearchStrategy(); - public ImmutableRTree() + private ImmutableRTree() { this.numDims = 0; - this.data = ByteBuffer.wrap(new byte[]{}); + this.data = ByteBuffer.wrap(ByteArrays.EMPTY_ARRAY); this.root = null; } @@ -52,7 +64,7 @@ public ImmutableRTree(ByteBuffer data, BitmapFactory bitmapFactory) { data = data.asReadOnlyBuffer(); final int initPosition = data.position(); - Preconditions.checkArgument(data.get(0) == VERSION, "Mismatching versions"); + Preconditions.checkArgument(data.get(initPosition) == VERSION, "Mismatching versions"); this.numDims = data.getInt(1 + initPosition) & 0x7FFF; this.data = data; this.root = new ImmutableNode(numDims, initPosition, 1 + Ints.BYTES, data, bitmapFactory); @@ -61,7 +73,7 @@ public ImmutableRTree(ByteBuffer data, BitmapFactory bitmapFactory) public static ImmutableRTree newImmutableFromMutable(RTree rTree) { if (rTree.getSize() == 0) { - return new ImmutableRTree(); + return empty(); } ByteBuffer buffer = ByteBuffer.wrap(new byte[calcNumBytes(rTree)]); @@ -103,7 +115,7 @@ private static int calcNodeBytes(Node node) public int size() { - return data.capacity(); + return data.remaining(); } public Iterable search(Bound bound) @@ -123,9 +135,19 @@ public Iterable search(SearchStrategy strategy, Bound bound) public byte[] toBytes() { - ByteBuffer buf = ByteBuffer.allocate(data.capacity()); - buf.put(data.asReadOnlyBuffer()); - return buf.array(); + if (size() == 0) { + return ByteArrays.EMPTY_ARRAY; + } + byte[] res = new byte[data.remaining()]; + data.duplicate().get(res); + return res; + } + + public void writeTo(WriteOutBytes out) throws IOException + { + if (size() != 0) { + Channels.writeFully(out, data.duplicate()); + } } public int compareTo(ImmutableRTree other) diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/Node.java b/processing/src/main/java/io/druid/collections/spatial/Node.java old mode 100755 new mode 100644 similarity index 100% rename from 
bytebuffer-collections/src/main/java/io/druid/collections/spatial/Node.java rename to processing/src/main/java/io/druid/collections/spatial/Node.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/Point.java b/processing/src/main/java/io/druid/collections/spatial/Point.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/Point.java rename to processing/src/main/java/io/druid/collections/spatial/Point.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/RTree.java b/processing/src/main/java/io/druid/collections/spatial/RTree.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/RTree.java rename to processing/src/main/java/io/druid/collections/spatial/RTree.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/RTreeUtils.java b/processing/src/main/java/io/druid/collections/spatial/RTreeUtils.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/RTreeUtils.java rename to processing/src/main/java/io/druid/collections/spatial/RTreeUtils.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/Bound.java b/processing/src/main/java/io/druid/collections/spatial/search/Bound.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/Bound.java rename to processing/src/main/java/io/druid/collections/spatial/search/Bound.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/GutmanSearchStrategy.java b/processing/src/main/java/io/druid/collections/spatial/search/GutmanSearchStrategy.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/GutmanSearchStrategy.java rename to processing/src/main/java/io/druid/collections/spatial/search/GutmanSearchStrategy.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/PolygonBound.java b/processing/src/main/java/io/druid/collections/spatial/search/PolygonBound.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/PolygonBound.java rename to processing/src/main/java/io/druid/collections/spatial/search/PolygonBound.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/RadiusBound.java b/processing/src/main/java/io/druid/collections/spatial/search/RadiusBound.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/RadiusBound.java rename to processing/src/main/java/io/druid/collections/spatial/search/RadiusBound.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/RectangularBound.java b/processing/src/main/java/io/druid/collections/spatial/search/RectangularBound.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/RectangularBound.java rename to processing/src/main/java/io/druid/collections/spatial/search/RectangularBound.java diff --git 
a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/SearchStrategy.java b/processing/src/main/java/io/druid/collections/spatial/search/SearchStrategy.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/search/SearchStrategy.java rename to processing/src/main/java/io/druid/collections/spatial/search/SearchStrategy.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/split/GutmanSplitStrategy.java b/processing/src/main/java/io/druid/collections/spatial/split/GutmanSplitStrategy.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/split/GutmanSplitStrategy.java rename to processing/src/main/java/io/druid/collections/spatial/split/GutmanSplitStrategy.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/split/LinearGutmanSplitStrategy.java b/processing/src/main/java/io/druid/collections/spatial/split/LinearGutmanSplitStrategy.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/split/LinearGutmanSplitStrategy.java rename to processing/src/main/java/io/druid/collections/spatial/split/LinearGutmanSplitStrategy.java diff --git a/bytebuffer-collections/src/main/java/io/druid/collections/spatial/split/SplitStrategy.java b/processing/src/main/java/io/druid/collections/spatial/split/SplitStrategy.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/main/java/io/druid/collections/spatial/split/SplitStrategy.java rename to processing/src/main/java/io/druid/collections/spatial/split/SplitStrategy.java diff --git a/processing/src/main/java/io/druid/query/BaseQuery.java b/processing/src/main/java/io/druid/query/BaseQuery.java index 7b511097a832..86587d47ae84 100644 --- a/processing/src/main/java/io/druid/query/BaseQuery.java +++ b/processing/src/main/java/io/druid/query/BaseQuery.java @@ -25,12 +25,17 @@ import com.google.common.collect.Maps; import com.google.common.collect.Ordering; import io.druid.guice.annotations.ExtensionPoint; +import io.druid.java.util.common.granularity.Granularities; +import io.druid.java.util.common.granularity.Granularity; +import io.druid.java.util.common.granularity.PeriodGranularity; import io.druid.query.spec.QuerySegmentSpec; +import org.joda.time.DateTimeZone; import org.joda.time.Duration; import org.joda.time.Interval; import java.util.List; import java.util.Map; +import java.util.Objects; /** */ @@ -50,6 +55,7 @@ public static void checkInterrupted() private final Map context; private final QuerySegmentSpec querySegmentSpec; private volatile Duration duration; + private final Granularity granularity; public BaseQuery( DataSource dataSource, @@ -57,14 +63,27 @@ public BaseQuery( boolean descending, Map context ) + { + this(dataSource, querySegmentSpec, descending, context, Granularities.ALL); + } + + public BaseQuery( + DataSource dataSource, + QuerySegmentSpec querySegmentSpec, + boolean descending, + Map context, + Granularity granularity + ) { Preconditions.checkNotNull(dataSource, "dataSource can't be null"); Preconditions.checkNotNull(querySegmentSpec, "querySegmentSpec can't be null"); + Preconditions.checkNotNull(granularity, "Must specify a granularity"); this.dataSource = dataSource; this.context = context; this.querySegmentSpec = querySegmentSpec; this.descending = descending; + 
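// granularity is now stored on BaseQuery itself (defaulting to Granularities.ALL in the
// shorter constructor above) rather than in each query subclass; see the GroupByQuery
// change below, which deletes its private granularity field accordingly.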
this.granularity = granularity; } @JsonProperty @@ -115,6 +134,21 @@ public Duration getDuration() return duration; } + @Override + @JsonProperty + public Granularity getGranularity() + { + return granularity; + } + + @Override + public DateTimeZone getTimezone() + { + return granularity instanceof PeriodGranularity + ? ((PeriodGranularity) granularity).getTimeZone() + : DateTimeZone.UTC; + } + @Override @JsonProperty public Map getContext() @@ -193,38 +227,19 @@ public boolean equals(Object o) if (o == null || getClass() != o.getClass()) { return false; } - - BaseQuery baseQuery = (BaseQuery) o; - - if (descending != baseQuery.descending) { - return false; - } - if (context != null ? !context.equals(baseQuery.context) : baseQuery.context != null) { - return false; - } - if (dataSource != null ? !dataSource.equals(baseQuery.dataSource) : baseQuery.dataSource != null) { - return false; - } - if (duration != null ? !duration.equals(baseQuery.duration) : baseQuery.duration != null) { - return false; - } - if (querySegmentSpec != null - ? !querySegmentSpec.equals(baseQuery.querySegmentSpec) - : baseQuery.querySegmentSpec != null) { - return false; - } - - return true; + BaseQuery baseQuery = (BaseQuery) o; + return descending == baseQuery.descending && + Objects.equals(dataSource, baseQuery.dataSource) && + Objects.equals(context, baseQuery.context) && + Objects.equals(querySegmentSpec, baseQuery.querySegmentSpec) && + Objects.equals(duration, baseQuery.duration) && + Objects.equals(granularity, baseQuery.granularity); } @Override public int hashCode() { - int result = dataSource != null ? dataSource.hashCode() : 0; - result = 31 * result + (descending ? 1 : 0); - result = 31 * result + (context != null ? context.hashCode() : 0); - result = 31 * result + (querySegmentSpec != null ? querySegmentSpec.hashCode() : 0); - result = 31 * result + (duration != null ? 
duration.hashCode() : 0); - return result; + + return Objects.hash(dataSource, descending, context, querySegmentSpec, duration, granularity); } } diff --git a/processing/src/main/java/io/druid/query/CPUTimeMetricQueryRunner.java b/processing/src/main/java/io/druid/query/CPUTimeMetricQueryRunner.java index bce67514d97b..15bc40de9cf4 100644 --- a/processing/src/main/java/io/druid/query/CPUTimeMetricQueryRunner.java +++ b/processing/src/main/java/io/druid/query/CPUTimeMetricQueryRunner.java @@ -20,7 +20,7 @@ package io.druid.query; import com.google.common.base.Supplier; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.common.utils.VMUtils; import io.druid.java.util.common.ISE; import io.druid.java.util.common.guava.Sequence; diff --git a/processing/src/main/java/io/druid/query/DefaultQueryMetrics.java b/processing/src/main/java/io/druid/query/DefaultQueryMetrics.java index 7d830fedac00..8e13ae5bead1 100644 --- a/processing/src/main/java/io/druid/query/DefaultQueryMetrics.java +++ b/processing/src/main/java/io/druid/query/DefaultQueryMetrics.java @@ -23,8 +23,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.collections.bitmap.BitmapFactory; import io.druid.query.filter.Filter; import org.joda.time.Interval; diff --git a/processing/src/main/java/io/druid/query/Druids.java b/processing/src/main/java/io/druid/query/Druids.java index 1576c01a1c79..e235a99d0220 100644 --- a/processing/src/main/java/io/druid/query/Druids.java +++ b/processing/src/main/java/io/druid/query/Druids.java @@ -591,6 +591,7 @@ public static class SegmentMetadataQueryBuilder private EnumSet analysisTypes; private Boolean merge; private Boolean lenientAggregatorMerge; + private Boolean usingDefaultInterval; private Map context; public SegmentMetadataQueryBuilder() @@ -601,6 +602,7 @@ public SegmentMetadataQueryBuilder() analysisTypes = null; merge = null; lenientAggregatorMerge = null; + usingDefaultInterval = null; context = null; } @@ -613,7 +615,7 @@ public SegmentMetadataQuery build() merge, context, analysisTypes, - false, + usingDefaultInterval, lenientAggregatorMerge ); } @@ -627,6 +629,7 @@ public static SegmentMetadataQueryBuilder copy(SegmentMetadataQuery query) .analysisTypes(query.getAnalysisTypes()) .merge(query.isMerge()) .lenientAggregatorMerge(query.isLenientAggregatorMerge()) + .usingDefaultInterval(query.isUsingDefaultInterval()) .context(query.getContext()); } @@ -696,6 +699,12 @@ public SegmentMetadataQueryBuilder lenientAggregatorMerge(boolean lenientAggrega return this; } + public SegmentMetadataQueryBuilder usingDefaultInterval(boolean usingDefaultInterval) + { + this.usingDefaultInterval = usingDefaultInterval; + return this; + } + public SegmentMetadataQueryBuilder context(Map c) { context = c; diff --git a/processing/src/main/java/io/druid/query/ExecutorServiceMonitor.java b/processing/src/main/java/io/druid/query/ExecutorServiceMonitor.java index bf15e9e99418..2c227884e731 100644 --- a/processing/src/main/java/io/druid/query/ExecutorServiceMonitor.java +++ b/processing/src/main/java/io/druid/query/ExecutorServiceMonitor.java @@ -21,9 +21,9 @@ import com.google.common.collect.Lists; import 
com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; import java.util.List; @@ -58,5 +58,4 @@ public interface MetricEmitter { void emitMetrics(ServiceEmitter emitter, ServiceMetricEvent.Builder metricBuilder); } - } diff --git a/processing/src/main/java/io/druid/query/FluentQueryRunnerBuilder.java b/processing/src/main/java/io/druid/query/FluentQueryRunnerBuilder.java index 5d3fefaad802..8a79d7e00f9c 100644 --- a/processing/src/main/java/io/druid/query/FluentQueryRunnerBuilder.java +++ b/processing/src/main/java/io/druid/query/FluentQueryRunnerBuilder.java @@ -19,7 +19,7 @@ package io.druid.query; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.guava.Sequence; import java.util.Map; diff --git a/processing/src/main/java/io/druid/query/IntervalChunkingQueryRunner.java b/processing/src/main/java/io/druid/query/IntervalChunkingQueryRunner.java index 264225105400..2e114f72124f 100644 --- a/processing/src/main/java/io/druid/query/IntervalChunkingQueryRunner.java +++ b/processing/src/main/java/io/druid/query/IntervalChunkingQueryRunner.java @@ -21,7 +21,7 @@ import com.google.common.base.Function; import com.google.common.collect.Lists; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.granularity.PeriodGranularity; import io.druid.java.util.common.guava.FunctionalIterable; diff --git a/processing/src/main/java/io/druid/query/IntervalChunkingQueryRunnerDecorator.java b/processing/src/main/java/io/druid/query/IntervalChunkingQueryRunnerDecorator.java index 5f7e10c5bccf..4f47462b1b5f 100644 --- a/processing/src/main/java/io/druid/query/IntervalChunkingQueryRunnerDecorator.java +++ b/processing/src/main/java/io/druid/query/IntervalChunkingQueryRunnerDecorator.java @@ -20,7 +20,7 @@ package io.druid.query; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.guice.annotations.Processing; import io.druid.guice.annotations.PublicApi; diff --git a/processing/src/main/java/io/druid/query/MetricsEmittingExecutorService.java b/processing/src/main/java/io/druid/query/MetricsEmittingExecutorService.java index 9fbcec1e47a3..da39026fa62e 100644 --- a/processing/src/main/java/io/druid/query/MetricsEmittingExecutorService.java +++ b/processing/src/main/java/io/druid/query/MetricsEmittingExecutorService.java @@ -22,8 +22,8 @@ import com.google.common.util.concurrent.ForwardingListeningExecutorService; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import java.util.concurrent.Callable; diff --git a/processing/src/main/java/io/druid/query/MetricsEmittingQueryRunner.java b/processing/src/main/java/io/druid/query/MetricsEmittingQueryRunner.java index 69d1db8094c2..0734c2b6a948 100644 --- 
a/processing/src/main/java/io/druid/query/MetricsEmittingQueryRunner.java +++ b/processing/src/main/java/io/druid/query/MetricsEmittingQueryRunner.java @@ -19,7 +19,7 @@ package io.druid.query; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.guava.LazySequence; import io.druid.java.util.common.guava.Sequence; import io.druid.java.util.common.guava.SequenceWrapper; diff --git a/processing/src/main/java/io/druid/query/Query.java b/processing/src/main/java/io/druid/query/Query.java index efb9fd500e09..06ff069b205d 100644 --- a/processing/src/main/java/io/druid/query/Query.java +++ b/processing/src/main/java/io/druid/query/Query.java @@ -23,6 +23,7 @@ import com.fasterxml.jackson.annotation.JsonTypeInfo; import com.google.common.collect.Ordering; import io.druid.guice.annotations.ExtensionPoint; +import io.druid.java.util.common.granularity.Granularity; import io.druid.query.datasourcemetadata.DataSourceMetadataQuery; import io.druid.query.filter.DimFilter; import io.druid.query.groupby.GroupByQuery; @@ -34,6 +35,7 @@ import io.druid.query.timeboundary.TimeBoundaryQuery; import io.druid.query.timeseries.TimeseriesQuery; import io.druid.query.topn.TopNQuery; +import org.joda.time.DateTimeZone; import org.joda.time.Duration; import org.joda.time.Interval; @@ -80,6 +82,12 @@ public interface Query<T> Duration getDuration(); + // currently unused, but helps enforce the idea that all queries have a Granularity + @SuppressWarnings("unused") + Granularity getGranularity(); + + DateTimeZone getTimezone(); + Map<String, Object> getContext(); <ContextType> ContextType getContextValue(String key); diff --git a/processing/src/main/java/io/druid/query/QueryMetrics.java b/processing/src/main/java/io/druid/query/QueryMetrics.java index 9abd4ce0c619..c560dc908220 100644 --- a/processing/src/main/java/io/druid/query/QueryMetrics.java +++ b/processing/src/main/java/io/druid/query/QueryMetrics.java @@ -19,7 +19,7 @@ package io.druid.query; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.collections.bitmap.BitmapFactory; import io.druid.guice.annotations.ExtensionPoint; import io.druid.guice.annotations.PublicApi; @@ -30,7 +30,7 @@ import java.util.List; /** - * Abstraction wrapping {@link com.metamx.emitter.service.ServiceMetricEvent.Builder} and allowing to control what + * Abstraction wrapping {@link io.druid.java.util.emitter.service.ServiceMetricEvent.Builder}, allowing control over which * metrics are actually emitted, which dimensions they include, etc.
* * diff --git a/processing/src/main/java/io/druid/query/RetryQueryRunner.java b/processing/src/main/java/io/druid/query/RetryQueryRunner.java index f4a7f178ce5f..5dbff2a3a7b6 100644 --- a/processing/src/main/java/io/druid/query/RetryQueryRunner.java +++ b/processing/src/main/java/io/druid/query/RetryQueryRunner.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.guava.MergeSequence; import io.druid.java.util.common.guava.Sequence; import io.druid.java.util.common.guava.Sequences; diff --git a/processing/src/main/java/io/druid/query/TimewarpOperator.java b/processing/src/main/java/io/druid/query/TimewarpOperator.java index 64ed0cdd9af2..cee9047e5bf2 100644 --- a/processing/src/main/java/io/druid/query/TimewarpOperator.java +++ b/processing/src/main/java/io/druid/query/TimewarpOperator.java @@ -30,6 +30,7 @@ import io.druid.query.timeboundary.TimeBoundaryQuery; import io.druid.query.timeboundary.TimeBoundaryResultValue; import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; import org.joda.time.Interval; import org.joda.time.Period; @@ -80,7 +81,8 @@ public QueryRunner postProcess(final QueryRunner baseRunner, final long no @Override public Sequence run(final QueryPlus queryPlus, final Map responseContext) { - final long offset = computeOffset(now); + final DateTimeZone tz = queryPlus.getQuery().getTimezone(); + final long offset = computeOffset(now, tz); final Interval interval = queryPlus.getQuery().getIntervals().get(0); final Interval modifiedInterval = new Interval( @@ -142,7 +144,7 @@ public T apply(T input) * * @return the offset between the mapped time and time t */ - protected long computeOffset(final long t) + protected long computeOffset(final long t, final DateTimeZone tz) { // start is the beginning of the last period ending within dataInterval long start = dataInterval.getEndMillis() - periodMillis; @@ -159,6 +161,6 @@ protected long computeOffset(final long t) tOffset += periodMillis; } tOffset += start; - return tOffset - t; + return tOffset - t - (tz.getOffset(tOffset) - tz.getOffset(t)); } } diff --git a/processing/src/main/java/io/druid/query/aggregation/first/DoubleFirstAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/first/DoubleFirstAggregatorFactory.java index be2dd5c84e4e..4c254a75b86f 100644 --- a/processing/src/main/java/io/druid/query/aggregation/first/DoubleFirstAggregatorFactory.java +++ b/processing/src/main/java/io/druid/query/aggregation/first/DoubleFirstAggregatorFactory.java @@ -24,7 +24,7 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Doubles; import com.google.common.primitives.Longs; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.collections.SerializablePair; import io.druid.java.util.common.UOE; import io.druid.query.aggregation.AggregateCombiner; diff --git a/processing/src/main/java/io/druid/query/aggregation/first/FloatFirstAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/first/FloatFirstAggregatorFactory.java index 0b62e47aebb1..84f185e93939 100644 --- a/processing/src/main/java/io/druid/query/aggregation/first/FloatFirstAggregatorFactory.java +++ b/processing/src/main/java/io/druid/query/aggregation/first/FloatFirstAggregatorFactory.java @@ -24,7 +24,7 @@ import 
com.google.common.base.Preconditions; import com.google.common.primitives.Doubles; import com.google.common.primitives.Longs; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.collections.SerializablePair; import io.druid.java.util.common.UOE; import io.druid.query.aggregation.AggregateCombiner; diff --git a/processing/src/main/java/io/druid/query/aggregation/first/LongFirstAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/first/LongFirstAggregatorFactory.java index b000d9d5c363..a2780e94e6a4 100644 --- a/processing/src/main/java/io/druid/query/aggregation/first/LongFirstAggregatorFactory.java +++ b/processing/src/main/java/io/druid/query/aggregation/first/LongFirstAggregatorFactory.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.collections.SerializablePair; import io.druid.java.util.common.UOE; import io.druid.query.aggregation.AggregateCombiner; diff --git a/processing/src/main/java/io/druid/query/aggregation/last/DoubleLastAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/last/DoubleLastAggregatorFactory.java index 3241d49aee62..c028247b5813 100644 --- a/processing/src/main/java/io/druid/query/aggregation/last/DoubleLastAggregatorFactory.java +++ b/processing/src/main/java/io/druid/query/aggregation/last/DoubleLastAggregatorFactory.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.collections.SerializablePair; import io.druid.java.util.common.UOE; import io.druid.query.aggregation.AggregateCombiner; diff --git a/processing/src/main/java/io/druid/query/aggregation/last/FloatLastAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/last/FloatLastAggregatorFactory.java index 73d671b69c96..2ab1f2856825 100644 --- a/processing/src/main/java/io/druid/query/aggregation/last/FloatLastAggregatorFactory.java +++ b/processing/src/main/java/io/druid/query/aggregation/last/FloatLastAggregatorFactory.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.collections.SerializablePair; import io.druid.java.util.common.UOE; import io.druid.query.aggregation.AggregateCombiner; diff --git a/processing/src/main/java/io/druid/query/aggregation/last/LongLastAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/last/LongLastAggregatorFactory.java index 60f9669f5913..4bee9e9a6659 100644 --- a/processing/src/main/java/io/druid/query/aggregation/last/LongLastAggregatorFactory.java +++ b/processing/src/main/java/io/druid/query/aggregation/last/LongLastAggregatorFactory.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.collections.SerializablePair; import io.druid.java.util.common.UOE; import 
io.druid.query.aggregation.AggregateCombiner; diff --git a/processing/src/main/java/io/druid/query/expression/TimestampExtractExprMacro.java b/processing/src/main/java/io/druid/query/expression/TimestampExtractExprMacro.java index 19c64415e5fa..bb1b2ca5f243 100644 --- a/processing/src/main/java/io/druid/query/expression/TimestampExtractExprMacro.java +++ b/processing/src/main/java/io/druid/query/expression/TimestampExtractExprMacro.java @@ -91,7 +91,7 @@ public ExprEval eval(final ObjectBinding bindings) final DateTime dateTime = new DateTime(arg.eval(bindings).asLong(), chronology); switch (unit) { case EPOCH: - return ExprEval.of(dateTime.getMillis()); + return ExprEval.of(dateTime.getMillis() / 1000); case SECOND: return ExprEval.of(dateTime.secondOfMinute().get()); case MINUTE: diff --git a/processing/src/main/java/io/druid/query/groupby/GroupByQuery.java b/processing/src/main/java/io/druid/query/groupby/GroupByQuery.java index 72fd236402be..1f9b45e62eb0 100644 --- a/processing/src/main/java/io/druid/query/groupby/GroupByQuery.java +++ b/processing/src/main/java/io/druid/query/groupby/GroupByQuery.java @@ -96,7 +96,6 @@ public static Builder builder() private final LimitSpec limitSpec; private final HavingSpec havingSpec; private final DimFilter dimFilter; - private final Granularity granularity; private final List dimensions; private final List aggregatorSpecs; private final List postAggregatorSpecs; @@ -137,8 +136,13 @@ public GroupByQuery( private Function, Sequence> makePostProcessingFn() { - Function, Sequence> postProcessingFn = - limitSpec.build(dimensions, aggregatorSpecs, postAggregatorSpecs); + Function, Sequence> postProcessingFn = limitSpec.build( + dimensions, + aggregatorSpecs, + postAggregatorSpecs, + getGranularity(), + getContextSortByDimsFirst() + ); if (havingSpec != null) { postProcessingFn = Functions.compose( @@ -171,15 +175,15 @@ private GroupByQuery( final Map context ) { - super(dataSource, querySegmentSpec, false, context); + super(dataSource, querySegmentSpec, false, context, granularity); this.virtualColumns = VirtualColumns.nullToEmpty(virtualColumns); this.dimFilter = dimFilter; - this.granularity = granularity; this.dimensions = dimensions == null ? ImmutableList.of() : dimensions; for (DimensionSpec spec : this.dimensions) { Preconditions.checkArgument(spec != null, "dimensions has null DimensionSpec"); } + this.aggregatorSpecs = aggregatorSpecs == null ? ImmutableList.of() : aggregatorSpecs; this.postAggregatorSpecs = Queries.prepareAggregations( this.dimensions.stream().map(DimensionSpec::getOutputName).collect(Collectors.toList()), @@ -189,7 +193,6 @@ private GroupByQuery( this.havingSpec = havingSpec; this.limitSpec = LimitSpec.nullToNoopLimitSpec(limitSpec); - Preconditions.checkNotNull(this.granularity, "Must specify a granularity"); // Verify no duplicate names between dimensions, aggregators, and postAggregators. // They will all end up in the same namespace in the returned Rows and we can't have them clobbering each other. 
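The TimestampExtractExprMacro hunk above fixes the EPOCH unit to return Unix seconds rather than milliseconds, which is what "epoch" conventionally means. A small before/after illustration (the timestamp value is arbitrary):

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class EpochUnitExample
{
  public static void main(String[] args)
  {
    DateTime dateTime = new DateTime(1515000000000L, DateTimeZone.UTC);
    long beforeFix = dateTime.getMillis();       // 1515000000000: milliseconds, not an epoch value
    long afterFix = dateTime.getMillis() / 1000; // 1515000000: Unix seconds, as EPOCH now returns
    System.out.println(beforeFix + " vs " + afterFix);
  }
}
```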
@@ -214,12 +217,6 @@ public DimFilter getDimFilter() return dimFilter; } - @JsonProperty - public Granularity getGranularity() - { - return granularity; - } - @JsonProperty public List getDimensions() { @@ -518,12 +515,12 @@ public Ordering getRowOrdering(final boolean granular) private Comparator getTimeComparator(boolean granular) { - if (Granularities.ALL.equals(granularity)) { + if (Granularities.ALL.equals(getGranularity())) { return null; } else if (granular) { return (lhs, rhs) -> Longs.compare( - granularity.bucketStart(lhs.getTimestamp()).getMillis(), - granularity.bucketStart(rhs.getTimestamp()).getMillis() + getGranularity().bucketStart(lhs.getTimestamp()).getMillis(), + getGranularity().bucketStart(rhs.getTimestamp()).getMillis() ); } else { return NON_GRANULAR_TIME_COMP; @@ -577,22 +574,14 @@ private static int compareDimsForLimitPushDown( final StringComparator comparator = comparators.get(i); final int dimCompare; - - Object lhsObj; - Object rhsObj; - if (needsReverseList.get(i)) { - lhsObj = rhs.getRaw(fieldName); - rhsObj = lhs.getRaw(fieldName); - } else { - lhsObj = lhs.getRaw(fieldName); - rhsObj = rhs.getRaw(fieldName); - } + final Object lhsObj = lhs.getRaw(fieldName); + final Object rhsObj = rhs.getRaw(fieldName); if (isNumericField.get(i)) { if (comparator.equals(StringComparators.NUMERIC)) { dimCompare = ((Ordering) Comparators.naturalNullsFirst()).compare( - lhs.getRaw(fieldName), - rhs.getRaw(fieldName) + lhsObj, + rhsObj ); } else { dimCompare = comparator.compare(String.valueOf(lhsObj), String.valueOf(rhsObj)); @@ -602,7 +591,7 @@ private static int compareDimsForLimitPushDown( } if (dimCompare != 0) { - return dimCompare; + return needsReverseList.get(i) ? -dimCompare : dimCompare; } } return 0; @@ -990,7 +979,7 @@ public String toString() ", virtualColumns=" + virtualColumns + ", limitSpec=" + limitSpec + ", dimFilter=" + dimFilter + - ", granularity=" + granularity + + ", granularity=" + getGranularity() + ", dimensions=" + dimensions + ", aggregatorSpecs=" + aggregatorSpecs + ", postAggregatorSpecs=" + postAggregatorSpecs + @@ -1015,7 +1004,6 @@ public boolean equals(final Object o) Objects.equals(limitSpec, that.limitSpec) && Objects.equals(havingSpec, that.havingSpec) && Objects.equals(dimFilter, that.dimFilter) && - Objects.equals(granularity, that.granularity) && Objects.equals(dimensions, that.dimensions) && Objects.equals(aggregatorSpecs, that.aggregatorSpecs) && Objects.equals(postAggregatorSpecs, that.postAggregatorSpecs); @@ -1030,7 +1018,6 @@ public int hashCode() limitSpec, havingSpec, dimFilter, - granularity, dimensions, aggregatorSpecs, postAggregatorSpecs diff --git a/processing/src/main/java/io/druid/query/groupby/epinephelinae/BufferArrayGrouper.java b/processing/src/main/java/io/druid/query/groupby/epinephelinae/BufferArrayGrouper.java index 7a39b5e34cf1..e816fbd3d9c0 100644 --- a/processing/src/main/java/io/druid/query/groupby/epinephelinae/BufferArrayGrouper.java +++ b/processing/src/main/java/io/druid/query/groupby/epinephelinae/BufferArrayGrouper.java @@ -62,7 +62,7 @@ public class BufferArrayGrouper implements IntGrouper private ByteBuffer usedFlagBuffer; private ByteBuffer valBuffer; - static int requiredBufferCapacity( + static long requiredBufferCapacity( int cardinality, AggregatorFactory[] aggregatorFactories ) @@ -73,7 +73,7 @@ static int requiredBufferCapacity( .sum(); return getUsedFlagBufferCapacity(cardinalityWithMissingValue) + // total used flags size - cardinalityWithMissingValue * recordSize; // total values size + 
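In compareDimsForLimitPushDown() above, the patch stops swapping lhs and rhs up front and instead negates the per-column result for descending columns, so each raw value is fetched exactly once and the sort direction is applied at the end. A self-contained sketch of the idiom; row access is reduced to string lists here:

```java
import java.util.List;

public class ReverseByNegationSketch
{
  // Compares two rows column by column; descending[i] marks columns that sort
  // in reverse. Negating the per-column result is equivalent to swapping the
  // operands, assuming the comparator never returns Integer.MIN_VALUE
  // (String.compareTo cannot).
  static int compareRows(List<String> lhs, List<String> rhs, boolean[] descending)
  {
    for (int i = 0; i < lhs.size(); i++) {
      int cmp = lhs.get(i).compareTo(rhs.get(i));
      if (cmp != 0) {
        return descending[i] ? -cmp : cmp;
      }
    }
    return 0;
  }
}
```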
(long) cardinalityWithMissingValue * recordSize; // total values size } /** diff --git a/processing/src/main/java/io/druid/query/groupby/epinephelinae/ByteBufferIntList.java b/processing/src/main/java/io/druid/query/groupby/epinephelinae/ByteBufferIntList.java index 958426fcce0d..5ee9d73d6d30 100644 --- a/processing/src/main/java/io/druid/query/groupby/epinephelinae/ByteBufferIntList.java +++ b/processing/src/main/java/io/druid/query/groupby/epinephelinae/ByteBufferIntList.java @@ -20,7 +20,7 @@ package io.druid.query.groupby.epinephelinae; import com.google.common.primitives.Ints; -import com.metamx.common.IAE; +import io.druid.java.util.common.IAE; import io.druid.java.util.common.StringUtils; import java.nio.ByteBuffer; diff --git a/processing/src/main/java/io/druid/query/groupby/epinephelinae/GroupByQueryEngineV2.java b/processing/src/main/java/io/druid/query/groupby/epinephelinae/GroupByQueryEngineV2.java index 6db22e1efb19..dffc4f626038 100644 --- a/processing/src/main/java/io/druid/query/groupby/epinephelinae/GroupByQueryEngineV2.java +++ b/processing/src/main/java/io/druid/query/groupby/epinephelinae/GroupByQueryEngineV2.java @@ -91,7 +91,7 @@ public static Sequence process( final GroupByQuery query, final StorageAdapter storageAdapter, final NonBlockingPool intermediateResultsBufferPool, - final GroupByQueryConfig config + final GroupByQueryConfig querySpecificConfig ) { if (storageAdapter == null) { @@ -150,10 +150,10 @@ public GroupByEngineIterator make() final ByteBuffer buffer = bufferHolder.get(); // Check array-based aggregation is applicable - if (isArrayAggregateApplicable(config, query, dims, storageAdapter, buffer)) { + if (isArrayAggregateApplicable(querySpecificConfig, query, dims, storageAdapter, buffer)) { return new ArrayAggregateIterator( query, - config, + querySpecificConfig, cursor, buffer, fudgeTimestamp, @@ -165,7 +165,7 @@ public GroupByEngineIterator make() } else { return new HashAggregateIterator( query, - config, + querySpecificConfig, cursor, buffer, fudgeTimestamp, @@ -186,14 +186,14 @@ public void cleanup(GroupByEngineIterator iterFromMake) } private static boolean isArrayAggregateApplicable( - GroupByQueryConfig config, + GroupByQueryConfig querySpecificConfig, GroupByQuery query, GroupByColumnSelectorPlus[] dims, StorageAdapter storageAdapter, ByteBuffer buffer ) { - if (config.isForceHashAggregation()) { + if (querySpecificConfig.isForceHashAggregation()) { return false; } @@ -219,7 +219,7 @@ private static boolean isArrayAggregateApplicable( final AggregatorFactory[] aggregatorFactories = query .getAggregatorSpecs() .toArray(new AggregatorFactory[query.getAggregatorSpecs().size()]); - final int requiredBufferCapacity = BufferArrayGrouper.requiredBufferCapacity( + final long requiredBufferCapacity = BufferArrayGrouper.requiredBufferCapacity( cardinality, aggregatorFactories ); @@ -276,7 +276,7 @@ private abstract static class GroupByEngineIterator implements Iterator public GroupByEngineIterator( final GroupByQuery query, - final GroupByQueryConfig config, + final GroupByQueryConfig querySpecificConfig, final Cursor cursor, final ByteBuffer buffer, final DateTime fudgeTimestamp, @@ -285,7 +285,7 @@ public GroupByEngineIterator( ) { this.query = query; - this.querySpecificConfig = config.withOverrides(query); + this.querySpecificConfig = querySpecificConfig; this.cursor = cursor; this.buffer = buffer; this.keySerde = new GroupByEngineKeySerde(dims); @@ -413,7 +413,7 @@ private static class HashAggregateIterator extends GroupByEngineIterator> combine( 
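requiredBufferCapacity() above now returns long and promotes the multiplication, because cardinalityWithMissingValue * recordSize can exceed Integer.MAX_VALUE for high-cardinality dimensions and would wrap silently in int arithmetic; isArrayAggregateApplicable() then compares the long result against the actual buffer capacity instead of trusting a corrupted int. A tiny demonstration with illustrative numbers:

```java
public class CapacityOverflowExample
{
  public static void main(String[] args)
  {
    int cardinalityWithMissingValue = 100_000_000;
    int recordSize = 48;

    int overflowed = cardinalityWithMissingValue * recordSize;      // wraps to 505032704 in 32-bit arithmetic
    long correct = (long) cardinalityWithMissingValue * recordSize; // 4800000000, the real requirement

    System.out.println(overflowed);
    System.out.println(correct);
  }
}
```

With the int version, the wrapped value could look smaller than the buffer and wrongly enable array aggregation.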
) { // CombineBuffer is initialized when this method is called and closed after the result iterator is done + final Closer closer = Closer.create(); final ResourceHolder combineBufferHolder = combineBufferSupplier.get(); - final ByteBuffer combineBuffer = combineBufferHolder.get(); - final int minimumRequiredBufferCapacity = StreamingMergeSortedGrouper.requiredBufferCapacity( - combineKeySerdeFactory.factorizeWithDictionary(mergedDictionary), - combiningFactories - ); - // We want to maximize the parallelism while the size of buffer slice is greater than the minimum buffer size - // required by StreamingMergeSortedGrouper. Here, we find the leafCombineDegree of the cominbing tree and the - // required number of buffers maximizing the parallelism. - final Pair degreeAndNumBuffers = findLeafCombineDegreeAndNumBuffers( - combineBuffer, - minimumRequiredBufferCapacity, - concurrencyHint, - sortedIterators.size() - ); + closer.register(combineBufferHolder); - final int leafCombineDegree = degreeAndNumBuffers.lhs; - final int numBuffers = degreeAndNumBuffers.rhs; - final int sliceSize = combineBuffer.capacity() / numBuffers; + try { + final ByteBuffer combineBuffer = combineBufferHolder.get(); + final int minimumRequiredBufferCapacity = StreamingMergeSortedGrouper.requiredBufferCapacity( + combineKeySerdeFactory.factorizeWithDictionary(mergedDictionary), + combiningFactories + ); + // We want to maximize the parallelism while the size of buffer slice is greater than the minimum buffer size + // required by StreamingMergeSortedGrouper. Here, we find the leafCombineDegree of the cominbing tree and the + // required number of buffers maximizing the parallelism. + final Pair degreeAndNumBuffers = findLeafCombineDegreeAndNumBuffers( + combineBuffer, + minimumRequiredBufferCapacity, + concurrencyHint, + sortedIterators.size() + ); - final Supplier bufferSupplier = createCombineBufferSupplier(combineBuffer, numBuffers, sliceSize); + final int leafCombineDegree = degreeAndNumBuffers.lhs; + final int numBuffers = degreeAndNumBuffers.rhs; + final int sliceSize = combineBuffer.capacity() / numBuffers; - final Pair>>, List> combineIteratorAndFutures = buildCombineTree( - sortedIterators, - bufferSupplier, - combiningFactories, - leafCombineDegree, - mergedDictionary - ); + final Supplier bufferSupplier = createCombineBufferSupplier(combineBuffer, numBuffers, sliceSize); - final CloseableIterator> combineIterator = Iterables.getOnlyElement(combineIteratorAndFutures.lhs); - final List combineFutures = combineIteratorAndFutures.rhs; + final Pair>>, List> combineIteratorAndFutures = buildCombineTree( + sortedIterators, + bufferSupplier, + combiningFactories, + leafCombineDegree, + mergedDictionary + ); - final Closer closer = Closer.create(); - closer.register(combineBufferHolder); - closer.register(() -> checkCombineFutures(combineFutures)); + final CloseableIterator> combineIterator = Iterables.getOnlyElement(combineIteratorAndFutures.lhs); + final List combineFutures = combineIteratorAndFutures.rhs; + closer.register(() -> checkCombineFutures(combineFutures)); - return CloseableIterators.wrap(combineIterator, closer); + return CloseableIterators.wrap(combineIterator, closer); + } + catch (Throwable t) { + try { + closer.close(); + } + catch (Throwable t2) { + t.addSuppressed(t2); + } + throw t; + } } private static void checkCombineFutures(List combineFutures) @@ -289,11 +300,11 @@ private int computeRequiredBufferNum(int numChildNodes, int combineDegree) * Recursively build a combining tree in a bottom-up 
manner. Each node of the tree is a task that combines input * iterators asynchronously. * - * @param childIterators all iterators of the child level - * @param bufferSupplier combining buffer supplier - * @param combiningFactories array of combining aggregator factories - * @param combineDegree combining degree for the current level - * @param dictionary merged dictionary + * @param childIterators all iterators of the child level + * @param bufferSupplier combining buffer supplier + * @param combiningFactories array of combining aggregator factories + * @param combineDegree combining degree for the current level + * @param dictionary merged dictionary * * @return a pair of a list of iterators of the current level in the combining tree and a list of futures of all * executed combining tasks diff --git a/processing/src/main/java/io/druid/query/groupby/having/HavingSpecMetricComparator.java b/processing/src/main/java/io/druid/query/groupby/having/HavingSpecMetricComparator.java index ff08202bf7cb..1b041dc1be5a 100644 --- a/processing/src/main/java/io/druid/query/groupby/having/HavingSpecMetricComparator.java +++ b/processing/src/main/java/io/druid/query/groupby/having/HavingSpecMetricComparator.java @@ -20,13 +20,12 @@ package io.druid.query.groupby.having; import com.google.common.primitives.Doubles; -import com.google.common.primitives.Floats; -import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import io.druid.data.input.Row; -import io.druid.query.aggregation.AggregatorFactory; import io.druid.java.util.common.ISE; +import io.druid.query.aggregation.AggregatorFactory; +import java.math.BigDecimal; import java.util.Map; import java.util.regex.Pattern; @@ -46,18 +45,29 @@ static int compare(Row row, String aggregationName, Number value, Map, Sequence> build( List dimensions, List aggs, - List postAggs + List postAggs, + Granularity granularity, + boolean sortByDimsFirst ) { // Can avoid re-sorting if the natural ordering is good enough. - boolean sortingNeeded = false; - - if (dimensions.size() < columns.size()) { - sortingNeeded = true; - } + boolean sortingNeeded = dimensions.size() < columns.size(); final Set aggAndPostAggNames = Sets.newHashSet(); for (AggregatorFactory agg : aggs) { @@ -167,12 +167,17 @@ public Function, Sequence> build( } } + if (!sortingNeeded) { + // If granularity is ALL, sortByDimsFirst doesn't change the sorting order. + sortingNeeded = !granularity.equals(Granularities.ALL) && sortByDimsFirst; + } + if (!sortingNeeded) { return isLimited() ? new LimitingFn(limit) : Functions.identity(); } // Materialize the Comparator first for fast-fail error checking. 
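The combine() refactor above registers the combine buffer holder with a Closer as soon as it is acquired and wraps the remaining setup in try/catch, so an exception thrown while building the combining tree can no longer leak the buffer. A stripped-down sketch of the acquire-register-release-on-failure pattern; the resource and setup here are stand-ins:

```java
import com.google.common.io.Closer;

import java.io.Closeable;
import java.io.IOException;

public class CloserOnFailureSketch
{
  static Closeable buildResource(Closeable acquired) throws IOException
  {
    final Closer closer = Closer.create();
    closer.register(acquired); // register as soon as we own it
    try {
      // ... any further setup that may throw ...
      return closer;           // on success, ownership passes to the caller
    }
    catch (Throwable t) {
      try {
        closer.close();        // on failure, release everything registered so far
      }
      catch (Throwable t2) {
        t.addSuppressed(t2);
      }
      throw t;
    }
  }
}
```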
- final Ordering ordering = makeComparator(dimensions, aggs, postAggs); + final Ordering ordering = makeComparator(dimensions, aggs, postAggs, sortByDimsFirst); if (isLimited()) { return new TopNFunction(ordering, limit); @@ -199,10 +204,13 @@ private ValueType getOrderByType(final OrderByColumnSpec columnSpec, final List< } private Ordering makeComparator( - List dimensions, List aggs, List postAggs + List dimensions, + List aggs, + List postAggs, + boolean sortByDimsFirst ) { - Ordering ordering = new Ordering() + Ordering timeOrdering = new Ordering() { @Override public int compare(Row left, Row right) @@ -226,6 +234,7 @@ public int compare(Row left, Row right) postAggregatorsMap.put(postAgg.getName(), postAgg); } + Ordering ordering = null; for (OrderByColumnSpec columnSpec : columns) { String columnName = columnSpec.getDimension(); Ordering nextOrdering = null; @@ -246,7 +255,13 @@ public int compare(Row left, Row right) nextOrdering = nextOrdering.reverse(); } - ordering = ordering.compound(nextOrdering); + ordering = ordering == null ? nextOrdering : ordering.compound(nextOrdering); + } + + if (ordering != null) { + ordering = sortByDimsFirst ? ordering.compound(timeOrdering) : timeOrdering.compound(ordering); + } else { + ordering = timeOrdering; } return ordering; diff --git a/processing/src/main/java/io/druid/query/groupby/orderby/LimitSpec.java b/processing/src/main/java/io/druid/query/groupby/orderby/LimitSpec.java index 4638e6a49dc4..2e0e489ce1e7 100644 --- a/processing/src/main/java/io/druid/query/groupby/orderby/LimitSpec.java +++ b/processing/src/main/java/io/druid/query/groupby/orderby/LimitSpec.java @@ -24,6 +24,7 @@ import com.google.common.base.Function; import io.druid.data.input.Row; import io.druid.java.util.common.Cacheable; +import io.druid.java.util.common.granularity.Granularity; import io.druid.java.util.common.guava.Sequence; import io.druid.query.aggregation.AggregatorFactory; import io.druid.query.aggregation.PostAggregator; @@ -48,16 +49,20 @@ static LimitSpec nullToNoopLimitSpec(@Nullable LimitSpec limitSpec) /** * Returns a function that applies a limit to an input sequence that is assumed to be sorted on dimensions. 
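With the new sortByDimsFirst flag, DefaultLimitSpec above composes the time ordering and the dimension orderings in either order: dimensions first when the flag is set, time first otherwise. The Guava idiom is Ordering.compound(), where the second comparator only breaks ties. A compact sketch with a two-field stand-in Row:

```java
import com.google.common.collect.Ordering;

import java.util.Comparator;

public class SortByDimsFirstSketch
{
  static final class Row
  {
    final long timestamp;
    final String dim;

    Row(long timestamp, String dim)
    {
      this.timestamp = timestamp;
      this.dim = dim;
    }
  }

  static Ordering<Row> makeOrdering(boolean sortByDimsFirst)
  {
    Ordering<Row> timeOrdering = Ordering.from(Comparator.comparingLong((Row row) -> row.timestamp));
    Ordering<Row> dimOrdering = Ordering.from(Comparator.comparing((Row row) -> row.dim));
    // compound() uses the second comparator only to break ties, so the first
    // ordering listed is the primary sort key.
    return sortByDimsFirst ? dimOrdering.compound(timeOrdering) : timeOrdering.compound(dimOrdering);
  }
}
```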
* - * @param dimensions query dimensions - * @param aggs query aggregators - * @param postAggs query postAggregators + * @param dimensions query dimensions + * @param aggs query aggregators + * @param postAggs query postAggregators + * @param granularity query granularity + * @param sortByDimsFirst 'sortByDimsFirst' value in queryContext * * @return limit function */ Function, Sequence> build( List dimensions, List aggs, - List postAggs + List postAggs, + Granularity granularity, + boolean sortByDimsFirst ); LimitSpec merge(LimitSpec other); diff --git a/processing/src/main/java/io/druid/query/groupby/orderby/NoopLimitSpec.java b/processing/src/main/java/io/druid/query/groupby/orderby/NoopLimitSpec.java index 2ba458a6755d..835c1d0a006c 100644 --- a/processing/src/main/java/io/druid/query/groupby/orderby/NoopLimitSpec.java +++ b/processing/src/main/java/io/druid/query/groupby/orderby/NoopLimitSpec.java @@ -23,6 +23,7 @@ import com.google.common.base.Function; import com.google.common.base.Functions; import io.druid.data.input.Row; +import io.druid.java.util.common.granularity.Granularity; import io.druid.java.util.common.guava.Sequence; import io.druid.query.aggregation.AggregatorFactory; import io.druid.query.aggregation.PostAggregator; @@ -52,7 +53,9 @@ private NoopLimitSpec() public Function, Sequence> build( List dimensions, List aggs, - List postAggs + List postAggs, + Granularity granularity, + boolean sortByDimsFirst ) { return Functions.identity(); diff --git a/processing/src/main/java/io/druid/query/groupby/strategy/GroupByStrategyV2.java b/processing/src/main/java/io/druid/query/groupby/strategy/GroupByStrategyV2.java index 5ad75004b760..24bbfe7af01a 100644 --- a/processing/src/main/java/io/druid/query/groupby/strategy/GroupByStrategyV2.java +++ b/processing/src/main/java/io/druid/query/groupby/strategy/GroupByStrategyV2.java @@ -352,6 +352,6 @@ public Sequence process( StorageAdapter storageAdapter ) { - return GroupByQueryEngineV2.process(query, storageAdapter, bufferPool, configSupplier.get()); + return GroupByQueryEngineV2.process(query, storageAdapter, bufferPool, configSupplier.get().withOverrides(query)); } } diff --git a/processing/src/main/java/io/druid/query/lookup/LookupExtractionFn.java b/processing/src/main/java/io/druid/query/lookup/LookupExtractionFn.java index b0c65a7e098f..1c2605fe9853 100644 --- a/processing/src/main/java/io/druid/query/lookup/LookupExtractionFn.java +++ b/processing/src/main/java/io/druid/query/lookup/LookupExtractionFn.java @@ -37,18 +37,14 @@ public class LookupExtractionFn extends FunctionalExtraction { private final LookupExtractor lookup; private final boolean optimize; - // Thes are retained for auto generated hashCode and Equals - private final boolean retainMissingValue; - private final String replaceMissingValueWith; - private final boolean injective; @JsonCreator public LookupExtractionFn( @JsonProperty("lookup") final LookupExtractor lookup, @JsonProperty("retainMissingValue") final boolean retainMissingValue, @Nullable @JsonProperty("replaceMissingValueWith") final String replaceMissingValueWith, - @JsonProperty("injective") final boolean injective, - @JsonProperty("optimize") Boolean optimize + @JsonProperty("injective") final Boolean injective, + @JsonProperty("optimize") final Boolean optimize ) { super( @@ -63,13 +59,10 @@ public String apply(String input) }, retainMissingValue, replaceMissingValueWith, - injective + injective != null ? injective : lookup.isOneToOne() ); this.lookup = lookup; this.optimize = optimize == null ? 
true : optimize; - this.retainMissingValue = retainMissingValue; - this.injective = injective; - this.replaceMissingValueWith = replaceMissingValueWith; } @@ -175,9 +168,9 @@ public String toString() return "LookupExtractionFn{" + "lookup=" + lookup + ", optimize=" + optimize + - ", retainMissingValue=" + retainMissingValue + - ", replaceMissingValueWith='" + replaceMissingValueWith + '\'' + - ", injective=" + injective + + ", retainMissingValue=" + isRetainMissingValue() + + ", replaceMissingValueWith='" + getReplaceMissingValueWith() + '\'' + + ", injective=" + isInjective() + '}'; } } diff --git a/processing/src/main/java/io/druid/query/lookup/LookupSnapshotTaker.java b/processing/src/main/java/io/druid/query/lookup/LookupSnapshotTaker.java index 3d67124610ff..52b69c03270c 100644 --- a/processing/src/main/java/io/druid/query/lookup/LookupSnapshotTaker.java +++ b/processing/src/main/java/io/druid/query/lookup/LookupSnapshotTaker.java @@ -19,13 +19,15 @@ package io.druid.query.lookup; - import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import io.druid.guice.annotations.Json; +import io.druid.java.util.common.FileUtils; import io.druid.java.util.common.ISE; +import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.logger.Logger; import java.io.File; @@ -33,16 +35,13 @@ import java.util.Collections; import java.util.List; - public class LookupSnapshotTaker { private static final Logger LOGGER = new Logger(LookupSnapshotTaker.class); - protected static final String PERSIST_FILE_NAME = "lookupSnapshot.json"; + private static final String PERSIST_FILE_SUFFIX = "lookupSnapshot.json"; private final ObjectMapper objectMapper; private final File persistDirectory; - private final File persistFile; - public LookupSnapshotTaker( final @Json ObjectMapper jsonMapper, @@ -50,7 +49,10 @@ public LookupSnapshotTaker( ) { this.objectMapper = jsonMapper; - Preconditions.checkArgument(!Strings.isNullOrEmpty(persistDirectory), "can not work without specifying persistDirectory"); + Preconditions.checkArgument( + !Strings.isNullOrEmpty(persistDirectory), + "can not work without specifying persistDirectory" + ); this.persistDirectory = new File(persistDirectory); if (!this.persistDirectory.exists()) { Preconditions.checkArgument(this.persistDirectory.mkdirs(), "Oups was not able to create persist directory"); @@ -58,11 +60,12 @@ public LookupSnapshotTaker( if (!this.persistDirectory.isDirectory()) { throw new ISE("Can only persist to directories, [%s] wasn't a directory", persistDirectory); } - this.persistFile = new File(persistDirectory, PERSIST_FILE_NAME); } - public synchronized List pullExistingSnapshot() + public synchronized List pullExistingSnapshot(final String tier) { + final File persistFile = getPersistFile(tier); + List lookupBeanList; try { if (!persistFile.isFile()) { @@ -72,7 +75,7 @@ public synchronized List pullExistingSnapshot() LOGGER.warn("found empty file no lookups to load from [%s]", persistFile.getAbsolutePath()); return Collections.emptyList(); } - lookupBeanList = objectMapper.readValue(persistFile, new TypeReference>(){}); + lookupBeanList = objectMapper.readValue(persistFile, new TypeReference>() {}); return lookupBeanList; } catch (IOException e) { @@ -80,18 +83,21 @@ public synchronized List pullExistingSnapshot() } } - public synchronized void takeSnapshot(List 
lookups) + public synchronized void takeSnapshot(String tier, List lookups) { + final File persistFile = getPersistFile(tier); + try { - objectMapper.writeValue(persistFile, lookups); + FileUtils.writeAtomically(persistFile, out -> objectMapper.writeValue(out, lookups)); } catch (IOException e) { throw new ISE(e, "Exception during serialization of lookups using file [%s]", persistFile.getAbsolutePath()); } } - public File getPersistFile() + @VisibleForTesting + File getPersistFile(final String tier) { - return persistFile; + return new File(persistDirectory, StringUtils.format("%s.%s", tier, PERSIST_FILE_SUFFIX)); } } diff --git a/processing/src/main/java/io/druid/query/search/AutoStrategy.java b/processing/src/main/java/io/druid/query/search/AutoStrategy.java index b3b692c298dd..665669e5476c 100644 --- a/processing/src/main/java/io/druid/query/search/AutoStrategy.java +++ b/processing/src/main/java/io/druid/query/search/AutoStrategy.java @@ -19,7 +19,7 @@ package io.druid.query.search; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.query.dimension.DimensionSpec; import io.druid.query.filter.BitmapIndexSelector; import io.druid.segment.ColumnSelectorBitmapIndexSelector; diff --git a/processing/src/main/java/io/druid/query/search/DefaultSearchQueryMetrics.java b/processing/src/main/java/io/druid/query/search/DefaultSearchQueryMetrics.java index e554ffa317cc..9fdf1a8a6af7 100644 --- a/processing/src/main/java/io/druid/query/search/DefaultSearchQueryMetrics.java +++ b/processing/src/main/java/io/druid/query/search/DefaultSearchQueryMetrics.java @@ -19,7 +19,7 @@ package io.druid.query.search; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.collections.bitmap.BitmapFactory; import io.druid.java.util.common.ISE; import io.druid.query.BitmapResultFactory; diff --git a/processing/src/main/java/io/druid/query/search/SearchQuery.java b/processing/src/main/java/io/druid/query/search/SearchQuery.java index f9cd866e1f7f..df298d55c5ba 100644 --- a/processing/src/main/java/io/druid/query/search/SearchQuery.java +++ b/processing/src/main/java/io/druid/query/search/SearchQuery.java @@ -45,7 +45,6 @@ public class SearchQuery extends BaseQuery> private final DimFilter dimFilter; private final SearchSortSpec sortSpec; - private final Granularity granularity; private final List dimensions; private final SearchQuerySpec querySpec; private final int limit; @@ -63,12 +62,11 @@ public SearchQuery( @JsonProperty("context") Map context ) { - super(dataSource, querySegmentSpec, false, context); + super(dataSource, querySegmentSpec, false, context, Granularities.nullToAll(granularity)); Preconditions.checkNotNull(querySegmentSpec, "Must specify an interval"); this.dimFilter = dimFilter; this.sortSpec = sortSpec == null ? DEFAULT_SORT_SPEC : sortSpec; - this.granularity = granularity == null ? Granularities.ALL : granularity; this.limit = (limit == 0) ? 1000 : limit; this.dimensions = dimensions; this.querySpec = querySpec == null ? 
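LookupSnapshotTaker above becomes tier-aware, writing one tier.lookupSnapshot.json per tier, and switches the write to FileUtils.writeAtomically() so a crash mid-write cannot leave a truncated snapshot behind. Assuming writeAtomically follows the usual write-to-temp-then-rename recipe, here is a generic java.nio sketch; the Writer interface and method shape are hypothetical, not Druid's actual API:

```java
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicWriteSketch
{
  interface Writer // hypothetical callback, standing in for the ObjectMapper write
  {
    void write(OutputStream out) throws IOException;
  }

  // Write to a temp file in the same directory, then atomically move it over
  // the target; readers see either the old file or the new one, never a
  // partially written snapshot.
  static void writeAtomically(Path target, Writer writer) throws IOException
  {
    Path tmp = Files.createTempFile(target.getParent(), target.getFileName().toString(), ".tmp");
    try (OutputStream out = Files.newOutputStream(tmp)) {
      writer.write(out);
    }
    Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
  }
}
```

The temp file must live in the same directory (same filesystem) as the target, or the move cannot be atomic.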
new AllSearchQuerySpec() : querySpec; @@ -122,12 +120,6 @@ public DimFilter getDimensionsFilter() return dimFilter; } - @JsonProperty - public Granularity getGranularity() - { - return granularity; - } - @JsonProperty public int getLimit() { @@ -161,14 +153,14 @@ public SearchQuery withLimit(int newLimit) public String toString() { return "SearchQuery{" + - "dataSource='" + getDataSource() + '\'' + - ", dimFilter=" + dimFilter + - ", granularity='" + granularity + '\'' + - ", dimensions=" + dimensions + - ", querySpec=" + querySpec + - ", querySegmentSpec=" + getQuerySegmentSpec() + - ", limit=" + limit + - '}'; + "dataSource='" + getDataSource() + '\'' + + ", dimFilter=" + dimFilter + + ", granularity='" + getGranularity() + '\'' + + ", dimensions=" + dimensions + + ", querySpec=" + querySpec + + ", querySegmentSpec=" + getQuerySegmentSpec() + + ", limit=" + limit + + '}'; } @Override @@ -195,9 +187,6 @@ public boolean equals(Object o) if (dimensions != null ? !dimensions.equals(that.dimensions) : that.dimensions != null) { return false; } - if (granularity != null ? !granularity.equals(that.granularity) : that.granularity != null) { - return false; - } if (querySpec != null ? !querySpec.equals(that.querySpec) : that.querySpec != null) { return false; } @@ -214,7 +203,6 @@ public int hashCode() int result = super.hashCode(); result = 31 * result + (dimFilter != null ? dimFilter.hashCode() : 0); result = 31 * result + (sortSpec != null ? sortSpec.hashCode() : 0); - result = 31 * result + (granularity != null ? granularity.hashCode() : 0); result = 31 * result + (dimensions != null ? dimensions.hashCode() : 0); result = 31 * result + (querySpec != null ? querySpec.hashCode() : 0); result = 31 * result + limit; diff --git a/processing/src/main/java/io/druid/query/search/SearchStrategySelector.java b/processing/src/main/java/io/druid/query/search/SearchStrategySelector.java index 91e431737914..ef5e40440cec 100644 --- a/processing/src/main/java/io/druid/query/search/SearchStrategySelector.java +++ b/processing/src/main/java/io/druid/query/search/SearchStrategySelector.java @@ -21,7 +21,7 @@ import com.google.common.base.Supplier; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.ISE; public class SearchStrategySelector diff --git a/processing/src/main/java/io/druid/query/select/DefaultSelectQueryMetrics.java b/processing/src/main/java/io/druid/query/select/DefaultSelectQueryMetrics.java index ab6238a8bee5..7a73d527b9f4 100644 --- a/processing/src/main/java/io/druid/query/select/DefaultSelectQueryMetrics.java +++ b/processing/src/main/java/io/druid/query/select/DefaultSelectQueryMetrics.java @@ -19,7 +19,7 @@ package io.druid.query.select; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.collections.bitmap.BitmapFactory; import io.druid.java.util.common.ISE; import io.druid.query.BitmapResultFactory; diff --git a/processing/src/main/java/io/druid/query/select/SelectQuery.java b/processing/src/main/java/io/druid/query/select/SelectQuery.java index 6676777ba3d1..2bc972b9fac7 100644 --- a/processing/src/main/java/io/druid/query/select/SelectQuery.java +++ b/processing/src/main/java/io/druid/query/select/SelectQuery.java @@ -45,7 +45,6 @@ public class SelectQuery extends BaseQuery> { private final DimFilter dimFilter; - private final Granularity granularity; private final List dimensions; private final List 
metrics; private final VirtualColumns virtualColumns; @@ -65,9 +64,8 @@ public SelectQuery( @JsonProperty("context") Map context ) { - super(dataSource, querySegmentSpec, descending, context); + super(dataSource, querySegmentSpec, descending, context, Granularities.nullToAll(granularity)); this.dimFilter = dimFilter; - this.granularity = granularity == null ? Granularities.ALL : granularity; this.dimensions = dimensions; this.virtualColumns = VirtualColumns.nullToEmpty(virtualColumns); this.metrics = metrics; @@ -111,12 +109,6 @@ public DimFilter getDimensionsFilter() return dimFilter; } - @JsonProperty - public Granularity getGranularity() - { - return granularity; - } - @JsonProperty public List getDimensions() { @@ -179,16 +171,16 @@ public SelectQuery withDimFilter(DimFilter dimFilter) public String toString() { return "SelectQuery{" + - "dataSource='" + getDataSource() + '\'' + - ", querySegmentSpec=" + getQuerySegmentSpec() + - ", descending=" + isDescending() + - ", dimFilter=" + dimFilter + - ", granularity=" + granularity + - ", dimensions=" + dimensions + - ", metrics=" + metrics + - ", virtualColumns=" + virtualColumns + - ", pagingSpec=" + pagingSpec + - '}'; + "dataSource='" + getDataSource() + '\'' + + ", querySegmentSpec=" + getQuerySegmentSpec() + + ", descending=" + isDescending() + + ", dimFilter=" + dimFilter + + ", granularity=" + getGranularity() + + ", dimensions=" + dimensions + + ", metrics=" + metrics + + ", virtualColumns=" + virtualColumns + + ", pagingSpec=" + pagingSpec + + '}'; } @Override @@ -209,9 +201,6 @@ public boolean equals(Object o) if (!Objects.equals(dimFilter, that.dimFilter)) { return false; } - if (!Objects.equals(granularity, that.granularity)) { - return false; - } if (!Objects.equals(dimensions, that.dimensions)) { return false; } @@ -233,7 +222,6 @@ public int hashCode() { int result = super.hashCode(); result = 31 * result + (dimFilter != null ? dimFilter.hashCode() : 0); - result = 31 * result + (granularity != null ? granularity.hashCode() : 0); result = 31 * result + (dimensions != null ? dimensions.hashCode() : 0); result = 31 * result + (metrics != null ? metrics.hashCode() : 0); result = 31 * result + (virtualColumns != null ? virtualColumns.hashCode() : 0); diff --git a/processing/src/main/java/io/druid/query/timeseries/TimeseriesQuery.java b/processing/src/main/java/io/druid/query/timeseries/TimeseriesQuery.java index 2cfa00b6235e..d1ac95cdadce 100644 --- a/processing/src/main/java/io/druid/query/timeseries/TimeseriesQuery.java +++ b/processing/src/main/java/io/druid/query/timeseries/TimeseriesQuery.java @@ -47,7 +47,6 @@ public class TimeseriesQuery extends BaseQuery> { private final VirtualColumns virtualColumns; private final DimFilter dimFilter; - private final Granularity granularity; private final List aggregatorSpecs; private final List postAggregatorSpecs; @@ -64,11 +63,10 @@ public TimeseriesQuery( @JsonProperty("context") Map context ) { - super(dataSource, querySegmentSpec, descending, context); + super(dataSource, querySegmentSpec, descending, context, granularity); this.virtualColumns = VirtualColumns.nullToEmpty(virtualColumns); this.dimFilter = dimFilter; - this.granularity = granularity; this.aggregatorSpecs = aggregatorSpecs == null ? 
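SearchQuery and SelectQuery above (TimeseriesQuery and TopNQuery get the same treatment next) stop declaring their own granularity field; it moves into the BaseQuery constructor, with Granularities.nullToAll() supplying the ALL default once at the boundary instead of each subclass null-checking it, and the duplicated getter, toString, equals, and hashCode handling disappears with the field. The shape of the refactor, sketched with stand-in classes since BaseQuery's real constructor takes more parameters:

```java
public class GranularityHoistSketch
{
  enum Granularity { ALL, DAY, HOUR }

  static Granularity nullToAll(Granularity granularity)
  {
    return granularity == null ? Granularity.ALL : granularity;
  }

  abstract static class BaseQuery
  {
    private final Granularity granularity;

    BaseQuery(Granularity granularity)
    {
      this.granularity = granularity;
    }

    // Shared accessor: subclasses no longer declare their own field/getter.
    Granularity getGranularity()
    {
      return granularity;
    }
  }

  static class SearchLikeQuery extends BaseQuery
  {
    SearchLikeQuery(Granularity granularity)
    {
      super(nullToAll(granularity)); // default applied once, at the boundary
    }
  }
}
```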
ImmutableList.of() : aggregatorSpecs; this.postAggregatorSpecs = Queries.prepareAggregations( ImmutableList.of(), @@ -107,12 +105,6 @@ public DimFilter getDimensionsFilter() return dimFilter; } - @JsonProperty - public Granularity getGranularity() - { - return granularity; - } - @JsonProperty("aggregations") public List getAggregatorSpecs() { @@ -168,7 +160,7 @@ public String toString() ", descending=" + isDescending() + ", virtualColumns=" + virtualColumns + ", dimFilter=" + dimFilter + - ", granularity='" + granularity + '\'' + + ", granularity='" + getGranularity() + '\'' + ", aggregatorSpecs=" + aggregatorSpecs + ", postAggregatorSpecs=" + postAggregatorSpecs + ", context=" + getContext() + @@ -190,7 +182,6 @@ public boolean equals(final Object o) final TimeseriesQuery that = (TimeseriesQuery) o; return Objects.equals(virtualColumns, that.virtualColumns) && Objects.equals(dimFilter, that.dimFilter) && - Objects.equals(granularity, that.granularity) && Objects.equals(aggregatorSpecs, that.aggregatorSpecs) && Objects.equals(postAggregatorSpecs, that.postAggregatorSpecs); } @@ -198,6 +189,6 @@ public boolean equals(final Object o) @Override public int hashCode() { - return Objects.hash(super.hashCode(), virtualColumns, dimFilter, granularity, aggregatorSpecs, postAggregatorSpecs); + return Objects.hash(super.hashCode(), virtualColumns, dimFilter, aggregatorSpecs, postAggregatorSpecs); } } diff --git a/processing/src/main/java/io/druid/query/topn/TopNQuery.java b/processing/src/main/java/io/druid/query/topn/TopNQuery.java index 764990b063ed..844142d01c50 100644 --- a/processing/src/main/java/io/druid/query/topn/TopNQuery.java +++ b/processing/src/main/java/io/druid/query/topn/TopNQuery.java @@ -51,7 +51,6 @@ public class TopNQuery extends BaseQuery> private final TopNMetricSpec topNMetricSpec; private final int threshold; private final DimFilter dimFilter; - private final Granularity granularity; private final List aggregatorSpecs; private final List postAggregatorSpecs; @@ -70,7 +69,7 @@ public TopNQuery( @JsonProperty("context") Map context ) { - super(dataSource, querySegmentSpec, false, context); + super(dataSource, querySegmentSpec, false, context, granularity); this.virtualColumns = VirtualColumns.nullToEmpty(virtualColumns); this.dimensionSpec = dimensionSpec; @@ -78,7 +77,6 @@ public TopNQuery( this.threshold = threshold; this.dimFilter = dimFilter; - this.granularity = granularity; this.aggregatorSpecs = aggregatorSpecs == null ? 
ImmutableList.of() : aggregatorSpecs; this.postAggregatorSpecs = Queries.prepareAggregations( ImmutableList.of(dimensionSpec.getOutputName()), @@ -143,12 +141,6 @@ public DimFilter getDimensionsFilter() return dimFilter; } - @JsonProperty - public Granularity getGranularity() - { - return granularity; - } - @JsonProperty("aggregations") public List getAggregatorSpecs() { @@ -218,7 +210,7 @@ public String toString() ", querySegmentSpec=" + getQuerySegmentSpec() + ", virtualColumns=" + virtualColumns + ", dimFilter=" + dimFilter + - ", granularity='" + granularity + '\'' + + ", granularity='" + getGranularity() + '\'' + ", aggregatorSpecs=" + aggregatorSpecs + ", postAggregatorSpecs=" + postAggregatorSpecs + '}'; @@ -242,7 +234,6 @@ public boolean equals(final Object o) Objects.equals(dimensionSpec, topNQuery.dimensionSpec) && Objects.equals(topNMetricSpec, topNQuery.topNMetricSpec) && Objects.equals(dimFilter, topNQuery.dimFilter) && - Objects.equals(granularity, topNQuery.granularity) && Objects.equals(aggregatorSpecs, topNQuery.aggregatorSpecs) && Objects.equals(postAggregatorSpecs, topNQuery.postAggregatorSpecs); } @@ -257,7 +248,6 @@ public int hashCode() topNMetricSpec, threshold, dimFilter, - granularity, aggregatorSpecs, postAggregatorSpecs ); diff --git a/processing/src/main/java/io/druid/segment/ColumnSelectorBitmapIndexSelector.java b/processing/src/main/java/io/druid/segment/ColumnSelectorBitmapIndexSelector.java index ce9dbbf2727b..3ad6574a08dc 100644 --- a/processing/src/main/java/io/druid/segment/ColumnSelectorBitmapIndexSelector.java +++ b/processing/src/main/java/io/druid/segment/ColumnSelectorBitmapIndexSelector.java @@ -229,12 +229,12 @@ public ImmutableBitmap getBitmapIndex(String dimension, String value) public ImmutableRTree getSpatialIndex(String dimension) { if (isVirtualColumn(dimension)) { - return new ImmutableRTree(); + return ImmutableRTree.empty(); } final Column column = index.getColumn(dimension); if (column == null || !column.getCapabilities().hasSpatialIndexes()) { - return new ImmutableRTree(); + return ImmutableRTree.empty(); } return column.getSpatialIndex().getRTree(); diff --git a/processing/src/main/java/io/druid/segment/DimensionHandlerUtils.java b/processing/src/main/java/io/druid/segment/DimensionHandlerUtils.java index c10ed586325f..58f175adbcef 100644 --- a/processing/src/main/java/io/druid/segment/DimensionHandlerUtils.java +++ b/processing/src/main/java/io/druid/segment/DimensionHandlerUtils.java @@ -239,6 +239,12 @@ private static Colu @Nullable public static Long convertObjectToLong(@Nullable Object valObj) + { + return convertObjectToLong(valObj, false); + } + + @Nullable + public static Long convertObjectToLong(@Nullable Object valObj, boolean reportParseExceptions) { if (valObj == null) { return ZERO_LONG; @@ -249,7 +255,11 @@ public static Long convertObjectToLong(@Nullable Object valObj) } else if (valObj instanceof Number) { return ((Number) valObj).longValue(); } else if (valObj instanceof String) { - return DimensionHandlerUtils.getExactLongFromDecimalString((String) valObj); + Long ret = DimensionHandlerUtils.getExactLongFromDecimalString((String) valObj); + if (reportParseExceptions && ret == null) { + throw new ParseException("could not convert value [%s] to long", valObj); + } + return ret; } else { throw new ParseException("Unknown type[%s]", valObj.getClass()); } @@ -257,6 +267,12 @@ public static Long convertObjectToLong(@Nullable Object valObj) @Nullable public static Float convertObjectToFloat(@Nullable Object valObj) + { + 
return convertObjectToFloat(valObj, false); + } + + @Nullable + public static Float convertObjectToFloat(@Nullable Object valObj, boolean reportParseExceptions) { if (valObj == null) { return ZERO_FLOAT; @@ -267,7 +283,11 @@ public static Float convertObjectToFloat(@Nullable Object valObj) } else if (valObj instanceof Number) { return ((Number) valObj).floatValue(); } else if (valObj instanceof String) { - return Floats.tryParse((String) valObj); + Float ret = Floats.tryParse((String) valObj); + if (reportParseExceptions && ret == null) { + throw new ParseException("could not convert value [%s] to float", valObj); + } + return ret; } else { throw new ParseException("Unknown type[%s]", valObj.getClass()); } @@ -275,6 +295,12 @@ public static Float convertObjectToFloat(@Nullable Object valObj) @Nullable public static Double convertObjectToDouble(@Nullable Object valObj) + { + return convertObjectToDouble(valObj, false); + } + + @Nullable + public static Double convertObjectToDouble(@Nullable Object valObj, boolean reportParseExceptions) { if (valObj == null) { return ZERO_DOUBLE; @@ -285,7 +311,11 @@ public static Double convertObjectToDouble(@Nullable Object valObj) } else if (valObj instanceof Number) { return ((Number) valObj).doubleValue(); } else if (valObj instanceof String) { - return Doubles.tryParse((String) valObj); + Double ret = Doubles.tryParse((String) valObj); + if (reportParseExceptions && ret == null) { + throw new ParseException("could not convert value [%s] to double", valObj); + } + return ret; } else { throw new ParseException("Unknown type[%s]", valObj.getClass()); } diff --git a/processing/src/main/java/io/druid/segment/DimensionIndexer.java b/processing/src/main/java/io/druid/segment/DimensionIndexer.java index 390c1992ea63..cbc298c2f246 100644 --- a/processing/src/main/java/io/druid/segment/DimensionIndexer.java +++ b/processing/src/main/java/io/druid/segment/DimensionIndexer.java @@ -122,9 +122,10 @@ public interface DimensionIndexer * * @param dimValues Single row val to process * + * @param reportParseExceptions * @return An array containing an encoded representation of the input row value. */ - EncodedKeyComponentType processRowValsToUnsortedEncodedKeyComponent(Object dimValues); + EncodedKeyComponentType processRowValsToUnsortedEncodedKeyComponent(Object dimValues, boolean reportParseExceptions); /** diff --git a/processing/src/main/java/io/druid/segment/DoubleDimensionIndexer.java b/processing/src/main/java/io/druid/segment/DoubleDimensionIndexer.java index 26d71abc2933..2643ed6ff386 100644 --- a/processing/src/main/java/io/druid/segment/DoubleDimensionIndexer.java +++ b/processing/src/main/java/io/druid/segment/DoubleDimensionIndexer.java @@ -34,12 +34,14 @@ public class DoubleDimensionIndexer implements DimensionIndexer zero conversion when https://github.com/druid-io/druid/pull/5278 series of patches is merged + return ret == null ? DimensionHandlerUtils.ZERO_DOUBLE : ret; } @Override diff --git a/processing/src/main/java/io/druid/segment/FloatDimensionIndexer.java b/processing/src/main/java/io/druid/segment/FloatDimensionIndexer.java index 49c93f576107..6ac82d6abc36 100644 --- a/processing/src/main/java/io/druid/segment/FloatDimensionIndexer.java +++ b/processing/src/main/java/io/druid/segment/FloatDimensionIndexer.java @@ -34,13 +34,15 @@ public class FloatDimensionIndexer implements DimensionIndexer zero conversion when https://github.com/druid-io/druid/pull/5278 series of patches is merged + return ret == null ? 
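The DimensionHandlerUtils overloads above thread a reportParseExceptions flag through the numeric conversions: lenient callers still get null for an unparseable string, while strict callers get an exception they can surface as an ingestion error. A compact sketch of the lenient/strict split, using Guava's Floats.tryParse as the patch does; IllegalArgumentException stands in for Druid's ParseException:

```java
import com.google.common.primitives.Floats;

import javax.annotation.Nullable;

public class StrictParseSketch
{
  @Nullable
  static Float convertToFloat(String val, boolean reportParseExceptions)
  {
    Float ret = Floats.tryParse(val); // returns null rather than throwing
    if (reportParseExceptions && ret == null) {
      // stand-in for Druid's ParseException
      throw new IllegalArgumentException("could not convert value [" + val + "] to float");
    }
    return ret; // lenient mode: null signals "unparseable", caller decides
  }
}
```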
DimensionHandlerUtils.ZERO_FLOAT : ret; } @Override diff --git a/processing/src/main/java/io/druid/segment/IndexIO.java b/processing/src/main/java/io/druid/segment/IndexIO.java index bab5c8432a91..5f28a7fd58df 100644 --- a/processing/src/main/java/io/druid/segment/IndexIO.java +++ b/processing/src/main/java/io/druid/segment/IndexIO.java @@ -31,7 +31,7 @@ import com.google.common.io.Files; import com.google.common.primitives.Ints; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.collections.bitmap.ConciseBitmapFactory; import io.druid.collections.bitmap.ImmutableBitmap; import io.druid.collections.spatial.ImmutableRTree; diff --git a/processing/src/main/java/io/druid/segment/LongDimensionIndexer.java b/processing/src/main/java/io/druid/segment/LongDimensionIndexer.java index 8aeeca25395d..29afdde65b9c 100644 --- a/processing/src/main/java/io/druid/segment/LongDimensionIndexer.java +++ b/processing/src/main/java/io/druid/segment/LongDimensionIndexer.java @@ -34,13 +34,15 @@ public class LongDimensionIndexer implements DimensionIndexer { @Override - public Long processRowValsToUnsortedEncodedKeyComponent(Object dimValues) + public Long processRowValsToUnsortedEncodedKeyComponent(Object dimValues, boolean reportParseExceptions) { if (dimValues instanceof List) { throw new UnsupportedOperationException("Numeric columns do not support multivalue rows."); } - return DimensionHandlerUtils.convertObjectToLong(dimValues); + Long ret = DimensionHandlerUtils.convertObjectToLong(dimValues, reportParseExceptions); + // remove null -> zero conversion when https://github.com/druid-io/druid/pull/5278 series of patches is merged + return ret == null ? DimensionHandlerUtils.ZERO_LONG : ret; } @Override diff --git a/processing/src/main/java/io/druid/segment/QueryableIndex.java b/processing/src/main/java/io/druid/segment/QueryableIndex.java index 0f6dd5182f43..d1b697b43414 100644 --- a/processing/src/main/java/io/druid/segment/QueryableIndex.java +++ b/processing/src/main/java/io/druid/segment/QueryableIndex.java @@ -23,6 +23,7 @@ import io.druid.segment.data.Indexed; import org.joda.time.Interval; +import javax.annotation.Nullable; import java.io.Closeable; import java.io.IOException; import java.util.Map; @@ -40,7 +41,7 @@ public interface QueryableIndex extends ColumnSelector, Closeable int getNumRows(); Indexed getAvailableDimensions(); BitmapFactory getBitmapFactoryForDimensions(); - Metadata getMetadata(); + @Nullable Metadata getMetadata(); Map getDimensionHandlers(); /** diff --git a/processing/src/main/java/io/druid/segment/ReferenceCountingSegment.java b/processing/src/main/java/io/druid/segment/ReferenceCountingSegment.java index f8ec72b2be81..bcb5ea0f2979 100644 --- a/processing/src/main/java/io/druid/segment/ReferenceCountingSegment.java +++ b/processing/src/main/java/io/druid/segment/ReferenceCountingSegment.java @@ -20,7 +20,7 @@ package io.druid.segment; import com.google.common.base.Preconditions; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import org.joda.time.Interval; import java.io.Closeable; diff --git a/processing/src/main/java/io/druid/segment/SimpleQueryableIndex.java b/processing/src/main/java/io/druid/segment/SimpleQueryableIndex.java index 17d81d2119ba..754b93d98387 100644 --- a/processing/src/main/java/io/druid/segment/SimpleQueryableIndex.java +++ b/processing/src/main/java/io/druid/segment/SimpleQueryableIndex.java @@ -19,6 +19,7 @@ 
package io.druid.segment; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import io.druid.collections.bitmap.BitmapFactory; @@ -52,11 +53,11 @@ public SimpleQueryableIndex( BitmapFactory bitmapFactory, Map columns, SmooshedFileMapper fileMapper, - Metadata metadata + @Nullable Metadata metadata ) { Preconditions.checkNotNull(columns.get(Column.TIME_COLUMN_NAME)); - this.dataInterval = dataInterval; + this.dataInterval = Preconditions.checkNotNull(dataInterval, "dataInterval"); this.columnNames = columnNames; this.availableDimensions = dimNames; this.bitmapFactory = bitmapFactory; @@ -67,6 +68,28 @@ public SimpleQueryableIndex( initDimensionHandlers(); } + @VisibleForTesting + public SimpleQueryableIndex( + Interval interval, + Indexed columnNames, + Indexed availableDimensions, + BitmapFactory bitmapFactory, + Map columns, + SmooshedFileMapper fileMapper, + @Nullable Metadata metadata, + Map dimensionHandlers + ) + { + this.dataInterval = interval; + this.columnNames = columnNames; + this.availableDimensions = availableDimensions; + this.bitmapFactory = bitmapFactory; + this.columns = columns; + this.fileMapper = fileMapper; + this.metadata = metadata; + this.dimensionHandlers = dimensionHandlers; + } + @Override public Interval getDataInterval() { @@ -104,6 +127,18 @@ public Column getColumn(String columnName) return columns.get(columnName); } + @VisibleForTesting + public Map getColumns() + { + return columns; + } + + @VisibleForTesting + public SmooshedFileMapper getFileMapper() + { + return fileMapper; + } + @Override public void close() throws IOException { diff --git a/processing/src/main/java/io/druid/segment/StringDimensionIndexer.java b/processing/src/main/java/io/druid/segment/StringDimensionIndexer.java index 71e1c3443fe4..96fd4d8f74d1 100644 --- a/processing/src/main/java/io/druid/segment/StringDimensionIndexer.java +++ b/processing/src/main/java/io/druid/segment/StringDimensionIndexer.java @@ -188,7 +188,7 @@ public StringDimensionIndexer(MultiValueHandling multiValueHandling) } @Override - public int[] processRowValsToUnsortedEncodedKeyComponent(Object dimValues) + public int[] processRowValsToUnsortedEncodedKeyComponent(Object dimValues, boolean reportParseExceptions) { final int[] encodedDimensionValues; final int oldDictSize = dimLookup.size(); diff --git a/processing/src/main/java/io/druid/segment/data/GenericIndexed.java b/processing/src/main/java/io/druid/segment/data/GenericIndexed.java index 2bbfa9ef6568..136658bf2a62 100644 --- a/processing/src/main/java/io/druid/segment/data/GenericIndexed.java +++ b/processing/src/main/java/io/druid/segment/data/GenericIndexed.java @@ -498,7 +498,9 @@ private static GenericIndexed fromIterableVersionOne( // for compatibility with the format, but this field is unused valuesOut.writeInt(0); - strategy.writeTo(next, valuesOut); + if (next != null) { + strategy.writeTo(next, valuesOut); + } headerOut.writeInt(Ints.checkedCast(valuesOut.size())); if (prevVal instanceof Closeable) { diff --git a/processing/src/main/java/io/druid/segment/data/GenericIndexedWriter.java b/processing/src/main/java/io/druid/segment/data/GenericIndexedWriter.java index 31822e13cc51..341026a1c579 100644 --- a/processing/src/main/java/io/druid/segment/data/GenericIndexedWriter.java +++ b/processing/src/main/java/io/druid/segment/data/GenericIndexedWriter.java @@ -217,7 +217,9 @@ public void write(T objectToWrite) throws IOException // for compatibility with the format 
(see GenericIndexed javadoc for description of the format), but this field is // unused. valuesOut.writeInt(0); - strategy.writeTo(objectToWrite, valuesOut); + if (objectToWrite != null) { + strategy.writeTo(objectToWrite, valuesOut); + } if (!requireMultipleFiles) { headerOut.writeInt(Ints.checkedCast(valuesOut.size())); diff --git a/processing/src/main/java/io/druid/segment/data/ImmutableRTreeObjectStrategy.java b/processing/src/main/java/io/druid/segment/data/ImmutableRTreeObjectStrategy.java index 14f38f4ef49e..6e49c8626b7a 100644 --- a/processing/src/main/java/io/druid/segment/data/ImmutableRTreeObjectStrategy.java +++ b/processing/src/main/java/io/druid/segment/data/ImmutableRTreeObjectStrategy.java @@ -22,8 +22,9 @@ import com.google.common.collect.Ordering; import io.druid.collections.bitmap.BitmapFactory; import io.druid.collections.spatial.ImmutableRTree; -import it.unimi.dsi.fastutil.bytes.ByteArrays; +import io.druid.segment.writeout.WriteOutBytes; +import java.io.IOException; import java.nio.ByteBuffer; public class ImmutableRTreeObjectStrategy implements ObjectStrategy @@ -71,12 +72,15 @@ public ImmutableRTree fromByteBuffer(ByteBuffer buffer, int numBytes) @Override public byte[] toBytes(ImmutableRTree val) { - if (val == null || val.size() == 0) { - return ByteArrays.EMPTY_ARRAY; - } return val.toBytes(); } + @Override + public void writeTo(ImmutableRTree val, WriteOutBytes out) throws IOException + { + val.writeTo(out); + } + @Override public int compare(ImmutableRTree o1, ImmutableRTree o2) { diff --git a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java index 4aa4a1b976b3..a752c5d4425f 100644 --- a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java +++ b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java @@ -552,7 +552,10 @@ TimeAndDims toTimeAndDims(InputRow row) } DimensionHandler handler = desc.getHandler(); DimensionIndexer indexer = desc.getIndexer(); - Object dimsKey = indexer.processRowValsToUnsortedEncodedKeyComponent(row.getRaw(dimension)); + Object dimsKey = indexer.processRowValsToUnsortedEncodedKeyComponent( + row.getRaw(dimension), + reportParseExceptions + ); // Set column capabilities as data is coming in if (!capabilities.hasMultipleValues() && dimsKey != null && handler.getLengthOfEncodedKeyComponent(dimsKey) > 1) { diff --git a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndexAdapter.java b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndexAdapter.java index 8cb44ffa669b..8eb148d6bb4a 100644 --- a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndexAdapter.java +++ b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndexAdapter.java @@ -100,7 +100,7 @@ public IncrementalIndexAdapter( // Add 'null' to the dimension's dictionary. 
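The GenericIndexed and GenericIndexedWriter hunks above skip strategy.writeTo() for null values, writing only the length prefix, and ComplexMetricSerde.toBytes() below likewise maps null to an empty byte array rather than passing null into the ObjectStrategy. Both rely on the same convention: a zero-length payload round-trips back to null. A toy version of that convention:

```java
import javax.annotation.Nullable;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class NullSafeSerdeSketch
{
  private static final byte[] EMPTY = new byte[0];

  static byte[] toBytes(@Nullable String val)
  {
    // Writers emit nothing for null; the stored length (0) is the only marker.
    return val == null ? EMPTY : val.getBytes(StandardCharsets.UTF_8);
  }

  @Nullable
  static String fromByteBuffer(ByteBuffer buffer, int numBytes)
  {
    if (numBytes == 0) {
      return null; // zero-length payload round-trips to null
    }
    byte[] bytes = new byte[numBytes];
    buffer.get(bytes);
    return new String(bytes, StandardCharsets.UTF_8);
  }
}
```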
if (dimIndex >= dims.length || dims[dimIndex] == null) { - accessor.indexer.processRowValsToUnsortedEncodedKeyComponent(null); + accessor.indexer.processRowValsToUnsortedEncodedKeyComponent(null, true); continue; } final ColumnCapabilities capabilities = dimension.getCapabilities(); diff --git a/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java b/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java index dc3c6647f3a1..8698b36e1f53 100644 --- a/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java +++ b/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java @@ -25,7 +25,9 @@ import io.druid.segment.GenericColumnSerializer; import io.druid.segment.column.ColumnBuilder; import io.druid.segment.data.ObjectStrategy; +import it.unimi.dsi.fastutil.bytes.ByteArrays; +import javax.annotation.Nullable; import java.nio.ByteBuffer; /** @@ -80,9 +82,9 @@ public Function inputSizeFn() * * @return serialized intermediate representation of aggregate in byte[] */ - public byte[] toBytes(Object val) + public byte[] toBytes(@Nullable Object val) { - return getObjectStrategy().toBytes(val); + return val != null ? getObjectStrategy().toBytes(val) : ByteArrays.EMPTY_ARRAY; } /** diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/IntSetTestUtility.java b/processing/src/test/java/io/druid/collections/IntSetTestUtility.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/IntSetTestUtility.java rename to processing/src/test/java/io/druid/collections/IntSetTestUtility.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/TestIntegerSet.java b/processing/src/test/java/io/druid/collections/TestIntegerSet.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/TestIntegerSet.java rename to processing/src/test/java/io/druid/collections/TestIntegerSet.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/BitmapBenchmark.java b/processing/src/test/java/io/druid/collections/bitmap/BitmapBenchmark.java old mode 100755 new mode 100644 similarity index 99% rename from bytebuffer-collections/src/test/java/io/druid/collections/bitmap/BitmapBenchmark.java rename to processing/src/test/java/io/druid/collections/bitmap/BitmapBenchmark.java index 93ee6eb9c909..26a19fe8e230 --- a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/BitmapBenchmark.java +++ b/processing/src/test/java/io/druid/collections/bitmap/BitmapBenchmark.java @@ -39,7 +39,9 @@ import java.util.Locale; import java.util.Random; - +/** + * TODO rewrite this benchmark to JMH + */ @BenchmarkOptions(clock = Clock.NANO_TIME, benchmarkRounds = 50) public class BitmapBenchmark { diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/BitmapIterationTest.java b/processing/src/test/java/io/druid/collections/bitmap/BitmapIterationTest.java similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/bitmap/BitmapIterationTest.java rename to processing/src/test/java/io/druid/collections/bitmap/BitmapIterationTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/ConciseBitmapFactoryTest.java b/processing/src/test/java/io/druid/collections/bitmap/ConciseBitmapFactoryTest.java old mode 100755 new mode 100644 similarity index 100% rename from 
bytebuffer-collections/src/test/java/io/druid/collections/bitmap/ConciseBitmapFactoryTest.java rename to processing/src/test/java/io/druid/collections/bitmap/ConciseBitmapFactoryTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/RangeBitmapBenchmarkTest.java b/processing/src/test/java/io/druid/collections/bitmap/RangeBitmapBenchmarkTest.java old mode 100755 new mode 100644 similarity index 96% rename from bytebuffer-collections/src/test/java/io/druid/collections/bitmap/RangeBitmapBenchmarkTest.java rename to processing/src/test/java/io/druid/collections/bitmap/RangeBitmapBenchmarkTest.java index 4c5d3769c9ab..74999ef80469 --- a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/RangeBitmapBenchmarkTest.java +++ b/processing/src/test/java/io/druid/collections/bitmap/RangeBitmapBenchmarkTest.java @@ -24,13 +24,16 @@ import io.druid.extendedset.intset.ConciseSet; import io.druid.extendedset.intset.ImmutableConciseSet; import io.druid.java.util.common.StringUtils; -import io.druid.test.annotation.Benchmark; +import io.druid.collections.test.annotation.Benchmark; import org.junit.BeforeClass; import org.junit.experimental.categories.Category; import org.roaringbitmap.buffer.MutableRoaringBitmap; import java.util.BitSet; +/** + * TODO rewrite this benchmark to JMH + */ @Category({Benchmark.class}) @BenchmarkHistoryChart(labelWith = LabelType.CUSTOM_KEY, maxRuns = 20) public class RangeBitmapBenchmarkTest extends BitmapBenchmark diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/RoaringBitmapFactoryTest.java b/processing/src/test/java/io/druid/collections/bitmap/RoaringBitmapFactoryTest.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/bitmap/RoaringBitmapFactoryTest.java rename to processing/src/test/java/io/druid/collections/bitmap/RoaringBitmapFactoryTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/UniformBitmapBenchmarkTest.java b/processing/src/test/java/io/druid/collections/bitmap/UniformBitmapBenchmarkTest.java old mode 100755 new mode 100644 similarity index 96% rename from bytebuffer-collections/src/test/java/io/druid/collections/bitmap/UniformBitmapBenchmarkTest.java rename to processing/src/test/java/io/druid/collections/bitmap/UniformBitmapBenchmarkTest.java index 6a0438178891..eb6039528d92 --- a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/UniformBitmapBenchmarkTest.java +++ b/processing/src/test/java/io/druid/collections/bitmap/UniformBitmapBenchmarkTest.java @@ -24,13 +24,16 @@ import io.druid.extendedset.intset.ConciseSet; import io.druid.extendedset.intset.ImmutableConciseSet; import io.druid.java.util.common.StringUtils; -import io.druid.test.annotation.Benchmark; +import io.druid.collections.test.annotation.Benchmark; import org.junit.BeforeClass; import org.junit.experimental.categories.Category; import org.roaringbitmap.buffer.MutableRoaringBitmap; import java.util.BitSet; +/** + * TODO rewrite this benchmark to JMH + */ @Category({Benchmark.class}) @BenchmarkHistoryChart(labelWith = LabelType.CUSTOM_KEY, maxRuns = 20) public class UniformBitmapBenchmarkTest extends BitmapBenchmark diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/WrappedBitSetBitmapBitSetTest.java b/processing/src/test/java/io/druid/collections/bitmap/WrappedBitSetBitmapBitSetTest.java old mode 100755 new mode 100644 similarity index 100% rename from 
bytebuffer-collections/src/test/java/io/druid/collections/bitmap/WrappedBitSetBitmapBitSetTest.java rename to processing/src/test/java/io/druid/collections/bitmap/WrappedBitSetBitmapBitSetTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/bitmap/WrappedRoaringBitmapTest.java b/processing/src/test/java/io/druid/collections/bitmap/WrappedRoaringBitmapTest.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/bitmap/WrappedRoaringBitmapTest.java rename to processing/src/test/java/io/druid/collections/bitmap/WrappedRoaringBitmapTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/spatial/ImmutableRTreeTest.java b/processing/src/test/java/io/druid/collections/spatial/ImmutableRTreeTest.java old mode 100755 new mode 100644 similarity index 95% rename from bytebuffer-collections/src/test/java/io/druid/collections/spatial/ImmutableRTreeTest.java rename to processing/src/test/java/io/druid/collections/spatial/ImmutableRTreeTest.java index 3c3bb9c9eba4..20e9d4d76be1 --- a/bytebuffer-collections/src/test/java/io/druid/collections/spatial/ImmutableRTreeTest.java +++ b/processing/src/test/java/io/druid/collections/spatial/ImmutableRTreeTest.java @@ -23,6 +23,7 @@ import com.google.common.base.Throwables; import com.google.common.collect.Iterables; import com.google.common.collect.Sets; +import com.google.common.primitives.Bytes; import io.druid.collections.bitmap.BitmapFactory; import io.druid.collections.bitmap.ConciseBitmapFactory; import io.druid.collections.bitmap.ImmutableBitmap; @@ -31,11 +32,14 @@ import io.druid.collections.spatial.search.RadiusBound; import io.druid.collections.spatial.search.RectangularBound; import io.druid.collections.spatial.split.LinearGutmanSplitStrategy; +import io.druid.segment.data.GenericIndexed; +import io.druid.segment.data.ImmutableRTreeObjectStrategy; import junit.framework.Assert; import org.junit.Test; import org.roaringbitmap.IntIterator; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.Locale; import java.util.Random; import java.util.Set; @@ -648,4 +652,24 @@ public void showBenchmarksBoundWithLimits() } } } + + @Test + public void testToBytes() + { + BitmapFactory bf = new RoaringBitmapFactory(); + ImmutableRTreeObjectStrategy rTreeObjectStrategy = new ImmutableRTreeObjectStrategy(bf); + RTree rTree = new RTree(2, new LinearGutmanSplitStrategy(0, 50, bf), bf); + rTree.insert(new float[]{0, 0}, 1); + ImmutableRTree immutableRTree = ImmutableRTree.newImmutableFromMutable(rTree); + byte[] bytes1 = immutableRTree.toBytes(); + + GenericIndexed genericIndexed = GenericIndexed.fromIterable( + Arrays.asList(immutableRTree, immutableRTree), + rTreeObjectStrategy + ); + + ImmutableRTree deserializedTree = genericIndexed.get(0); + byte[] bytes2 = deserializedTree.toBytes(); + org.junit.Assert.assertEquals(Bytes.asList(bytes1), Bytes.asList(bytes2)); + } } diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/spatial/RTreeTest.java b/processing/src/test/java/io/druid/collections/spatial/RTreeTest.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/spatial/RTreeTest.java rename to processing/src/test/java/io/druid/collections/spatial/RTreeTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/spatial/search/PolygonBoundTest.java 
b/processing/src/test/java/io/druid/collections/spatial/search/PolygonBoundTest.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/spatial/search/PolygonBoundTest.java rename to processing/src/test/java/io/druid/collections/spatial/search/PolygonBoundTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/spatial/search/RadiusBoundTest.java b/processing/src/test/java/io/druid/collections/spatial/search/RadiusBoundTest.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/spatial/search/RadiusBoundTest.java rename to processing/src/test/java/io/druid/collections/spatial/search/RadiusBoundTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/spatial/search/RectangularBoundTest.java b/processing/src/test/java/io/druid/collections/spatial/search/RectangularBoundTest.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/spatial/search/RectangularBoundTest.java rename to processing/src/test/java/io/druid/collections/spatial/search/RectangularBoundTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/collections/spatial/split/LinearGutmanSplitStrategyTest.java b/processing/src/test/java/io/druid/collections/spatial/split/LinearGutmanSplitStrategyTest.java old mode 100755 new mode 100644 similarity index 100% rename from bytebuffer-collections/src/test/java/io/druid/collections/spatial/split/LinearGutmanSplitStrategyTest.java rename to processing/src/test/java/io/druid/collections/spatial/split/LinearGutmanSplitStrategyTest.java diff --git a/bytebuffer-collections/src/test/java/io/druid/test/annotation/Benchmark.java b/processing/src/test/java/io/druid/collections/test/annotation/Benchmark.java old mode 100755 new mode 100644 similarity index 94% rename from bytebuffer-collections/src/test/java/io/druid/test/annotation/Benchmark.java rename to processing/src/test/java/io/druid/collections/test/annotation/Benchmark.java index 2fa616eef401..99aa5517a48c --- a/bytebuffer-collections/src/test/java/io/druid/test/annotation/Benchmark.java +++ b/processing/src/test/java/io/druid/collections/test/annotation/Benchmark.java @@ -17,7 +17,7 @@ * under the License. */ -package io.druid.test.annotation; +package io.druid.collections.test.annotation; public interface Benchmark { diff --git a/bytebuffer-collections/src/test/java/io/druid/test/annotation/Dummy.java b/processing/src/test/java/io/druid/collections/test/annotation/Dummy.java old mode 100755 new mode 100644 similarity index 94% rename from bytebuffer-collections/src/test/java/io/druid/test/annotation/Dummy.java rename to processing/src/test/java/io/druid/collections/test/annotation/Dummy.java index ae1d36869320..75b0fa2925c8 --- a/bytebuffer-collections/src/test/java/io/druid/test/annotation/Dummy.java +++ b/processing/src/test/java/io/druid/collections/test/annotation/Dummy.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package io.druid.test.annotation; +package io.druid.collections.test.annotation; public interface Dummy { diff --git a/processing/src/test/java/io/druid/query/CachingEmitter.java b/processing/src/test/java/io/druid/query/CachingEmitter.java index aa2663904744..be0b6cfd7ece 100644 --- a/processing/src/test/java/io/druid/query/CachingEmitter.java +++ b/processing/src/test/java/io/druid/query/CachingEmitter.java @@ -19,8 +19,8 @@ package io.druid.query; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.Event; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Event; import java.io.IOException; diff --git a/processing/src/test/java/io/druid/query/DefaultQueryMetricsTest.java b/processing/src/test/java/io/druid/query/DefaultQueryMetricsTest.java index add3360d107c..e78618e7875a 100644 --- a/processing/src/test/java/io/druid/query/DefaultQueryMetricsTest.java +++ b/processing/src/test/java/io/druid/query/DefaultQueryMetricsTest.java @@ -20,7 +20,7 @@ package io.druid.query; import com.google.common.collect.ImmutableSet; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.granularity.Granularities; import io.druid.query.aggregation.CountAggregatorFactory; import io.druid.query.dimension.DefaultDimensionSpec; diff --git a/processing/src/test/java/io/druid/query/IntervalChunkingQueryRunnerTest.java b/processing/src/test/java/io/druid/query/IntervalChunkingQueryRunnerTest.java index ef389424f009..cd62d4f47717 100644 --- a/processing/src/test/java/io/druid/query/IntervalChunkingQueryRunnerTest.java +++ b/processing/src/test/java/io/druid/query/IntervalChunkingQueryRunnerTest.java @@ -21,7 +21,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.guava.Sequences; import io.druid.query.Druids.TimeseriesQueryBuilder; import io.druid.query.aggregation.AggregatorFactory; diff --git a/processing/src/test/java/io/druid/query/QueryRunnerTestHelper.java b/processing/src/test/java/io/druid/query/QueryRunnerTestHelper.java index 0c2a781bf4f2..07733c946bfd 100644 --- a/processing/src/test/java/io/druid/query/QueryRunnerTestHelper.java +++ b/processing/src/test/java/io/druid/query/QueryRunnerTestHelper.java @@ -25,8 +25,8 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.core.NoopEmitter; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.core.NoopEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.Intervals; import io.druid.java.util.common.granularity.Granularities; diff --git a/processing/src/test/java/io/druid/query/TimewarpOperatorTest.java b/processing/src/test/java/io/druid/query/TimewarpOperatorTest.java index 8b5675b646fe..d8a859d298c7 100644 --- a/processing/src/test/java/io/druid/query/TimewarpOperatorTest.java +++ b/processing/src/test/java/io/druid/query/TimewarpOperatorTest.java @@ -24,6 +24,8 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import io.druid.java.util.common.DateTimes; +import io.druid.java.util.common.granularity.PeriodGranularity; +import 
io.druid.java.util.common.guava.Accumulators; import io.druid.java.util.common.guava.Sequence; import io.druid.java.util.common.guava.Sequences; import io.druid.query.aggregation.AggregatorFactory; @@ -31,11 +33,13 @@ import io.druid.query.timeboundary.TimeBoundaryResultValue; import io.druid.query.timeseries.TimeseriesResultValue; import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; import org.joda.time.Interval; import org.joda.time.Period; import org.junit.Assert; import org.junit.Test; +import java.util.ArrayList; import java.util.Arrays; import java.util.Map; @@ -57,14 +61,24 @@ public void testComputeOffset() throws Exception final DateTime t = DateTimes.of("2014-01-23"); final DateTime tOffset = DateTimes.of("2014-01-09"); - Assert.assertEquals(tOffset, t.plus(testOperator.computeOffset(t.getMillis()))); + Assert.assertEquals(tOffset, t.plus(testOperator.computeOffset(t.getMillis(), DateTimeZone.UTC))); } { final DateTime t = DateTimes.of("2014-08-02"); final DateTime tOffset = DateTimes.of("2014-01-11"); - Assert.assertEquals(tOffset, t.plus(testOperator.computeOffset(t.getMillis()))); + Assert.assertEquals(tOffset, t.plus(testOperator.computeOffset(t.getMillis(), DateTimeZone.UTC))); + } + + { + final DateTime t = DateTimes.of("2014-08-02T-07"); + final DateTime tOffset = DateTimes.of("2014-01-11T-08"); + + Assert.assertEquals( + tOffset, + t.plus(testOperator.computeOffset(t.getMillis(), DateTimeZone.forID("America/Los_Angeles"))) + ); } } @@ -183,6 +197,126 @@ public Sequence> run( } + @Test + public void testPostProcessWithTimezonesAndDstShift() throws Exception + { + QueryRunner> queryRunner = testOperator.postProcess( + new QueryRunner>() + { + @Override + public Sequence> run( + QueryPlus> queryPlus, + Map responseContext + ) + { + return Sequences.simple( + ImmutableList.of( + new Result<>( + DateTimes.of("2014-01-09T-08"), + new TimeseriesResultValue(ImmutableMap.of("metric", 2)) + ), + new Result<>( + DateTimes.of("2014-01-11T-08"), + new TimeseriesResultValue(ImmutableMap.of("metric", 3)) + ), + new Result<>( + queryPlus.getQuery().getIntervals().get(0).getEnd(), + new TimeseriesResultValue(ImmutableMap.of("metric", 5)) + ) + ) + ); + } + }, + DateTimes.of("2014-08-02T-07").getMillis() + ); + + final Query> query = + Druids.newTimeseriesQueryBuilder() + .dataSource("dummy") + .intervals("2014-07-31T-07/2014-08-05T-07") + .granularity(new PeriodGranularity(new Period("P1D"), null, DateTimeZone.forID("America/Los_Angeles"))) + .aggregators(Arrays.asList(new CountAggregatorFactory("count"))) + .build(); + + Assert.assertEquals( + Lists.newArrayList( + new Result<>( + DateTimes.of("2014-07-31T-07"), + new TimeseriesResultValue(ImmutableMap.of("metric", 2)) + ), + new Result<>( + DateTimes.of("2014-08-02T-07"), + new TimeseriesResultValue(ImmutableMap.of("metric", 3)) + ), + new Result<>( + DateTimes.of("2014-08-02T-07"), + new TimeseriesResultValue(ImmutableMap.of("metric", 5)) + ) + ), + queryRunner.run(QueryPlus.wrap(query), CONTEXT).accumulate(new ArrayList<>(), Accumulators.list()) + ); + } + + @Test + public void testPostProcessWithTimezonesAndNoDstShift() throws Exception + { + QueryRunner> queryRunner = testOperator.postProcess( + new QueryRunner>() + { + @Override + public Sequence> run( + QueryPlus> queryPlus, + Map responseContext + ) + { + return Sequences.simple( + ImmutableList.of( + new Result<>( + DateTimes.of("2014-01-09T-07"), + new TimeseriesResultValue(ImmutableMap.of("metric", 2)) + ), + new Result<>( + DateTimes.of("2014-01-11T-07"), + new 
TimeseriesResultValue(ImmutableMap.of("metric", 3)) + ), + new Result<>( + queryPlus.getQuery().getIntervals().get(0).getEnd(), + new TimeseriesResultValue(ImmutableMap.of("metric", 5)) + ) + ) + ); + } + }, + DateTimes.of("2014-08-02T-07").getMillis() + ); + + final Query> query = + Druids.newTimeseriesQueryBuilder() + .dataSource("dummy") + .intervals("2014-07-31T-07/2014-08-05T-07") + .granularity(new PeriodGranularity(new Period("P1D"), null, DateTimeZone.forID("America/Phoenix"))) + .aggregators(Arrays.asList(new CountAggregatorFactory("count"))) + .build(); + + Assert.assertEquals( + Lists.newArrayList( + new Result<>( + DateTimes.of("2014-07-31T-07"), + new TimeseriesResultValue(ImmutableMap.of("metric", 2)) + ), + new Result<>( + DateTimes.of("2014-08-02T-07"), + new TimeseriesResultValue(ImmutableMap.of("metric", 3)) + ), + new Result<>( + DateTimes.of("2014-08-02T-07"), + new TimeseriesResultValue(ImmutableMap.of("metric", 5)) + ) + ), + queryRunner.run(QueryPlus.wrap(query), CONTEXT).accumulate(new ArrayList<>(), Accumulators.list()) + ); + } + @Test public void testEmptyFutureInterval() throws Exception { diff --git a/processing/src/test/java/io/druid/query/groupby/DefaultGroupByQueryMetricsTest.java b/processing/src/test/java/io/druid/query/groupby/DefaultGroupByQueryMetricsTest.java index 3d16b0108945..dfef95950efc 100644 --- a/processing/src/test/java/io/druid/query/groupby/DefaultGroupByQueryMetricsTest.java +++ b/processing/src/test/java/io/druid/query/groupby/DefaultGroupByQueryMetricsTest.java @@ -21,7 +21,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.Intervals; import io.druid.java.util.common.granularity.PeriodGranularity; import io.druid.query.CachingEmitter; diff --git a/processing/src/test/java/io/druid/query/groupby/GroupByLimitPushDownMultiNodeMergeTest.java b/processing/src/test/java/io/druid/query/groupby/GroupByLimitPushDownMultiNodeMergeTest.java index d11f8e62d3f0..e15f076dbfe5 100644 --- a/processing/src/test/java/io/druid/query/groupby/GroupByLimitPushDownMultiNodeMergeTest.java +++ b/processing/src/test/java/io/druid/query/groupby/GroupByLimitPushDownMultiNodeMergeTest.java @@ -49,6 +49,9 @@ import io.druid.java.util.common.guava.Sequences; import io.druid.java.util.common.logger.Logger; import io.druid.math.expr.ExprMacroTable; +import io.druid.query.aggregation.CountAggregatorFactory; +import io.druid.query.expression.TestExprMacroTable; +import io.druid.segment.virtual.ExpressionVirtualColumn; import io.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory; import io.druid.query.BySegmentQueryRunner; import io.druid.query.DruidProcessingConfig; @@ -240,7 +243,78 @@ public void setup() throws Exception ); QueryableIndex qindexB = INDEX_IO.loadIndex(fileB); - groupByIndices = Arrays.asList(qindexA, qindexB); + final IncrementalIndex indexC = makeIncIndex(false); + incrementalIndices.add(indexC); + + event = new HashMap<>(); + event.put("dimA", "pomegranate"); + event.put("metA", 2395L); + row = new MapBasedInputRow(1505260800000L, dimNames, event); + indexC.add(row); + + event = new HashMap<>(); + event.put("dimA", "mango"); + event.put("metA", 8L); + row = new MapBasedInputRow(1605260800000L, dimNames, event); + indexC.add(row); + + event = new HashMap<>(); + event.put("dimA", "pomegranate"); + event.put("metA", 5028L); + row = new 
MapBasedInputRow(1705264400000L, dimNames, event); + indexC.add(row); + + event = new HashMap<>(); + event.put("dimA", "mango"); + event.put("metA", 7L); + row = new MapBasedInputRow(1805264400000L, dimNames, event); + indexC.add(row); + + final File fileC = INDEX_MERGER_V9.persist( + indexC, + new File(tmpDir, "C"), + new IndexSpec(), + null + ); + QueryableIndex qindexC = INDEX_IO.loadIndex(fileC); + + + final IncrementalIndex indexD = makeIncIndex(false); + incrementalIndices.add(indexD); + + event = new HashMap<>(); + event.put("dimA", "pomegranate"); + event.put("metA", 4718L); + row = new MapBasedInputRow(1505260800000L, dimNames, event); + indexD.add(row); + + event = new HashMap<>(); + event.put("dimA", "mango"); + event.put("metA", 18L); + row = new MapBasedInputRow(1605260800000L, dimNames, event); + indexD.add(row); + + event = new HashMap<>(); + event.put("dimA", "pomegranate"); + event.put("metA", 2698L); + row = new MapBasedInputRow(1705264400000L, dimNames, event); + indexD.add(row); + + event = new HashMap<>(); + event.put("dimA", "mango"); + event.put("metA", 3L); + row = new MapBasedInputRow(1805264400000L, dimNames, event); + indexD.add(row); + + final File fileD = INDEX_MERGER_V9.persist( + indexD, + new File(tmpDir, "D"), + new IndexSpec(), + null + ); + QueryableIndex qindexD = INDEX_IO.loadIndex(fileD); + + groupByIndices = Arrays.asList(qindexA, qindexB, qindexC, qindexD); setupGroupByFactory(); } @@ -376,6 +450,125 @@ public void tearDown() throws Exception } } + @Test + public void testDescendingNumerics() throws Exception + { + QueryToolChest toolChest = groupByFactory.getToolchest(); + QueryRunner theRunner = new FinalizeResultsQueryRunner<>( + toolChest.mergeResults( + groupByFactory.mergeRunners(executorService, getRunner1(2)) + ), + (QueryToolChest) toolChest + ); + + QueryRunner theRunner2 = new FinalizeResultsQueryRunner<>( + toolChest.mergeResults( + groupByFactory2.mergeRunners(executorService, getRunner2(3)) + ), + (QueryToolChest) toolChest + ); + + QueryRunner finalRunner = new FinalizeResultsQueryRunner<>( + toolChest.mergeResults( + new QueryRunner() + { + @Override + public Sequence run(QueryPlus queryPlus, Map responseContext) + { + return Sequences + .simple( + ImmutableList.of( + theRunner.run(queryPlus, responseContext), + theRunner2.run(queryPlus, responseContext) + ) + ) + .flatMerge(Function.identity(), queryPlus.getQuery().getResultOrdering()); + } + } + ), + (QueryToolChest) toolChest + ); + + QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec( + Collections.singletonList(Intervals.utc(1500000000000L, 1900000000000L)) + ); + + DefaultLimitSpec ls2 = new DefaultLimitSpec( + Arrays.asList( + new OrderByColumnSpec("d0", OrderByColumnSpec.Direction.DESCENDING, StringComparators.NUMERIC), + new OrderByColumnSpec("d1", OrderByColumnSpec.Direction.DESCENDING, StringComparators.NUMERIC), + new OrderByColumnSpec("d2", OrderByColumnSpec.Direction.DESCENDING, StringComparators.NUMERIC) + ), + 100 + ); + + GroupByQuery query = GroupByQuery + .builder() + .setDataSource("blah") + .setQuerySegmentSpec(intervalSpec) + .setVirtualColumns( + new ExpressionVirtualColumn("d0:v", "timestamp_extract(\"__time\",'YEAR','UTC')", ValueType.LONG, TestExprMacroTable.INSTANCE), + new ExpressionVirtualColumn("d1:v", "timestamp_extract(\"__time\",'MONTH','UTC')", ValueType.LONG, TestExprMacroTable.INSTANCE), + new ExpressionVirtualColumn("d2:v", "timestamp_extract(\"__time\",'DAY','UTC')", ValueType.LONG, TestExprMacroTable.INSTANCE) + ) + 
.setDimensions(Lists.newArrayList( + new DefaultDimensionSpec("d0:v", "d0", ValueType.LONG), + new DefaultDimensionSpec("d1:v", "d1", ValueType.LONG), + new DefaultDimensionSpec("d2:v", "d2", ValueType.LONG) + )) + .setAggregatorSpecs( + Arrays.asList(new CountAggregatorFactory("a0")) + ) + .setLimitSpec( + ls2 + ) + .setContext( + ImmutableMap.of( + GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, true + ) + ) + .setGranularity(Granularities.ALL) + .build(); + + Sequence queryResult = finalRunner.run(QueryPlus.wrap(query), Maps.newHashMap()); + List results = Sequences.toList(queryResult, Lists.newArrayList()); + + Row expectedRow0 = GroupByQueryRunnerTestHelper.createExpectedRow( + "2017-07-14T02:40:00.000Z", + "d0", 2027L, + "d1", 3L, + "d2", 17L, + "a0", 2L + ); + Row expectedRow1 = GroupByQueryRunnerTestHelper.createExpectedRow( + "2017-07-14T02:40:00.000Z", + "d0", 2024L, + "d1", 1L, + "d2", 14L, + "a0", 2L + ); + Row expectedRow2 = GroupByQueryRunnerTestHelper.createExpectedRow( + "2017-07-14T02:40:00.000Z", + "d0", 2020L, + "d1", 11L, + "d2", 13L, + "a0", 2L + ); + Row expectedRow3 = GroupByQueryRunnerTestHelper.createExpectedRow( + "2017-07-14T02:40:00.000Z", + "d0", 2017L, + "d1", 9L, + "d2", 13L, + "a0", 2L + ); + + Assert.assertEquals(4, results.size()); + Assert.assertEquals(expectedRow0, results.get(0)); + Assert.assertEquals(expectedRow1, results.get(1)); + Assert.assertEquals(expectedRow2, results.get(2)); + Assert.assertEquals(expectedRow3, results.get(3)); + } + @Test public void testPartialLimitPushDownMerge() throws Exception { @@ -384,14 +577,14 @@ public void testPartialLimitPushDownMerge() throws Exception QueryToolChest toolChest = groupByFactory.getToolchest(); QueryRunner theRunner = new FinalizeResultsQueryRunner<>( toolChest.mergeResults( - groupByFactory.mergeRunners(executorService, getRunner1()) + groupByFactory.mergeRunners(executorService, getRunner1(0)) ), (QueryToolChest) toolChest ); QueryRunner theRunner2 = new FinalizeResultsQueryRunner<>( toolChest.mergeResults( - groupByFactory2.mergeRunners(executorService, getRunner2()) + groupByFactory2.mergeRunners(executorService, getRunner2(1)) ), (QueryToolChest) toolChest ); @@ -495,10 +688,10 @@ public Sequence run(QueryPlus queryPlus, Map responseC Assert.assertEquals(expectedRow3, results.get(3)); } - private List> getRunner1() + private List> getRunner1(int qIndexNumber) { List> runners = Lists.newArrayList(); - QueryableIndex index = groupByIndices.get(0); + QueryableIndex index = groupByIndices.get(qIndexNumber); QueryRunner runner = makeQueryRunner( groupByFactory, index.toString(), @@ -508,10 +701,10 @@ private List> getRunner1() return runners; } - private List> getRunner2() + private List> getRunner2(int qIndexNumber) { List> runners = Lists.newArrayList(); - QueryableIndex index2 = groupByIndices.get(1); + QueryableIndex index2 = groupByIndices.get(qIndexNumber); QueryRunner tooSmallRunner = makeQueryRunner( groupByFactory2, index2.toString(), diff --git a/processing/src/test/java/io/druid/query/groupby/GroupByQueryRunnerTest.java b/processing/src/test/java/io/druid/query/groupby/GroupByQueryRunnerTest.java index ce586d576c3e..d02ea057cad7 100644 --- a/processing/src/test/java/io/druid/query/groupby/GroupByQueryRunnerTest.java +++ b/processing/src/test/java/io/druid/query/groupby/GroupByQueryRunnerTest.java @@ -156,7 +156,7 @@ @RunWith(Parameterized.class) public class GroupByQueryRunnerTest { - public static final ObjectMapper DEFAULT_MAPPER = TestHelper.getSmileMapper(); + public static final 
ObjectMapper DEFAULT_MAPPER = TestHelper.makeSmileMapper(); public static final DruidProcessingConfig DEFAULT_PROCESSING_CONFIG = new DruidProcessingConfig() { @Override @@ -3533,6 +3533,68 @@ public void testGroupByWithAlphaNumericDimensionOrder() TestHelper.assertExpectedObjects(expectedResults, results, ""); } + @Test + public void testGroupByWithLookupAndLimitAndSortByDimsFirst() + { + Map map = new HashMap<>(); + map.put("automotive", "9"); + map.put("business", "8"); + map.put("entertainment", "7"); + map.put("health", "6"); + map.put("mezzanine", "5"); + map.put("news", "4"); + map.put("premium", "3"); + map.put("technology", "2"); + map.put("travel", "1"); + + GroupByQuery query = GroupByQuery + .builder() + .setDataSource(QueryRunnerTestHelper.dataSource) + .setQuerySegmentSpec(QueryRunnerTestHelper.firstToThird) + .setDimensions( + Lists.newArrayList( + new ExtractionDimensionSpec( + "quality", + "alias", + new LookupExtractionFn(new MapLookupExtractor(map, false), false, null, false, false) + ) + ) + ) + .setAggregatorSpecs( + Arrays.asList( + QueryRunnerTestHelper.rowsCount, + new LongSumAggregatorFactory("idx", "index") + ) + ) + .setLimitSpec(new DefaultLimitSpec(Lists.newArrayList( + new OrderByColumnSpec("alias", null, StringComparators.ALPHANUMERIC)), 11)) + .setGranularity(QueryRunnerTestHelper.dayGran) + .setContext(ImmutableMap.of("sortByDimsFirst", true)) + .build(); + + List expectedResults = Arrays.asList( + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "1", "rows", 1L, "idx", 119L), + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-02", "alias", "1", "rows", 1L, "idx", 126L), + + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "2", "rows", 1L, "idx", 78L), + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-02", "alias", "2", "rows", 1L, "idx", 97L), + + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "3", "rows", 3L, "idx", 2900L), + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-02", "alias", "3", "rows", 3L, "idx", 2505L), + + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "4", "rows", 1L, "idx", 121L), + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-02", "alias", "4", "rows", 1L, "idx", 114L), + + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "5", "rows", 3L, "idx", 2870L), + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-02", "alias", "5", "rows", 3L, "idx", 2447L), + + GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "6", "rows", 1L, "idx", 120L) + ); + + Iterable results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query); + TestHelper.assertExpectedObjects(expectedResults, results, ""); + } + @Ignore @Test // This is a test to verify per limit groupings, but Druid currently does not support this functionality. 
At a point @@ -7438,7 +7500,9 @@ public void testGroupByLongColumn() query.getLimitSpec().build( query.getDimensions(), query.getAggregatorSpecs(), - query.getPostAggregatorSpecs() + query.getPostAggregatorSpecs(), + query.getGranularity(), + query.getContextSortByDimsFirst() ) ); @@ -7499,7 +7563,9 @@ public void testGroupByLongColumnDescending() query.getLimitSpec().build( query.getDimensions(), query.getAggregatorSpecs(), - query.getPostAggregatorSpecs() + query.getPostAggregatorSpecs(), + query.getGranularity(), + query.getContextSortByDimsFirst() ) ); @@ -7702,7 +7768,9 @@ public void testGroupByFloatColumn() query.getLimitSpec().build( query.getDimensions(), query.getAggregatorSpecs(), - query.getPostAggregatorSpecs() + query.getPostAggregatorSpecs(), + query.getGranularity(), + query.getContextSortByDimsFirst() ) ); @@ -7764,7 +7832,9 @@ public void testGroupByFloatColumnDescending() query.getLimitSpec().build( query.getDimensions(), query.getAggregatorSpecs(), - query.getPostAggregatorSpecs() + query.getPostAggregatorSpecs(), + query.getGranularity(), + query.getContextSortByDimsFirst() ) ); @@ -7825,7 +7895,9 @@ public void testGroupByDoubleColumnDescending() query.getLimitSpec().build( query.getDimensions(), query.getAggregatorSpecs(), - query.getPostAggregatorSpecs() + query.getPostAggregatorSpecs(), + query.getGranularity(), + query.getContextSortByDimsFirst() ) ); diff --git a/processing/src/test/java/io/druid/query/groupby/epinephelinae/BufferArrayGrouperTest.java b/processing/src/test/java/io/druid/query/groupby/epinephelinae/BufferArrayGrouperTest.java index 6ca584f2b8d3..acdc2c406047 100644 --- a/processing/src/test/java/io/druid/query/groupby/epinephelinae/BufferArrayGrouperTest.java +++ b/processing/src/test/java/io/druid/query/groupby/epinephelinae/BufferArrayGrouperTest.java @@ -86,4 +86,21 @@ private BufferArrayGrouper newGrouper( grouper.init(); return grouper; } + + @Test + public void testRequiredBufferCapacity() + { + int[] cardinalityArray = new int[] {1, 10, Integer.MAX_VALUE - 1}; + AggregatorFactory[] aggregatorFactories = new AggregatorFactory[] { + new LongSumAggregatorFactory("sum", "sum") + }; + + long[] requiredSizes = new long[] {17, 90, 16911433721L}; + + for (int i = 0; i < cardinalityArray.length; i++) { + Assert.assertEquals(requiredSizes[i], BufferArrayGrouper.requiredBufferCapacity( + cardinalityArray[i], + aggregatorFactories)); + } + } } diff --git a/processing/src/test/java/io/druid/query/groupby/having/HavingSpecTest.java b/processing/src/test/java/io/druid/query/groupby/having/HavingSpecTest.java index 18fc599e6bdd..183789a061b6 100644 --- a/processing/src/test/java/io/druid/query/groupby/having/HavingSpecTest.java +++ b/processing/src/test/java/io/druid/query/groupby/having/HavingSpecTest.java @@ -101,9 +101,8 @@ public void testTypeTypo() "value", 1.3 ); ObjectMapper mapper = new DefaultObjectMapper(); - @SuppressWarnings("unused") // expected exception + // noinspection unused HavingSpec spec = mapper.convertValue(greaterMap, HavingSpec.class); - } @Test @@ -156,9 +155,64 @@ public void testEqualHavingSpec() assertFalse(spec.eval(getTestRow(100.05f))); spec = new EqualToHavingSpec("metric", 100.56f); + assertFalse(spec.eval(getTestRow(100L))); + assertFalse(spec.eval(getTestRow(100.0))); + assertFalse(spec.eval(getTestRow(100d))); + assertFalse(spec.eval(getTestRow(100.56d))); // False since 100.56d != (double) 100.56f + assertFalse(spec.eval(getTestRow(90.53d))); assertTrue(spec.eval(getTestRow(100.56f))); 
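The float/double assertions in this block rest on a plain Java fact rather than anything specific to EqualToHavingSpec: widening 100.56f to double preserves its binary value, which is not the number denoted by the literal 100.56d. A standalone check:

    public class FloatDoubleEqualityCheck
    {
      public static void main(String[] args)
      {
        float f = 100.56f;
        double d = 100.56d;
        // The nearest float to 100.56 is roughly 100.55999755859375, so the
        // widened float compares unequal to the double literal.
        System.out.println((double) f == d);           // false
        // Integral values such as 100.0 are exact in both types and agree.
        System.out.println((double) 100.0f == 100.0d); // true
      }
    }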
assertFalse(spec.eval(getTestRow(90.53f))); assertFalse(spec.eval(getTestRow(Long.MAX_VALUE))); + + spec = new EqualToHavingSpec("metric", 100.56d); + assertFalse(spec.eval(getTestRow(100L))); + assertFalse(spec.eval(getTestRow(100.0))); + assertFalse(spec.eval(getTestRow(100d))); + assertTrue(spec.eval(getTestRow(100.56d))); + assertFalse(spec.eval(getTestRow(90.53d))); + assertFalse(spec.eval(getTestRow(100.56f))); // False since 100.56d != (double) 100.56f + assertFalse(spec.eval(getTestRow(90.53f))); + assertFalse(spec.eval(getTestRow(Long.MAX_VALUE))); + + spec = new EqualToHavingSpec("metric", 100.0f); + assertTrue(spec.eval(getTestRow(100L))); + assertTrue(spec.eval(getTestRow(100.0))); + assertTrue(spec.eval(getTestRow(100d))); + assertFalse(spec.eval(getTestRow(100.56d))); + assertFalse(spec.eval(getTestRow(90.53d))); + assertFalse(spec.eval(getTestRow(100.56f))); + assertFalse(spec.eval(getTestRow(90.53f))); + assertFalse(spec.eval(getTestRow(Long.MAX_VALUE))); + + spec = new EqualToHavingSpec("metric", 100.0d); + assertTrue(spec.eval(getTestRow(100L))); + assertTrue(spec.eval(getTestRow(100.0))); + assertTrue(spec.eval(getTestRow(100d))); + assertFalse(spec.eval(getTestRow(100.56d))); + assertFalse(spec.eval(getTestRow(90.53d))); + assertFalse(spec.eval(getTestRow(100.56f))); + assertFalse(spec.eval(getTestRow(90.53f))); + assertFalse(spec.eval(getTestRow(Long.MAX_VALUE))); + + spec = new EqualToHavingSpec("metric", 100); + assertTrue(spec.eval(getTestRow(100L))); + assertTrue(spec.eval(getTestRow(100.0))); + assertTrue(spec.eval(getTestRow(100d))); + assertFalse(spec.eval(getTestRow(100.56d))); + assertFalse(spec.eval(getTestRow(90.53d))); + assertFalse(spec.eval(getTestRow(100.56f))); + assertFalse(spec.eval(getTestRow(90.53f))); + assertFalse(spec.eval(getTestRow(Long.MAX_VALUE))); + + spec = new EqualToHavingSpec("metric", 100L); + assertTrue(spec.eval(getTestRow(100L))); + assertTrue(spec.eval(getTestRow(100.0))); + assertTrue(spec.eval(getTestRow(100d))); + assertFalse(spec.eval(getTestRow(100.56d))); + assertFalse(spec.eval(getTestRow(90.53d))); + assertFalse(spec.eval(getTestRow(100.56f))); + assertFalse(spec.eval(getTestRow(90.53f))); + assertFalse(spec.eval(getTestRow(Long.MAX_VALUE))); } private static class CountingHavingSpec extends BaseHavingSpec diff --git a/processing/src/test/java/io/druid/query/groupby/orderby/DefaultLimitSpecTest.java b/processing/src/test/java/io/druid/query/groupby/orderby/DefaultLimitSpecTest.java index c4bbebfe9c27..cd442acfddb1 100644 --- a/processing/src/test/java/io/druid/query/groupby/orderby/DefaultLimitSpecTest.java +++ b/processing/src/test/java/io/druid/query/groupby/orderby/DefaultLimitSpecTest.java @@ -27,6 +27,7 @@ import io.druid.data.input.MapBasedRow; import io.druid.data.input.Row; import io.druid.java.util.common.DateTimes; +import io.druid.java.util.common.granularity.Granularities; import io.druid.java.util.common.guava.Sequence; import io.druid.java.util.common.guava.Sequences; import io.druid.query.aggregation.AggregatorFactory; @@ -40,6 +41,7 @@ import io.druid.query.expression.TestExprMacroTable; import io.druid.query.ordering.StringComparators; import io.druid.segment.TestHelper; +import io.druid.segment.column.ValueType; import org.junit.Assert; import org.junit.Test; @@ -160,7 +162,9 @@ public void testBuildSimple() Function, Sequence> limitFn = limitSpec.build( ImmutableList.of(), ImmutableList.of(), - ImmutableList.of() + ImmutableList.of(), + Granularities.NONE, + false ); Assert.assertEquals( @@ -169,6 +173,50 
@@ public void testBuildSimple() ); } + @Test + public void testWithAllGranularity() + { + DefaultLimitSpec limitSpec = new DefaultLimitSpec( + ImmutableList.of(new OrderByColumnSpec("k1", OrderByColumnSpec.Direction.ASCENDING, StringComparators.NUMERIC)), + 2 + ); + + Function, Sequence> limitFn = limitSpec.build( + ImmutableList.of(new DefaultDimensionSpec("k1", "k1", ValueType.DOUBLE)), + ImmutableList.of(), + ImmutableList.of(), + Granularities.ALL, + true + ); + + Assert.assertEquals( + ImmutableList.of(testRowsList.get(0), testRowsList.get(1)), + Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<>()) + ); + } + + @Test + public void testWithSortByDimsFirst() + { + DefaultLimitSpec limitSpec = new DefaultLimitSpec( + ImmutableList.of(new OrderByColumnSpec("k1", OrderByColumnSpec.Direction.ASCENDING, StringComparators.NUMERIC)), + 2 + ); + + Function, Sequence> limitFn = limitSpec.build( + ImmutableList.of(new DefaultDimensionSpec("k1", "k1", ValueType.DOUBLE)), + ImmutableList.of(), + ImmutableList.of(), + Granularities.NONE, + true + ); + + Assert.assertEquals( + ImmutableList.of(testRowsList.get(2), testRowsList.get(0)), + Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<>()) + ); + } + @Test public void testSortDimensionDescending() { @@ -180,7 +228,9 @@ public void testSortDimensionDescending() Function, Sequence> limitFn = limitSpec.build( ImmutableList.of(new DefaultDimensionSpec("k1", "k1")), ImmutableList.of(), - ImmutableList.of() + ImmutableList.of(), + Granularities.NONE, + false ); // Note: This test encodes the fact that limitSpec sorts numbers like strings; we might want to change this @@ -210,7 +260,9 @@ public void testBuildWithExplicitOrder() ), ImmutableList.of( new ConstantPostAggregator("k3", 1L) - ) + ), + Granularities.NONE, + false ); Assert.assertEquals( ImmutableList.of(testRowsList.get(0), testRowsList.get(1)), @@ -227,7 +279,9 @@ public void testBuildWithExplicitOrder() ), ImmutableList.of( new ConstantPostAggregator("k3", 1L) - ) + ), + Granularities.NONE, + false ); Assert.assertEquals( ImmutableList.of(testRowsList.get(2), testRowsList.get(0)), @@ -250,7 +304,9 @@ public void testBuildWithExplicitOrder() new ConstantPostAggregator("x", 1), new ConstantPostAggregator("y", 1)) ) - ) + ), + Granularities.NONE, + false ); Assert.assertEquals( (List) ImmutableList.of(testRowsList.get(2), testRowsList.get(0)), @@ -261,7 +317,9 @@ public void testBuildWithExplicitOrder() limitFn = limitSpec.build( ImmutableList.of(new DefaultDimensionSpec("k1", "k1")), ImmutableList.of(new LongSumAggregatorFactory("k2", "k2")), - ImmutableList.of(new ExpressionPostAggregator("k1", "1 + 1", null, TestExprMacroTable.INSTANCE)) + ImmutableList.of(new ExpressionPostAggregator("k1", "1 + 1", null, TestExprMacroTable.INSTANCE)), + Granularities.NONE, + false ); Assert.assertEquals( (List) ImmutableList.of(testRowsList.get(2), testRowsList.get(0)), diff --git a/processing/src/test/java/io/druid/query/lookup/LookupExtractionFnTest.java b/processing/src/test/java/io/druid/query/lookup/LookupExtractionFnTest.java index d60f728764e9..aea8bf626d6d 100644 --- a/processing/src/test/java/io/druid/query/lookup/LookupExtractionFnTest.java +++ b/processing/src/test/java/io/druid/query/lookup/LookupExtractionFnTest.java @@ -20,7 +20,6 @@ package io.druid.query.lookup; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Function; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import 
com.google.common.collect.ImmutableMap; @@ -36,11 +35,11 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import javax.annotation.Nullable; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.Optional; @RunWith(Parameterized.class) public class LookupExtractionFnTest @@ -53,30 +52,23 @@ public static Iterable constructorFeeder() ImmutableList.of( ImmutableSet.of(true, false), ImmutableSet.of("", "MISSING VALUE"), - ImmutableSet.of(true, false) + ImmutableSet.of(Optional.of(true), Optional.of(false), Optional.empty()) ) - ), new Function, Object[]>() - { - @Nullable - @Override - public Object[] apply(List input) - { - return input.toArray(); - } - } + ), + List::toArray ); } private static final ObjectMapper OBJECT_MAPPER = new DefaultObjectMapper(); private final boolean retainMissing; private final String replaceMissing; - private final boolean injective; + private final Boolean injective; - public LookupExtractionFnTest(boolean retainMissing, String replaceMissing, boolean injective) + public LookupExtractionFnTest(boolean retainMissing, String replaceMissing, Optional injective) { this.replaceMissing = Strings.emptyToNull(replaceMissing); this.retainMissing = retainMissing; - this.injective = injective; + this.injective = injective.orElse(null); } @Test @@ -136,7 +128,12 @@ public void testSimpleSerDe() throws IOException Assert.assertEquals(retainMissing, lookupExtractionFn2.isRetainMissingValue()); Assert.assertEquals(replaceMissing, lookupExtractionFn2.getReplaceMissingValueWith()); - Assert.assertEquals(injective, lookupExtractionFn2.isInjective()); + + if (injective == null) { + Assert.assertEquals(lookupExtractionFn2.getLookup().isOneToOne(), lookupExtractionFn2.isInjective()); + } else { + Assert.assertEquals(injective, lookupExtractionFn2.isInjective()); + } Assert.assertArrayEquals(lookupExtractionFn.getCacheKey(), lookupExtractionFn2.getCacheKey()); diff --git a/processing/src/test/java/io/druid/query/metadata/SegmentMetadataQueryQueryToolChestTest.java b/processing/src/test/java/io/druid/query/metadata/SegmentMetadataQueryQueryToolChestTest.java index 6a0e3c42a42b..359e39f6af2b 100644 --- a/processing/src/test/java/io/druid/query/metadata/SegmentMetadataQueryQueryToolChestTest.java +++ b/processing/src/test/java/io/druid/query/metadata/SegmentMetadataQueryQueryToolChestTest.java @@ -27,6 +27,7 @@ import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.Intervals; import io.druid.query.CacheStrategy; +import io.druid.query.Druids; import io.druid.query.TableDataSource; import io.druid.query.aggregation.AggregatorFactory; import io.druid.query.aggregation.DoubleMaxAggregatorFactory; @@ -38,10 +39,14 @@ import io.druid.query.metadata.metadata.SegmentMetadataQuery; import io.druid.query.spec.LegacySegmentSpec; import io.druid.segment.column.ValueType; +import io.druid.timeline.LogicalSegment; +import org.joda.time.Period; import org.junit.Assert; import org.junit.Test; +import java.util.List; import java.util.Map; +import java.util.stream.Collectors; public class SegmentMetadataQueryQueryToolChestTest { @@ -271,6 +276,37 @@ public void testMergeAggregatorsConflict() ); } + @Test + public void testFilterSegments() + { + final SegmentMetadataQueryConfig config = new SegmentMetadataQueryConfig(); + final SegmentMetadataQueryQueryToolChest toolChest = new SegmentMetadataQueryQueryToolChest(config); + + final List filteredSegments = toolChest.filterSegments( + 
Druids.newSegmentMetadataQueryBuilder().dataSource("foo").merge(true).build(), + ImmutableList + .of( + "2000-01-01/P1D", + "2000-01-04/P1D", + "2000-01-09/P1D", + "2000-01-09/P1D" + ) + .stream() + .map(interval -> (LogicalSegment) () -> Intervals.of(interval)) + .collect(Collectors.toList()) + ); + + Assert.assertEquals(Period.weeks(1), config.getDefaultHistory()); + Assert.assertEquals( + ImmutableList.of( + Intervals.of("2000-01-04/P1D"), + Intervals.of("2000-01-09/P1D"), + Intervals.of("2000-01-09/P1D") + ), + filteredSegments.stream().map(LogicalSegment::getInterval).collect(Collectors.toList()) + ); + } + @SuppressWarnings("ArgumentParameterSwap") @Test public void testMergeRollup() diff --git a/processing/src/test/java/io/druid/query/metadata/SegmentMetadataQueryTest.java b/processing/src/test/java/io/druid/query/metadata/SegmentMetadataQueryTest.java index deaebcc734ec..fa524f166355 100644 --- a/processing/src/test/java/io/druid/query/metadata/SegmentMetadataQueryTest.java +++ b/processing/src/test/java/io/druid/query/metadata/SegmentMetadataQueryTest.java @@ -899,6 +899,9 @@ public void testSerdeWithDefaultInterval() throws Exception // test serialize and deserialize Assert.assertEquals(query, MAPPER.readValue(MAPPER.writeValueAsString(query), Query.class)); + + // test copy + Assert.assertEquals(query, Druids.SegmentMetadataQueryBuilder.copy((SegmentMetadataQuery) query).build()); } @Test diff --git a/processing/src/test/java/io/druid/query/scan/ScanQuerySpecTest.java b/processing/src/test/java/io/druid/query/scan/ScanQuerySpecTest.java index 774a3b798a6e..8b903f7b72f5 100644 --- a/processing/src/test/java/io/druid/query/scan/ScanQuerySpecTest.java +++ b/processing/src/test/java/io/druid/query/scan/ScanQuerySpecTest.java @@ -57,7 +57,8 @@ public void testSerializationLegacyString() throws Exception + "\"columns\":[\"market\",\"quality\",\"index\"]," + "\"legacy\":null," + "\"context\":null," - + "\"descending\":false}"; + + "\"descending\":false," + + "\"granularity\":{\"type\":\"all\"}}"; ScanQuery query = new ScanQuery( new TableDataSource(QueryRunnerTestHelper.dataSource), diff --git a/processing/src/test/java/io/druid/query/search/DefaultSearchQueryMetricsTest.java b/processing/src/test/java/io/druid/query/search/DefaultSearchQueryMetricsTest.java index dbb2db70320e..3db6b64b9b84 100644 --- a/processing/src/test/java/io/druid/query/search/DefaultSearchQueryMetricsTest.java +++ b/processing/src/test/java/io/druid/query/search/DefaultSearchQueryMetricsTest.java @@ -20,7 +20,7 @@ package io.druid.query.search; import com.google.common.collect.ImmutableSet; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.query.CachingEmitter; import io.druid.query.DefaultQueryMetricsTest; import io.druid.query.DruidMetrics; diff --git a/processing/src/test/java/io/druid/query/select/DefaultSelectQueryMetricsTest.java b/processing/src/test/java/io/druid/query/select/DefaultSelectQueryMetricsTest.java index 6aeefe2b2957..a422a3ad421a 100644 --- a/processing/src/test/java/io/druid/query/select/DefaultSelectQueryMetricsTest.java +++ b/processing/src/test/java/io/druid/query/select/DefaultSelectQueryMetricsTest.java @@ -19,7 +19,7 @@ package io.druid.query.select; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.query.CachingEmitter; import io.druid.query.DefaultQueryMetricsTest; import io.druid.query.DruidMetrics; diff --git 
a/processing/src/test/java/io/druid/query/timeseries/DefaultTimeseriesQueryMetricsTest.java b/processing/src/test/java/io/druid/query/timeseries/DefaultTimeseriesQueryMetricsTest.java index 912eaf6a55c9..d5027bf5e9cc 100644 --- a/processing/src/test/java/io/druid/query/timeseries/DefaultTimeseriesQueryMetricsTest.java +++ b/processing/src/test/java/io/druid/query/timeseries/DefaultTimeseriesQueryMetricsTest.java @@ -19,7 +19,7 @@ package io.druid.query.timeseries; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.query.CachingEmitter; import io.druid.query.DefaultQueryMetricsTest; import io.druid.query.DruidMetrics; diff --git a/processing/src/test/java/io/druid/query/topn/DefaultTopNQueryMetricsTest.java b/processing/src/test/java/io/druid/query/topn/DefaultTopNQueryMetricsTest.java index a61b393a094c..b7ad4730219c 100644 --- a/processing/src/test/java/io/druid/query/topn/DefaultTopNQueryMetricsTest.java +++ b/processing/src/test/java/io/druid/query/topn/DefaultTopNQueryMetricsTest.java @@ -20,7 +20,7 @@ package io.druid.query.topn; import com.google.common.collect.ImmutableSet; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.granularity.Granularities; import io.druid.query.CachingEmitter; import io.druid.query.DefaultQueryMetricsTest; diff --git a/processing/src/test/java/io/druid/segment/TestHelper.java b/processing/src/test/java/io/druid/segment/TestHelper.java index 6d27a4b2d9a5..83eb8f88763e 100644 --- a/processing/src/test/java/io/druid/segment/TestHelper.java +++ b/processing/src/test/java/io/druid/segment/TestHelper.java @@ -83,7 +83,7 @@ public static ObjectMapper makeJsonMapper() return mapper; } - public static ObjectMapper getSmileMapper() + public static ObjectMapper makeSmileMapper() { final ObjectMapper mapper = new DefaultObjectMapper(); mapper.setInjectableValues( diff --git a/processing/src/test/java/io/druid/segment/incremental/IncrementalIndexTest.java b/processing/src/test/java/io/druid/segment/incremental/IncrementalIndexTest.java index ee728cb51605..ca6ffe9fb66f 100644 --- a/processing/src/test/java/io/druid/segment/incremental/IncrementalIndexTest.java +++ b/processing/src/test/java/io/druid/segment/incremental/IncrementalIndexTest.java @@ -25,13 +25,15 @@ import com.google.common.collect.Lists; import io.druid.collections.StupidPool; import io.druid.data.input.MapBasedInputRow; -import io.druid.data.input.Row; import io.druid.data.input.impl.DimensionSchema; import io.druid.data.input.impl.DimensionsSpec; import io.druid.data.input.impl.DoubleDimensionSchema; +import io.druid.data.input.impl.FloatDimensionSchema; +import io.druid.data.input.impl.LongDimensionSchema; import io.druid.data.input.impl.StringDimensionSchema; import io.druid.java.util.common.ISE; import io.druid.java.util.common.granularity.Granularities; +import io.druid.java.util.common.parsers.ParseException; import io.druid.query.aggregation.AggregatorFactory; import io.druid.query.aggregation.CountAggregatorFactory; import io.druid.query.aggregation.FilteredAggregatorFactory; @@ -40,6 +42,7 @@ import org.junit.Assert; import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -60,6 +63,9 @@ interface IndexCreator IncrementalIndex createIndex(); } + @Rule + public ExpectedException expectedException = ExpectedException.none(); 
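For the IncrementalIndexTest changes that follow: the ExpectedException rule verifies the next exception thrown by the test body, and the method ends at that throw, so only the first failing index.add() in a test like testUnparseableNumerics is actually exercised. A minimal standalone illustration in plain JUnit 4 (class and method names invented):

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.ExpectedException;

    public class ExpectedExceptionSketchTest
    {
      @Rule
      public ExpectedException expectedException = ExpectedException.none();

      @Test
      public void stopsAtFirstThrow()
      {
        expectedException.expect(NumberFormatException.class);
        expectedException.expectMessage("For input string: \"asdj\"");
        Long.parseLong("asdj"); // throws here; nothing after this line runs
      }
    }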
+ @Rule public final CloserRule closer = new CloserRule(false); @@ -76,8 +82,8 @@ public static Collection constructorFeeder() throws IOException DimensionsSpec dimensions = new DimensionsSpec( Arrays.asList( new StringDimensionSchema("string"), - new StringDimensionSchema("float"), - new StringDimensionSchema("long"), + new FloatDimensionSchema("float"), + new LongDimensionSchema("long"), new DoubleDimensionSchema("double") ), null, null ); @@ -206,28 +212,54 @@ public void controlTest() throws IndexSizeExceededException } @Test - public void testNullDimensionTransform() throws IndexSizeExceededException + public void testUnparseableNumerics() throws IndexSizeExceededException { IncrementalIndex index = closer.closeLater(indexCreator.createIndex()); + + expectedException.expect(ParseException.class); + expectedException.expectMessage("could not convert value [asdj] to long"); index.add( new MapBasedInputRow( System.currentTimeMillis() - 1, Lists.newArrayList("string", "float", "long", "double"), ImmutableMap.of( - "string", Arrays.asList("A", null, ""), - "float", Arrays.asList(Float.POSITIVE_INFINITY, null, ""), - "long", Arrays.asList(Long.MIN_VALUE, null, ""), - "double", "" + "string", "A", + "float", "19.0", + "long", "asdj", + "double", 21.0d ) ) ); - Row row = index.iterator().next(); + expectedException.expect(ParseException.class); + expectedException.expectMessage("could not convert value [aaa] to float"); + index.add( + new MapBasedInputRow( + System.currentTimeMillis() - 1, + Lists.newArrayList("string", "float", "long", "double"), + ImmutableMap.of( + "string", "A", + "float", "aaa", + "long", 20, + "double", 21.0d + ) + ) + ); - Assert.assertEquals(Arrays.asList(new String[]{"", "", "A"}), row.getRaw("string")); - Assert.assertEquals(Arrays.asList(new String[]{"", "", String.valueOf(Float.POSITIVE_INFINITY)}), row.getRaw("float")); - Assert.assertEquals(Arrays.asList(new String[]{"", "", String.valueOf(Long.MIN_VALUE)}), row.getRaw("long")); - Assert.assertEquals(0.0, row.getMetric("double").doubleValue(), 0.0); + expectedException.expect(ParseException.class); + expectedException.expectMessage("could not convert value [] to double"); + index.add( + new MapBasedInputRow( + System.currentTimeMillis() - 1, + Lists.newArrayList("string", "float", "long", "double"), + ImmutableMap.of( + "string", "A", + "float", 19.0, + "long", 20, + "double", "" + ) + ) + ); } @Test diff --git a/processing/src/test/java/io/druid/segment/serde/HyperUniquesSerdeForTest.java b/processing/src/test/java/io/druid/segment/serde/HyperUniquesSerdeForTest.java index 602868d48499..3ddc0d7633b2 100644 --- a/processing/src/test/java/io/druid/segment/serde/HyperUniquesSerdeForTest.java +++ b/processing/src/test/java/io/druid/segment/serde/HyperUniquesSerdeForTest.java @@ -21,7 +21,7 @@ import com.google.common.collect.Ordering; import com.google.common.hash.HashFunction; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.data.input.InputRow; import io.druid.hll.HyperLogLogCollector; import io.druid.segment.writeout.SegmentWriteOutMedium; diff --git a/server/pom.xml b/server/pom.xml index f1aa77c1f7ee..c3fd86e0d153 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -26,7 +26,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT @@ -46,8 +46,9 @@ 0.0.2 - com.metamx + io.druid java-util + ${project.parent.version} commons-cli diff --git a/server/src/main/java/io/druid/client/AbstractCuratorServerInventoryView.java 
b/server/src/main/java/io/druid/client/AbstractCuratorServerInventoryView.java index fec18e5a654a..d6c731cdc58f 100644 --- a/server/src/main/java/io/druid/client/AbstractCuratorServerInventoryView.java +++ b/server/src/main/java/io/druid/client/AbstractCuratorServerInventoryView.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Function; import com.google.common.base.Throwables; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.concurrent.Execs; import io.druid.curator.inventory.CuratorInventoryManager; import io.druid.curator.inventory.CuratorInventoryManagerStrategy; diff --git a/server/src/main/java/io/druid/client/BatchServerInventoryView.java b/server/src/main/java/io/druid/client/BatchServerInventoryView.java index abdeeaf2313e..44b23bd3d701 100644 --- a/server/src/main/java/io/druid/client/BatchServerInventoryView.java +++ b/server/src/main/java/io/druid/client/BatchServerInventoryView.java @@ -28,7 +28,7 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Sets; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.guice.ManageLifecycle; import io.druid.java.util.common.ISE; import io.druid.java.util.common.Pair; diff --git a/server/src/main/java/io/druid/client/BrokerServerView.java b/server/src/main/java/io/druid/client/BrokerServerView.java index a1adf95a9fef..47ec9d414ac4 100644 --- a/server/src/main/java/io/druid/client/BrokerServerView.java +++ b/server/src/main/java/io/druid/client/BrokerServerView.java @@ -25,8 +25,8 @@ import com.google.common.collect.Maps; import com.google.common.collect.Ordering; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.http.client.HttpClient; import io.druid.client.selector.QueryableDruidServer; import io.druid.client.selector.ServerSelector; import io.druid.client.selector.TierSelectorStrategy; diff --git a/server/src/main/java/io/druid/client/CachingClusteredClient.java b/server/src/main/java/io/druid/client/CachingClusteredClient.java index ab7cae42dd02..0fd9762dc7e9 100644 --- a/server/src/main/java/io/druid/client/CachingClusteredClient.java +++ b/server/src/main/java/io/druid/client/CachingClusteredClient.java @@ -40,7 +40,7 @@ import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.client.selector.QueryableDruidServer; diff --git a/server/src/main/java/io/druid/client/DirectDruidClient.java b/server/src/main/java/io/druid/client/DirectDruidClient.java index 5399a01de593..f18345cc8a64 100644 --- a/server/src/main/java/io/druid/client/DirectDruidClient.java +++ b/server/src/main/java/io/druid/client/DirectDruidClient.java @@ -33,13 +33,13 @@ import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import 
com.metamx.http.client.response.ClientResponse; -import com.metamx.http.client.response.HttpResponseHandler; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.ClientResponse; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.java.util.common.IAE; import io.druid.java.util.common.Pair; import io.druid.java.util.common.RE; @@ -116,21 +116,16 @@ public class DirectDruidClient implements QueryRunner private final boolean isSmile; /** - * Removes the magical fields added by {@link #makeResponseContextForQuery(Query, long)}. + * Removes the magical fields added by {@link #makeResponseContextForQuery()}. */ public static void removeMagicResponseContextFields(Map responseContext) { - responseContext.remove(DirectDruidClient.QUERY_FAIL_TIME); responseContext.remove(DirectDruidClient.QUERY_TOTAL_BYTES_GATHERED); } - public static Map makeResponseContextForQuery(Query query, long startTimeMillis) + public static Map makeResponseContextForQuery() { final Map responseContext = new ConcurrentHashMap<>(); - responseContext.put( - DirectDruidClient.QUERY_FAIL_TIME, - startTimeMillis + QueryContexts.getTimeout(query) - ); responseContext.put( DirectDruidClient.QUERY_TOTAL_BYTES_GATHERED, new AtomicLong() @@ -199,7 +194,7 @@ public Sequence run(final QueryPlus queryPlus, final Map c final long requestStartTimeNs = System.nanoTime(); - long timeoutAt = ((Long) context.get(QUERY_FAIL_TIME)).longValue(); + long timeoutAt = query.getContextValue(QUERY_FAIL_TIME); long maxScatterGatherBytes = QueryContexts.getMaxScatterGatherBytes(query); AtomicLong totalBytesGathered = (AtomicLong) context.get(QUERY_TOTAL_BYTES_GATHERED); diff --git a/server/src/main/java/io/druid/client/DruidDataSource.java b/server/src/main/java/io/druid/client/DruidDataSource.java index 84b04b3b146d..ed2594b67c88 100644 --- a/server/src/main/java/io/druid/client/DruidDataSource.java +++ b/server/src/main/java/io/druid/client/DruidDataSource.java @@ -21,13 +21,12 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; import io.druid.timeline.DataSegment; import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; /** */ @@ -35,7 +34,7 @@ public class DruidDataSource { private final String name; private final Map properties; - private final ConcurrentHashMap idToSegmentMap; + private final ConcurrentSkipListMap idToSegmentMap; public DruidDataSource( String name, @@ -44,7 +43,7 @@ public DruidDataSource( { this.name = Preconditions.checkNotNull(name); this.properties = properties; - this.idToSegmentMap = new ConcurrentHashMap<>(); + this.idToSegmentMap = new ConcurrentSkipListMap<>(); } @JsonProperty @@ -88,11 +87,7 @@ public boolean isEmpty() public ImmutableDruidDataSource toImmutableDruidDataSource() { - return new ImmutableDruidDataSource( - name, - ImmutableMap.copyOf(properties), - ImmutableMap.copyOf(idToSegmentMap) - ); + return new ImmutableDruidDataSource(name, properties, 
idToSegmentMap); } @Override @@ -107,6 +102,7 @@ public String toString() @Override public boolean equals(Object o) { + //noinspection Contract throw new UnsupportedOperationException("Use ImmutableDruidDataSource instead"); } diff --git a/server/src/main/java/io/druid/client/FilteredHttpServerInventoryViewProvider.java b/server/src/main/java/io/druid/client/FilteredHttpServerInventoryViewProvider.java index 5c3195bc15db..98d634f30e79 100644 --- a/server/src/main/java/io/druid/client/FilteredHttpServerInventoryViewProvider.java +++ b/server/src/main/java/io/druid/client/FilteredHttpServerInventoryViewProvider.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.JacksonInject; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Predicates; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.discovery.DruidNodeDiscoveryProvider; import io.druid.guice.annotations.EscalatedClient; import io.druid.guice.annotations.Smile; diff --git a/server/src/main/java/io/druid/client/HttpServerInventoryView.java b/server/src/main/java/io/druid/client/HttpServerInventoryView.java index 3bb8fd25a81a..cd73c2daf8db 100644 --- a/server/src/main/java/io/druid/client/HttpServerInventoryView.java +++ b/server/src/main/java/io/druid/client/HttpServerInventoryView.java @@ -30,12 +30,8 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.io.AppendableByteArrayInputStream; -import com.metamx.http.client.response.ClientResponse; -import com.metamx.http.client.response.InputStreamResponseHandler; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.HttpClient; import io.druid.concurrent.LifecycleLock; import io.druid.discovery.DataNodeService; import io.druid.discovery.DiscoveryDruidNode; @@ -50,6 +46,10 @@ import io.druid.java.util.common.concurrent.ScheduledExecutors; import io.druid.java.util.common.lifecycle.LifecycleStart; import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.io.AppendableByteArrayInputStream; +import io.druid.java.util.http.client.response.ClientResponse; +import io.druid.java.util.http.client.response.InputStreamResponseHandler; import io.druid.server.coordination.DataSegmentChangeRequest; import io.druid.server.coordination.DruidServerMetadata; import io.druid.server.coordination.SegmentChangeRequestDrop; diff --git a/server/src/main/java/io/druid/client/HttpServerInventoryViewProvider.java b/server/src/main/java/io/druid/client/HttpServerInventoryViewProvider.java index bfcb3b262384..2a0ff5615911 100644 --- a/server/src/main/java/io/druid/client/HttpServerInventoryViewProvider.java +++ b/server/src/main/java/io/druid/client/HttpServerInventoryViewProvider.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.JacksonInject; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Predicates; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.discovery.DruidNodeDiscoveryProvider; import io.druid.guice.annotations.EscalatedClient; import io.druid.guice.annotations.Smile; diff --git 
a/server/src/main/java/io/druid/client/ImmutableDruidDataSource.java b/server/src/main/java/io/druid/client/ImmutableDruidDataSource.java index 95532f891953..d23dbd628e14 100644 --- a/server/src/main/java/io/druid/client/ImmutableDruidDataSource.java +++ b/server/src/main/java/io/druid/client/ImmutableDruidDataSource.java @@ -19,13 +19,18 @@ package io.druid.client; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSortedMap; import io.druid.timeline.DataSegment; import java.util.Collection; import java.util.Map; import java.util.Objects; +import java.util.SortedMap; /** */ @@ -33,39 +38,58 @@ public class ImmutableDruidDataSource { private final String name; private final ImmutableMap properties; - private final ImmutableMap idToSegments; + private final ImmutableSortedMap idToSegments; public ImmutableDruidDataSource( String name, - ImmutableMap properties, - ImmutableMap idToSegments + Map properties, + SortedMap idToSegments ) { this.name = Preconditions.checkNotNull(name); - this.properties = properties; - this.idToSegments = idToSegments; + this.properties = ImmutableMap.copyOf(properties); + this.idToSegments = ImmutableSortedMap.copyOfSorted(idToSegments); } + @JsonCreator + public ImmutableDruidDataSource( + @JsonProperty("name") String name, + @JsonProperty("properties") Map properties, + @JsonProperty("segments") Collection segments + ) + { + this.name = Preconditions.checkNotNull(name); + this.properties = ImmutableMap.copyOf(properties); + final ImmutableSortedMap.Builder builder = ImmutableSortedMap.naturalOrder(); + segments.forEach(segment -> builder.put(segment.getIdentifier(), segment)); + this.idToSegments = builder.build(); + } + + @JsonProperty public String getName() { return name; } + @JsonProperty public Map getProperties() { return properties; } - public boolean isEmpty() + @JsonProperty + public Collection getSegments() { - return idToSegments.isEmpty(); + return idToSegments.values(); } - public Collection getSegments() + @JsonIgnore + public boolean isEmpty() { - return idToSegments.values(); + return idToSegments.isEmpty(); } + @JsonIgnore public DataSegment getSegment(String segmentIdentifier) { return idToSegments.get(segmentIdentifier); diff --git a/server/src/main/java/io/druid/client/ImmutableDruidServer.java b/server/src/main/java/io/druid/client/ImmutableDruidServer.java index d215a88aaba4..b43f9d00bbd9 100644 --- a/server/src/main/java/io/druid/client/ImmutableDruidServer.java +++ b/server/src/main/java/io/druid/client/ImmutableDruidServer.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; -import com.metamx.common.StringUtils; +import io.druid.java.util.common.StringUtils; import io.druid.server.coordination.DruidServerMetadata; import io.druid.server.coordination.ServerType; import io.druid.timeline.DataSegment; @@ -113,9 +113,9 @@ public Map getSegments() public String getURL() { if (metadata.getHostAndTlsPort() != null) { - return StringUtils.safeFormat("https://%s", metadata.getHostAndTlsPort()); + return StringUtils.nonStrictFormat("https://%s", metadata.getHostAndTlsPort()); } else { - return StringUtils.safeFormat("http://%s", metadata.getHostAndPort()); + return StringUtils.nonStrictFormat("http://%s", metadata.getHostAndPort()); } } diff 
--git a/server/src/main/java/io/druid/client/SingleServerInventoryView.java b/server/src/main/java/io/druid/client/SingleServerInventoryView.java index ef69679be23a..af5aaa423e92 100644 --- a/server/src/main/java/io/druid/client/SingleServerInventoryView.java +++ b/server/src/main/java/io/druid/client/SingleServerInventoryView.java @@ -25,7 +25,7 @@ import com.google.common.base.Predicate; import com.google.common.base.Predicates; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.guice.ManageLifecycle; import io.druid.java.util.common.Pair; import io.druid.server.coordination.DruidServerMetadata; diff --git a/server/src/main/java/io/druid/client/cache/Cache.java b/server/src/main/java/io/druid/client/cache/Cache.java index 7db32b1be10d..70d42e4baf3d 100644 --- a/server/src/main/java/io/druid/client/cache/Cache.java +++ b/server/src/main/java/io/druid/client/cache/Cache.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Ints; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.StringUtils; import javax.annotation.Nullable; diff --git a/server/src/main/java/io/druid/client/cache/CacheMonitor.java b/server/src/main/java/io/druid/client/cache/CacheMonitor.java index 752b90614c01..9a7bb141d242 100644 --- a/server/src/main/java/io/druid/client/cache/CacheMonitor.java +++ b/server/src/main/java/io/druid/client/cache/CacheMonitor.java @@ -20,9 +20,9 @@ package io.druid.client.cache; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; import io.druid.java.util.common.StringUtils; public class CacheMonitor extends AbstractMonitor diff --git a/server/src/main/java/io/druid/client/cache/CaffeineCache.java b/server/src/main/java/io/druid/client/cache/CaffeineCache.java index ba4688a3ef44..139d879bac5e 100644 --- a/server/src/main/java/io/druid/client/cache/CaffeineCache.java +++ b/server/src/main/java/io/druid/client/cache/CaffeineCache.java @@ -27,8 +27,8 @@ import com.google.common.collect.Maps; import com.google.common.primitives.Chars; import com.google.common.primitives.Ints; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.java.util.common.logger.Logger; import net.jpountz.lz4.LZ4Compressor; diff --git a/server/src/main/java/io/druid/client/cache/HybridCache.java b/server/src/main/java/io/druid/client/cache/HybridCache.java index 330549a6bae0..8787a985a395 100644 --- a/server/src/main/java/io/druid/client/cache/HybridCache.java +++ b/server/src/main/java/io/druid/client/cache/HybridCache.java @@ -21,7 +21,7 @@ import com.google.common.collect.Maps; import com.google.common.collect.Sets; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.logger.Logger; import javax.annotation.Nullable; diff --git a/server/src/main/java/io/druid/client/cache/MapCache.java 
b/server/src/main/java/io/druid/client/cache/MapCache.java index fe058523e5af..f9257fc52352 100644 --- a/server/src/main/java/io/druid/client/cache/MapCache.java +++ b/server/src/main/java/io/druid/client/cache/MapCache.java @@ -22,7 +22,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.primitives.Ints; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import java.nio.ByteBuffer; import java.util.Collections; diff --git a/server/src/main/java/io/druid/client/cache/MemcachedCache.java b/server/src/main/java/io/druid/client/cache/MemcachedCache.java index 7cd6e10223d6..59c9c9a4b9f2 100644 --- a/server/src/main/java/io/druid/client/cache/MemcachedCache.java +++ b/server/src/main/java/io/druid/client/cache/MemcachedCache.java @@ -32,9 +32,9 @@ import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; import com.google.common.primitives.Ints; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; import io.druid.collections.ResourceHolder; import io.druid.collections.StupidResourceHolder; import io.druid.java.util.common.logger.Logger; diff --git a/server/src/main/java/io/druid/client/coordinator/CoordinatorClient.java b/server/src/main/java/io/druid/client/coordinator/CoordinatorClient.java index 20b65f065bef..131708b99406 100644 --- a/server/src/main/java/io/druid/client/coordinator/CoordinatorClient.java +++ b/server/src/main/java/io/druid/client/coordinator/CoordinatorClient.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.client.ImmutableSegmentLoadInfo; import io.druid.discovery.DruidLeaderClient; import io.druid.java.util.common.ISE; diff --git a/server/src/main/java/io/druid/client/indexing/IndexingServiceClient.java b/server/src/main/java/io/druid/client/indexing/IndexingServiceClient.java index 39aa1e037df3..3eca7d355bbd 100644 --- a/server/src/main/java/io/druid/client/indexing/IndexingServiceClient.java +++ b/server/src/main/java/io/druid/client/indexing/IndexingServiceClient.java @@ -24,7 +24,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.discovery.DruidLeaderClient; import io.druid.indexer.TaskStatusPlus; import io.druid.java.util.common.DateTimes; diff --git a/server/src/main/java/io/druid/curator/discovery/CuratorDruidLeaderSelector.java b/server/src/main/java/io/druid/curator/discovery/CuratorDruidLeaderSelector.java index 36687d550a69..9b51a76580ef 100644 --- a/server/src/main/java/io/druid/curator/discovery/CuratorDruidLeaderSelector.java +++ b/server/src/main/java/io/druid/curator/discovery/CuratorDruidLeaderSelector.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Throwables; -import com.metamx.emitter.EmittingLogger; +import 
io.druid.java.util.emitter.EmittingLogger; import io.druid.concurrent.LifecycleLock; import io.druid.discovery.DruidLeaderSelector; import io.druid.guice.annotations.Self; diff --git a/server/src/main/java/io/druid/curator/discovery/CuratorServiceAnnouncer.java b/server/src/main/java/io/druid/curator/discovery/CuratorServiceAnnouncer.java index 8b737125bf9c..7a76946f046a 100644 --- a/server/src/main/java/io/druid/curator/discovery/CuratorServiceAnnouncer.java +++ b/server/src/main/java/io/druid/curator/discovery/CuratorServiceAnnouncer.java @@ -22,7 +22,7 @@ import com.google.common.base.Throwables; import com.google.common.collect.Maps; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.server.DruidNode; import org.apache.curator.x.discovery.ServiceDiscovery; import org.apache.curator.x.discovery.ServiceInstance; diff --git a/server/src/main/java/io/druid/discovery/DruidLeaderClient.java b/server/src/main/java/io/druid/discovery/DruidLeaderClient.java index 3955f9329b8b..a77b8312937c 100644 --- a/server/src/main/java/io/druid/discovery/DruidLeaderClient.java +++ b/server/src/main/java/io/druid/discovery/DruidLeaderClient.java @@ -22,11 +22,11 @@ import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.FullResponseHandler; -import com.metamx.http.client.response.FullResponseHolder; -import com.metamx.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.FullResponseHandler; +import io.druid.java.util.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.response.HttpResponseHandler; import io.druid.client.selector.Server; import io.druid.concurrent.LifecycleLock; import io.druid.curator.discovery.ServerDiscoverySelector; diff --git a/server/src/main/java/io/druid/guice/CoordinatorDiscoveryModule.java b/server/src/main/java/io/druid/guice/CoordinatorDiscoveryModule.java index 6af0f2d02250..a054287303fd 100644 --- a/server/src/main/java/io/druid/guice/CoordinatorDiscoveryModule.java +++ b/server/src/main/java/io/druid/guice/CoordinatorDiscoveryModule.java @@ -22,7 +22,7 @@ import com.google.inject.Binder; import com.google.inject.Module; import com.google.inject.Provides; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.client.coordinator.Coordinator; import io.druid.client.coordinator.CoordinatorSelectorConfig; import io.druid.curator.discovery.ServerDiscoveryFactory; diff --git a/server/src/main/java/io/druid/guice/DruidBinders.java b/server/src/main/java/io/druid/guice/DruidBinders.java index e9fecc090a56..81d6d71adbe7 100644 --- a/server/src/main/java/io/druid/guice/DruidBinders.java +++ b/server/src/main/java/io/druid/guice/DruidBinders.java @@ -23,7 +23,7 @@ import com.google.inject.TypeLiteral; import com.google.inject.multibindings.MapBinder; import com.google.inject.multibindings.Multibinder; -import com.metamx.metrics.Monitor; +import io.druid.java.util.metrics.Monitor; import io.druid.query.Query; import io.druid.query.QueryRunnerFactory; import io.druid.query.QueryToolChest; diff --git a/server/src/main/java/io/druid/guice/IndexingServiceDiscoveryModule.java 
b/server/src/main/java/io/druid/guice/IndexingServiceDiscoveryModule.java index 05da8f301bb4..d54f35d4506e 100644 --- a/server/src/main/java/io/druid/guice/IndexingServiceDiscoveryModule.java +++ b/server/src/main/java/io/druid/guice/IndexingServiceDiscoveryModule.java @@ -22,7 +22,7 @@ import com.google.inject.Binder; import com.google.inject.Module; import com.google.inject.Provides; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.client.indexing.IndexingService; import io.druid.client.indexing.IndexingServiceSelectorConfig; import io.druid.curator.discovery.ServerDiscoveryFactory; diff --git a/server/src/main/java/io/druid/guice/http/HttpClientModule.java b/server/src/main/java/io/druid/guice/http/HttpClientModule.java index 73b06163c0ce..748045c48983 100644 --- a/server/src/main/java/io/druid/guice/http/HttpClientModule.java +++ b/server/src/main/java/io/druid/guice/http/HttpClientModule.java @@ -23,9 +23,9 @@ import com.google.inject.Binder; import com.google.inject.Inject; import com.google.inject.Module; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.HttpClientConfig; -import com.metamx.http.client.HttpClientInit; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClientConfig; +import io.druid.java.util.http.client.HttpClientInit; import io.druid.guice.JsonConfigProvider; import io.druid.guice.LazySingleton; import io.druid.guice.annotations.EscalatedClient; diff --git a/server/src/main/java/io/druid/guice/http/LifecycleUtils.java b/server/src/main/java/io/druid/guice/http/LifecycleUtils.java index dd52d1d75df8..02393e623a61 100644 --- a/server/src/main/java/io/druid/guice/http/LifecycleUtils.java +++ b/server/src/main/java/io/druid/guice/http/LifecycleUtils.java @@ -24,9 +24,9 @@ public class LifecycleUtils { - public static com.metamx.common.lifecycle.Lifecycle asMmxLifecycle(Lifecycle lifecycle) + public static Lifecycle asMmxLifecycle(Lifecycle lifecycle) { - final com.metamx.common.lifecycle.Lifecycle metamxLifecycle = new com.metamx.common.lifecycle.Lifecycle(); + final Lifecycle metamxLifecycle = new Lifecycle(); try { lifecycle.addMaybeStartHandler(new Lifecycle.Handler() { diff --git a/server/src/main/java/io/druid/indexing/overlord/supervisor/NoopSupervisorSpec.java b/server/src/main/java/io/druid/indexing/overlord/supervisor/NoopSupervisorSpec.java index 401023fe032a..380861d9fb8f 100644 --- a/server/src/main/java/io/druid/indexing/overlord/supervisor/NoopSupervisorSpec.java +++ b/server/src/main/java/io/druid/indexing/overlord/supervisor/NoopSupervisorSpec.java @@ -19,20 +19,46 @@ package io.druid.indexing.overlord.supervisor; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import io.druid.indexing.overlord.DataSourceMetadata; import javax.annotation.Nullable; +import java.util.ArrayList; import java.util.List; +import java.util.Objects; /** * Used as a tombstone marker in the supervisors metadata table to indicate that the supervisor has been removed. */ public class NoopSupervisorSpec implements SupervisorSpec { + // NoopSupervisorSpec is used as a tombstone, added when a previously running supervisor is stopped. + // Inherit the datasources of the previous running spec, so that we can determine whether a user is authorized to see + // this tombstone (users can only see tombstones for datasources that they have access to). 
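Because the tombstone now carries the datasources of the supervisor it replaced, a supervisor listing can apply the same per-datasource authorization filter to NoopSupervisorSpec tombstones as to live specs. A minimal sketch under stated assumptions: authorizedDataSources is a hypothetical precomputed set of datasources the caller may read (Druid's real check goes through its Authorizer/Resource model, not a plain set):

    // Sketch only: filterAuthorized and authorizedDataSources are illustrative names.
    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    static List<SupervisorSpec> filterAuthorized(List<SupervisorSpec> specs, Set<String> authorizedDataSources)
    {
      return specs.stream()
                  .filter(spec -> spec.getDataSources() != null
                                  && authorizedDataSources.containsAll(spec.getDataSources()))
                  .collect(Collectors.toList());
    }
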
+ @Nullable + @JsonProperty("dataSources") + private List datasources; + + @Nullable + @JsonProperty("id") + private String id; + + @JsonCreator + public NoopSupervisorSpec( + @Nullable @JsonProperty("id") String id, + @Nullable @JsonProperty("dataSources") List datasources + ) + { + this.id = id; + this.datasources = datasources == null ? new ArrayList<>() : datasources; + } + @Override + @JsonProperty public String getId() { - return null; + return id; } @Override @@ -68,8 +94,30 @@ public void checkpoint( } @Override + @Nullable + @JsonProperty("dataSources") public List getDataSources() { - return null; + return datasources; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + NoopSupervisorSpec spec = (NoopSupervisorSpec) o; + return Objects.equals(datasources, spec.datasources) && + Objects.equals(getId(), spec.getId()); + } + + @Override + public int hashCode() + { + return Objects.hash(datasources, getId()); } } diff --git a/server/src/main/java/io/druid/indexing/overlord/supervisor/Supervisor.java b/server/src/main/java/io/druid/indexing/overlord/supervisor/Supervisor.java index c6c411480683..681421b48002 100644 --- a/server/src/main/java/io/druid/indexing/overlord/supervisor/Supervisor.java +++ b/server/src/main/java/io/druid/indexing/overlord/supervisor/Supervisor.java @@ -40,14 +40,14 @@ public interface Supervisor void reset(DataSourceMetadata dataSourceMetadata); /** - * The definition of checkpoint is not very strict as currently it does not affect data or control path + * The definition of checkpoint is not very strict as currently it does not affect data or control path. * On this call Supervisor can potentially checkpoint data processed so far to some durable storage * for example - Kafka Supervisor uses this to merge and handoff segments containing at least the data - * represented by dataSourceMetadata + * represented by {@param currentCheckpoint} DataSourceMetadata * - * @param sequenceName unique Identifier to figure out for which sequence to do check pointing - * @param previousCheckPoint DataSourceMetadata check pointed in previous call - * @param currentCheckPoint current DataSourceMetadata to be check pointed + * @param sequenceName unique Identifier to figure out for which sequence to do checkpointing + * @param previousCheckPoint DataSourceMetadata checkpointed in previous call + * @param currentCheckPoint current DataSourceMetadata to be checkpointed */ void checkpoint( @Nullable String sequenceName, diff --git a/server/src/main/java/io/druid/indexing/overlord/supervisor/VersionedSupervisorSpec.java b/server/src/main/java/io/druid/indexing/overlord/supervisor/VersionedSupervisorSpec.java index b96edf37406d..93772486c3a6 100644 --- a/server/src/main/java/io/druid/indexing/overlord/supervisor/VersionedSupervisorSpec.java +++ b/server/src/main/java/io/druid/indexing/overlord/supervisor/VersionedSupervisorSpec.java @@ -45,4 +45,31 @@ public String getVersion() { return version; } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + VersionedSupervisorSpec that = (VersionedSupervisorSpec) o; + + if (getSpec() != null ? !getSpec().equals(that.getSpec()) : that.getSpec() != null) { + return false; + } + return getVersion() != null ? 
getVersion().equals(that.getVersion()) : that.getVersion() == null; + + } + + @Override + public int hashCode() + { + int result = getSpec() != null ? getSpec().hashCode() : 0; + result = 31 * result + (getVersion() != null ? getVersion().hashCode() : 0); + return result; + } } diff --git a/server/src/main/java/io/druid/metadata/IndexerSQLMetadataStorageCoordinator.java b/server/src/main/java/io/druid/metadata/IndexerSQLMetadataStorageCoordinator.java index 52067ab960c4..746e359127ef 100644 --- a/server/src/main/java/io/druid/metadata/IndexerSQLMetadataStorageCoordinator.java +++ b/server/src/main/java/io/druid/metadata/IndexerSQLMetadataStorageCoordinator.java @@ -19,6 +19,7 @@ package io.druid.metadata; +import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Function; import com.google.common.base.Preconditions; @@ -39,6 +40,7 @@ import io.druid.java.util.common.IAE; import io.druid.java.util.common.ISE; import io.druid.java.util.common.Intervals; +import io.druid.java.util.common.Pair; import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.lifecycle.LifecycleStart; import io.druid.java.util.common.logger.Logger; @@ -64,6 +66,7 @@ import org.skife.jdbi.v2.util.ByteArrayMapper; import org.skife.jdbi.v2.util.StringMapper; +import javax.annotation.Nullable; import java.io.IOException; import java.sql.SQLException; import java.util.ArrayList; @@ -383,7 +386,7 @@ public SegmentPublishResult inTransaction( public SegmentIdentifier allocatePendingSegment( final String dataSource, final String sequenceName, - final String previousSegmentId, + @Nullable final String previousSegmentId, final Interval interval, final String maxVersion, final boolean skipSegmentLineageCheck @@ -394,224 +397,376 @@ public SegmentIdentifier allocatePendingSegment( Preconditions.checkNotNull(interval, "interval"); Preconditions.checkNotNull(maxVersion, "maxVersion"); - final String previousSegmentIdNotNull = previousSegmentId == null ? "" : previousSegmentId; - return connector.retryTransaction( new TransactionCallback() { @Override public SegmentIdentifier inTransaction(Handle handle, TransactionStatus transactionStatus) throws Exception { - final List existingBytes; - if (!skipSegmentLineageCheck) { - existingBytes = handle - .createQuery( - StringUtils.format( - "SELECT payload FROM %s WHERE " - + "dataSource = :dataSource AND " - + "sequence_name = :sequence_name AND " - + "sequence_prev_id = :sequence_prev_id", - dbTables.getPendingSegmentsTable() - ) - ).bind("dataSource", dataSource) - .bind("sequence_name", sequenceName) - .bind("sequence_prev_id", previousSegmentIdNotNull) - .map(ByteArrayMapper.FIRST) - .list(); - } else { - existingBytes = handle - .createQuery( - StringUtils.format( - "SELECT payload FROM %s WHERE " - + "dataSource = :dataSource AND " - + "sequence_name = :sequence_name AND " - + "start = :start AND " - + "%2$send%2$s = :end", - dbTables.getPendingSegmentsTable(), connector.getQuoteString() - ) - ).bind("dataSource", dataSource) - .bind("sequence_name", sequenceName) - .bind("start", interval.getStart().toString()) - .bind("end", interval.getEnd().toString()) - .map(ByteArrayMapper.FIRST) - .list(); - } + return skipSegmentLineageCheck ? 
+ allocatePendingSegment(handle, dataSource, sequenceName, interval, maxVersion) : + allocatePendingSegmentWithSegmentLineageCheck( + handle, + dataSource, + sequenceName, + previousSegmentId, + interval, + maxVersion + ); + } + }, + ALLOCATE_SEGMENT_QUIET_TRIES, + SQLMetadataConnector.DEFAULT_MAX_TRIES + ); + } - if (!existingBytes.isEmpty()) { - final SegmentIdentifier existingIdentifier = jsonMapper.readValue( - Iterables.getOnlyElement(existingBytes), - SegmentIdentifier.class - ); - - if (existingIdentifier.getInterval().getStartMillis() == interval.getStartMillis() - && existingIdentifier.getInterval().getEndMillis() == interval.getEndMillis()) { - log.info( - "Found existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB", - existingIdentifier.getIdentifierAsString(), - sequenceName, - previousSegmentIdNotNull - ); + @Nullable + private SegmentIdentifier allocatePendingSegmentWithSegmentLineageCheck( + final Handle handle, + final String dataSource, + final String sequenceName, + @Nullable final String previousSegmentId, + final Interval interval, + final String maxVersion + ) throws IOException + { + final String previousSegmentIdNotNull = previousSegmentId == null ? "" : previousSegmentId; + final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId( + handle.createQuery( + StringUtils.format( + "SELECT payload FROM %s WHERE " + + "dataSource = :dataSource AND " + + "sequence_name = :sequence_name AND " + + "sequence_prev_id = :sequence_prev_id", + dbTables.getPendingSegmentsTable() + ) + ), + interval, + sequenceName, + previousSegmentIdNotNull, + Pair.of("dataSource", dataSource), + Pair.of("sequence_name", sequenceName), + Pair.of("sequence_prev_id", previousSegmentIdNotNull) + ); - return existingIdentifier; - } else { - log.warn( - "Cannot use existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB, " - + "does not match requested interval[%s]", - existingIdentifier.getIdentifierAsString(), - sequenceName, - previousSegmentIdNotNull, - interval - ); + if (result.found) { + // The found existing segment identifier can be null if its interval doesn't match with the given interval + return result.segmentIdentifier; + } - return null; - } - } + final SegmentIdentifier newIdentifier = createNewSegment(handle, dataSource, interval, maxVersion); + if (newIdentifier == null) { + return null; + } - // Make up a pending segment based on existing segments and pending segments in the DB. This works - // assuming that all tasks inserting segments at a particular point in time are going through the - // allocatePendingSegment flow. This should be assured through some other mechanism (like task locks). + // SELECT -> INSERT can fail due to races; callers must be prepared to retry. + // Avoiding ON DUPLICATE KEY since it's not portable. + // Avoiding try/catch since it may cause inadvertent transaction-splitting. + + // UNIQUE key for the row, ensuring sequences do not fork in two directions. 
+ // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines + // have difficulty with large unique keys (see https://github.com/druid-io/druid/issues/2319) + final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode( + Hashing.sha1() + .newHasher() + .putBytes(StringUtils.toUtf8(sequenceName)) + .putByte((byte) 0xff) + .putBytes(StringUtils.toUtf8(previousSegmentIdNotNull)) + .hash() + .asBytes() + ); - final SegmentIdentifier newIdentifier; + insertToMetastore( + handle, + newIdentifier, + dataSource, + interval, + previousSegmentIdNotNull, + sequenceName, + sequenceNamePrevIdSha1 + ); + return newIdentifier; + } - final List> existingChunks = getTimelineForIntervalsWithHandle( - handle, - dataSource, - ImmutableList.of(interval) - ).lookup(interval); - - if (existingChunks.size() > 1) { - // Not possible to expand more than one chunk with a single segment. - log.warn( - "Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: already have [%,d] chunks.", - dataSource, - interval, - maxVersion, - existingChunks.size() - ); - return null; - } else { - SegmentIdentifier max = null; - - if (!existingChunks.isEmpty()) { - TimelineObjectHolder existingHolder = Iterables.getOnlyElement(existingChunks); - for (PartitionChunk existing : existingHolder.getObject()) { - if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject() - .getShardSpec() - .getPartitionNum()) { - max = SegmentIdentifier.fromDataSegment(existing.getObject()); - } - } - } + @Nullable + private SegmentIdentifier allocatePendingSegment( + final Handle handle, + final String dataSource, + final String sequenceName, + final Interval interval, + final String maxVersion + ) throws IOException + { + final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId( + handle.createQuery( + StringUtils.format( + "SELECT payload FROM %s WHERE " + + "dataSource = :dataSource AND " + + "sequence_name = :sequence_name AND " + + "start = :start AND " + + "%2$send%2$s = :end", + dbTables.getPendingSegmentsTable(), + connector.getQuoteString() + ) + ), + interval, + sequenceName, + null, + Pair.of("dataSource", dataSource), + Pair.of("sequence_name", sequenceName), + Pair.of("start", interval.getStart().toString()), + Pair.of("end", interval.getEnd().toString()) + ); - final List pendings = getPendingSegmentsForIntervalWithHandle( - handle, - dataSource, - interval - ); - - for (SegmentIdentifier pending : pendings) { - if (max == null || - pending.getVersion().compareTo(max.getVersion()) > 0 || - (pending.getVersion().equals(max.getVersion()) - && pending.getShardSpec().getPartitionNum() > max.getShardSpec().getPartitionNum())) { - max = pending; - } - } + if (result.found) { + // The found existing segment identifier can be null if its interval doesn't match with the given interval + return result.segmentIdentifier; + } - if (max == null) { - newIdentifier = new SegmentIdentifier( - dataSource, - interval, - maxVersion, - new NumberedShardSpec(0, 0) - ); - } else if (!max.getInterval().equals(interval) || max.getVersion().compareTo(maxVersion) > 0) { - log.warn( - "Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: conflicting segment[%s].", - dataSource, - interval, - maxVersion, - max.getIdentifierAsString() - ); - return null; - } else if (max.getShardSpec() instanceof LinearShardSpec) { - newIdentifier = new SegmentIdentifier( - dataSource, - max.getInterval(), - max.getVersion(), - new 
LinearShardSpec(max.getShardSpec().getPartitionNum() + 1) - ); - } else if (max.getShardSpec() instanceof NumberedShardSpec) { - newIdentifier = new SegmentIdentifier( - dataSource, - max.getInterval(), - max.getVersion(), - new NumberedShardSpec( - max.getShardSpec().getPartitionNum() + 1, - ((NumberedShardSpec) max.getShardSpec()).getPartitions() - ) - ); - } else { - log.warn( - "Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: ShardSpec class[%s] used by [%s].", - dataSource, - interval, - maxVersion, - max.getShardSpec().getClass(), - max.getIdentifierAsString() - ); - return null; - } - } + final SegmentIdentifier newIdentifier = createNewSegment(handle, dataSource, interval, maxVersion); + if (newIdentifier == null) { + return null; + } - // SELECT -> INSERT can fail due to races; callers must be prepared to retry. - // Avoiding ON DUPLICATE KEY since it's not portable. - // Avoiding try/catch since it may cause inadvertent transaction-splitting. - - // UNIQUE key for the row, ensuring sequences do not fork in two directions. - // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines - // have difficulty with large unique keys (see https://github.com/druid-io/druid/issues/2319) - final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode( - Hashing.sha1() - .newHasher() - .putBytes(StringUtils.toUtf8(sequenceName)) - .putByte((byte) 0xff) - .putBytes(StringUtils.toUtf8(previousSegmentIdNotNull)) - .hash() - .asBytes() - ); + // SELECT -> INSERT can fail due to races; callers must be prepared to retry. + // Avoiding ON DUPLICATE KEY since it's not portable. + // Avoiding try/catch since it may cause inadvertent transaction-splitting. + + // UNIQUE key for the row, ensuring we don't have more than one segment per sequence per interval. 
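To make the "callers must be prepared to retry" comment above concrete: if two tasks race through SELECT -> INSERT for the same sequence and interval, the loser's INSERT violates the single-column unique key, the transaction is retried, and the retried SELECT then finds the winner's row. In this patch the retry lives inside connector.retryTransaction(...) with ALLOCATE_SEGMENT_QUIET_TRIES; a hand-rolled equivalent at a hypothetical call site might look like:

    // Sketch only: maxTries and the coordinator variable are assumptions for illustration.
    SegmentIdentifier id = null;
    for (int attempt = 0; attempt < maxTries && id == null; attempt++) {
      try {
        id = coordinator.allocatePendingSegment(dataSource, sequenceName, null, interval, maxVersion, true);
      }
      catch (RuntimeException e) {
        // A unique-key violation means another task won the SELECT -> INSERT race;
        // loop so the next SELECT returns the row the winner inserted.
      }
    }
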
+ // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines + // have difficulty with large unique keys (see https://github.com/druid-io/druid/issues/2319) + final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode( + Hashing.sha1() + .newHasher() + .putBytes(StringUtils.toUtf8(sequenceName)) + .putByte((byte) 0xff) + .putLong(interval.getStartMillis()) + .putLong(interval.getEndMillis()) + .hash() + .asBytes() + ); - handle.createStatement( - StringUtils.format( - "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, sequence_name, sequence_prev_id, sequence_name_prev_id_sha1, payload) " - + "VALUES (:id, :dataSource, :created_date, :start, :end, :sequence_name, :sequence_prev_id, :sequence_name_prev_id_sha1, :payload)", - dbTables.getPendingSegmentsTable(), connector.getQuoteString() - ) - ) - .bind("id", newIdentifier.getIdentifierAsString()) - .bind("dataSource", dataSource) - .bind("created_date", DateTimes.nowUtc().toString()) - .bind("start", interval.getStart().toString()) - .bind("end", interval.getEnd().toString()) - .bind("sequence_name", sequenceName) - .bind("sequence_prev_id", previousSegmentIdNotNull) - .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1) - .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier)) - .execute(); - - log.info( - "Allocated pending segment [%s] for sequence[%s] (previous = [%s]) in DB", - newIdentifier.getIdentifierAsString(), - sequenceName, - previousSegmentIdNotNull - ); + // always insert empty previous sequence id + insertToMetastore(handle, newIdentifier, dataSource, interval, "", sequenceName, sequenceNamePrevIdSha1); - return newIdentifier; - } - }, - ALLOCATE_SEGMENT_QUIET_TRIES, - SQLMetadataConnector.DEFAULT_MAX_TRIES + log.info( + "Allocated pending segment [%s] for sequence[%s] in DB", + newIdentifier.getIdentifierAsString(), + sequenceName ); + + return newIdentifier; + } + + private CheckExistingSegmentIdResult checkAndGetExistingSegmentId( + final Query> query, + final Interval interval, + final String sequenceName, + final @Nullable String previousSegmentId, + final Pair... 
queryVars + ) throws IOException + { + Query> boundQuery = query; + for (Pair var : queryVars) { + boundQuery = boundQuery.bind(var.lhs, var.rhs); + } + final List existingBytes = boundQuery.map(ByteArrayMapper.FIRST).list(); + + if (!existingBytes.isEmpty()) { + final SegmentIdentifier existingIdentifier = jsonMapper.readValue( + Iterables.getOnlyElement(existingBytes), + SegmentIdentifier.class + ); + + if (existingIdentifier.getInterval().getStartMillis() == interval.getStartMillis() + && existingIdentifier.getInterval().getEndMillis() == interval.getEndMillis()) { + if (previousSegmentId == null) { + log.info( + "Found existing pending segment [%s] for sequence[%s] in DB", + existingIdentifier.getIdentifierAsString(), + sequenceName + ); + } else { + log.info( + "Found existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB", + existingIdentifier.getIdentifierAsString(), + sequenceName, + previousSegmentId + ); + } + + return new CheckExistingSegmentIdResult(true, existingIdentifier); + } else { + if (previousSegmentId == null) { + log.warn( + "Cannot use existing pending segment [%s] for sequence[%s] in DB, " + + "does not match requested interval[%s]", + existingIdentifier.getIdentifierAsString(), + sequenceName, + interval + ); + } else { + log.warn( + "Cannot use existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB, " + + "does not match requested interval[%s]", + existingIdentifier.getIdentifierAsString(), + sequenceName, + previousSegmentId, + interval + ); + } + + return new CheckExistingSegmentIdResult(true, null); + } + } + return new CheckExistingSegmentIdResult(false, null); + } + + private static class CheckExistingSegmentIdResult + { + private final boolean found; + @Nullable + private final SegmentIdentifier segmentIdentifier; + + CheckExistingSegmentIdResult(boolean found, @Nullable SegmentIdentifier segmentIdentifier) + { + this.found = found; + this.segmentIdentifier = segmentIdentifier; + } + } + + private void insertToMetastore( + Handle handle, + SegmentIdentifier newIdentifier, + String dataSource, + Interval interval, + String previousSegmentId, + String sequenceName, + String sequenceNamePrevIdSha1 + ) throws JsonProcessingException + { + handle.createStatement( + StringUtils.format( + "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, sequence_name, sequence_prev_id, sequence_name_prev_id_sha1, payload) " + + "VALUES (:id, :dataSource, :created_date, :start, :end, :sequence_name, :sequence_prev_id, :sequence_name_prev_id_sha1, :payload)", + dbTables.getPendingSegmentsTable(), + connector.getQuoteString() + ) + ) + .bind("id", newIdentifier.getIdentifierAsString()) + .bind("dataSource", dataSource) + .bind("created_date", DateTimes.nowUtc().toString()) + .bind("start", interval.getStart().toString()) + .bind("end", interval.getEnd().toString()) + .bind("sequence_name", sequenceName) + .bind("sequence_prev_id", previousSegmentId) + .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1) + .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier)) + .execute(); + } + + @Nullable + private SegmentIdentifier createNewSegment( + final Handle handle, + final String dataSource, + final Interval interval, + final String maxVersion + ) throws IOException + { + // Make up a pending segment based on existing segments and pending segments in the DB. This works + // assuming that all tasks inserting segments at a particular point in time are going through the + // allocatePendingSegment flow. 
This should be assured through some other mechanism (like task locks). + + final List> existingChunks = getTimelineForIntervalsWithHandle( + handle, + dataSource, + ImmutableList.of(interval) + ).lookup(interval); + + if (existingChunks.size() > 1) { + // Not possible to expand more than one chunk with a single segment. + log.warn( + "Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: already have [%,d] chunks.", + dataSource, + interval, + maxVersion, + existingChunks.size() + ); + return null; + } else { + SegmentIdentifier max = null; + + if (!existingChunks.isEmpty()) { + TimelineObjectHolder existingHolder = Iterables.getOnlyElement(existingChunks); + for (PartitionChunk existing : existingHolder.getObject()) { + if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject() + .getShardSpec() + .getPartitionNum()) { + max = SegmentIdentifier.fromDataSegment(existing.getObject()); + } + } + } + + final List pendings = getPendingSegmentsForIntervalWithHandle( + handle, + dataSource, + interval + ); + + for (SegmentIdentifier pending : pendings) { + if (max == null || + pending.getVersion().compareTo(max.getVersion()) > 0 || + (pending.getVersion().equals(max.getVersion()) + && pending.getShardSpec().getPartitionNum() > max.getShardSpec().getPartitionNum())) { + max = pending; + } + } + + if (max == null) { + return new SegmentIdentifier( + dataSource, + interval, + maxVersion, + new NumberedShardSpec(0, 0) + ); + } else if (!max.getInterval().equals(interval) || max.getVersion().compareTo(maxVersion) > 0) { + log.warn( + "Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: conflicting segment[%s].", + dataSource, + interval, + maxVersion, + max.getIdentifierAsString() + ); + return null; + } else if (max.getShardSpec() instanceof LinearShardSpec) { + return new SegmentIdentifier( + dataSource, + max.getInterval(), + max.getVersion(), + new LinearShardSpec(max.getShardSpec().getPartitionNum() + 1) + ); + } else if (max.getShardSpec() instanceof NumberedShardSpec) { + return new SegmentIdentifier( + dataSource, + max.getInterval(), + max.getVersion(), + new NumberedShardSpec( + max.getShardSpec().getPartitionNum() + 1, + ((NumberedShardSpec) max.getShardSpec()).getPartitions() + ) + ); + } else { + log.warn( + "Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: ShardSpec class[%s] used by [%s].", + dataSource, + interval, + maxVersion, + max.getShardSpec().getClass(), + max.getIdentifierAsString() + ); + return null; + } + } } @Override diff --git a/server/src/main/java/io/druid/metadata/SQLMetadataRuleManager.java b/server/src/main/java/io/druid/metadata/SQLMetadataRuleManager.java index e527d632ded0..8bec701757da 100644 --- a/server/src/main/java/io/druid/metadata/SQLMetadataRuleManager.java +++ b/server/src/main/java/io/druid/metadata/SQLMetadataRuleManager.java @@ -31,7 +31,7 @@ import com.google.common.util.concurrent.ListeningScheduledExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.audit.AuditEntry; import io.druid.audit.AuditInfo; import io.druid.audit.AuditManager; diff --git a/server/src/main/java/io/druid/metadata/SQLMetadataSegmentManager.java b/server/src/main/java/io/druid/metadata/SQLMetadataSegmentManager.java index eca6620f17a4..6f3f4581991d 100644 --- 
a/server/src/main/java/io/druid/metadata/SQLMetadataSegmentManager.java +++ b/server/src/main/java/io/druid/metadata/SQLMetadataSegmentManager.java @@ -33,10 +33,9 @@ import com.google.common.util.concurrent.ListeningScheduledExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.client.DruidDataSource; import io.druid.client.ImmutableDruidDataSource; -import io.druid.concurrent.LifecycleLock; import io.druid.guice.ManageLifecycle; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.Intervals; @@ -86,7 +85,10 @@ public class SQLMetadataSegmentManager implements MetadataSegmentManager private static final Interner DATA_SEGMENT_INTERNER = Interners.newWeakInterner(); private static final EmittingLogger log = new EmittingLogger(SQLMetadataSegmentManager.class); - private final LifecycleLock lifecycleLock = new LifecycleLock(); + // Use to synchronize start() and stop(). These methods should be synchronized to prevent from being called at the + // same time if two different threads are calling them. This might be possible if a druid coordinator gets and drops + // leadership repeatedly in quick succession. + private final Object lock = new Object(); private final ObjectMapper jsonMapper; private final Supplier config; @@ -96,6 +98,7 @@ public class SQLMetadataSegmentManager implements MetadataSegmentManager private volatile ListeningScheduledExecutorService exec = null; private volatile ListenableFuture future = null; + private volatile boolean started; @Inject public SQLMetadataSegmentManager( @@ -118,11 +121,11 @@ public SQLMetadataSegmentManager( @LifecycleStart public void start() { - if (!lifecycleLock.canStart()) { - return; - } + synchronized (lock) { + if (started) { + return; + } - try { exec = MoreExecutors.listeningDecorator(Execs.scheduledSingleThreaded("DatabaseSegmentManager-Exec--%d")); final Duration delay = config.get().getPollDuration().toStandardDuration(); @@ -145,10 +148,7 @@ public void run() delay.getMillis(), TimeUnit.MILLISECONDS ); - lifecycleLock.started(); - } - finally { - lifecycleLock.exitStart(); + started = true; } } @@ -156,10 +156,11 @@ public void run() @LifecycleStop public void stop() { - if (!lifecycleLock.canStop()) { - return; - } - try { + synchronized (lock) { + if (!started) { + return; + } + final ConcurrentHashMap emptyMap = new ConcurrentHashMap<>(); ConcurrentHashMap current; do { @@ -170,9 +171,7 @@ public void stop() future = null; exec.shutdownNow(); exec = null; - } - finally { - lifecycleLock.exitStop(); + started = false; } } @@ -368,7 +367,7 @@ public boolean removeSegment(String ds, final String segmentID) @Override public boolean isStarted() { - return lifecycleLock.isStarted(); + return started; } @Override @@ -422,7 +421,7 @@ public List fold( public void poll() { try { - if (!lifecycleLock.isStarted()) { + if (!started) { return; } diff --git a/server/src/main/java/io/druid/metadata/SQLMetadataStorageActionHandler.java b/server/src/main/java/io/druid/metadata/SQLMetadataStorageActionHandler.java index 90b508f05052..66208be95692 100644 --- a/server/src/main/java/io/druid/metadata/SQLMetadataStorageActionHandler.java +++ b/server/src/main/java/io/druid/metadata/SQLMetadataStorageActionHandler.java @@ -27,7 +27,7 @@ import com.google.common.base.Throwables; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import 
com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.Pair; import io.druid.java.util.common.StringUtils; diff --git a/server/src/main/java/io/druid/query/lookup/LookupReferencesManager.java b/server/src/main/java/io/druid/query/lookup/LookupReferencesManager.java index 6200961fad24..f2c0036707b0 100644 --- a/server/src/main/java/io/druid/query/lookup/LookupReferencesManager.java +++ b/server/src/main/java/io/druid/query/lookup/LookupReferencesManager.java @@ -28,8 +28,8 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.client.coordinator.Coordinator; import io.druid.concurrent.LifecycleLock; import io.druid.discovery.DruidLeaderClient; @@ -65,7 +65,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.LockSupport; import java.util.function.Function; -import java.util.stream.Collectors; /** * This class provide a basic {@link LookupExtractorFactory} references manager. @@ -340,7 +339,7 @@ private void updateToLoadAndDrop( private void takeSnapshot(Map lookupMap) { if (lookupSnapshotTaker != null) { - lookupSnapshotTaker.takeSnapshot(getLookupBeanList(lookupMap)); + lookupSnapshotTaker.takeSnapshot(lookupListeningAnnouncerConfig.getLookupTier(), getLookupBeanList(lookupMap)); } } @@ -363,8 +362,7 @@ private List getLookupsList() { List lookupBeanList; if (lookupConfig.getEnableLookupSyncOnStartup()) { - String tier = lookupListeningAnnouncerConfig.getLookupTier(); - lookupBeanList = getLookupListFromCoordinator(tier); + lookupBeanList = getLookupListFromCoordinator(lookupListeningAnnouncerConfig.getLookupTier()); if (lookupBeanList == null) { LOG.info("Coordinator is unavailable. 
Loading saved snapshot instead"); lookupBeanList = getLookupListFromSnapshot(); @@ -456,7 +454,7 @@ private Map tryGetLookupListFromCoordin private List getLookupListFromSnapshot() { if (lookupSnapshotTaker != null) { - return lookupSnapshotTaker.pullExistingSnapshot(); + return lookupSnapshotTaker.pullExistingSnapshot(lookupListeningAnnouncerConfig.getLookupTier()); } return null; } @@ -472,37 +470,30 @@ private List getLookupBeanList(Map lookupBeanList) { - ImmutableMap.Builder builder = ImmutableMap.builder(); - ExecutorService executorService = Execs.multiThreaded( + final ImmutableMap.Builder builder = ImmutableMap.builder(); + final ExecutorService executorService = Execs.multiThreaded( lookupConfig.getNumLookupLoadingThreads(), "LookupReferencesManager-Startup-%s" ); - CompletionService> completionService = + final CompletionService> completionService = new ExecutorCompletionService<>(executorService); + final List remainingLookups = new ArrayList<>(lookupBeanList); try { LOG.info("Starting lookup loading process"); - List remainingLookups = lookupBeanList; - for (int i = 0; i < lookupConfig.getLookupStartRetries(); i++) { + for (int i = 0; i < lookupConfig.getLookupStartRetries() && !remainingLookups.isEmpty(); i++) { LOG.info("Round of attempts #%d, [%d] lookups", i + 1, remainingLookups.size()); - Map successfulLookups = + final Map successfulLookups = startLookups(remainingLookups, completionService); builder.putAll(successfulLookups); - List failedLookups = remainingLookups - .stream() - .filter(l -> !successfulLookups.containsKey(l.getName())) - .collect(Collectors.toList()); - if (failedLookups.isEmpty()) { - break; - } else { - // next round - remainingLookups = failedLookups; - } + remainingLookups.removeIf(l -> successfulLookups.containsKey(l.getName())); + } + if (!remainingLookups.isEmpty()) { + LOG.warn( + "Failed to start the following lookups after [%d] attempts: [%s]", + lookupConfig.getLookupStartRetries(), + remainingLookups + ); } - LOG.info( - "Failed to start the following lookups after [%d] attempts: [%s]", - lookupConfig.getLookupStartRetries(), - remainingLookups - ); stateRef.set(new LookupUpdateState(builder.build(), ImmutableList.of(), ImmutableList.of())); } catch (InterruptedException | RuntimeException e) { diff --git a/server/src/main/java/io/druid/query/lookup/RegisteredLookupExtractionFn.java b/server/src/main/java/io/druid/query/lookup/RegisteredLookupExtractionFn.java index 525f72dcb78a..d0fd34cf2365 100644 --- a/server/src/main/java/io/druid/query/lookup/RegisteredLookupExtractionFn.java +++ b/server/src/main/java/io/druid/query/lookup/RegisteredLookupExtractionFn.java @@ -28,6 +28,7 @@ import javax.annotation.Nullable; import java.nio.ByteBuffer; +import java.util.Objects; public class RegisteredLookupExtractionFn implements ExtractionFn { @@ -38,7 +39,7 @@ public class RegisteredLookupExtractionFn implements ExtractionFn private final String lookup; private final boolean retainMissingValue; private final String replaceMissingValueWith; - private final boolean injective; + private final Boolean injective; private final boolean optimize; @JsonCreator @@ -47,7 +48,7 @@ public RegisteredLookupExtractionFn( @JsonProperty("lookup") String lookup, @JsonProperty("retainMissingValue") final boolean retainMissingValue, @Nullable @JsonProperty("replaceMissingValueWith") final String replaceMissingValueWith, - @JsonProperty("injective") final boolean injective, + @JsonProperty("injective") final Boolean injective, @JsonProperty("optimize") Boolean optimize 
) { @@ -79,7 +80,7 @@ public String getReplaceMissingValueWith() } @JsonProperty("injective") - public boolean isInjective() + public Boolean isInjective() { return injective; } @@ -142,13 +143,17 @@ private LookupExtractionFn ensureDelegate() // http://www.javamex.com/tutorials/double_checked_locking.shtml synchronized (delegateLock) { if (null == delegate) { + final LookupExtractor factory = Preconditions.checkNotNull( + manager.get(getLookup()), + "Lookup [%s] not found", + getLookup() + ).getLookupExtractorFactory().get(); + delegate = new LookupExtractionFn( - Preconditions.checkNotNull(manager.get(getLookup()), "Lookup [%s] not found", getLookup()) - .getLookupExtractorFactory() - .get(), + factory, isRetainMissingValue(), getReplaceMissingValueWith(), - isInjective(), + injective == null ? factory.isOneToOne() : injective, isOptimize() ); } @@ -158,7 +163,7 @@ private LookupExtractionFn ensureDelegate() } @Override - public boolean equals(Object o) + public boolean equals(final Object o) { if (this == o) { return true; @@ -166,35 +171,19 @@ public boolean equals(Object o) if (o == null || getClass() != o.getClass()) { return false; } - - RegisteredLookupExtractionFn that = (RegisteredLookupExtractionFn) o; - - if (isRetainMissingValue() != that.isRetainMissingValue()) { - return false; - } - if (isInjective() != that.isInjective()) { - return false; - } - if (isOptimize() != that.isOptimize()) { - return false; - } - if (!getLookup().equals(that.getLookup())) { - return false; - } - return getReplaceMissingValueWith() != null - ? getReplaceMissingValueWith().equals(that.getReplaceMissingValueWith()) - : that.getReplaceMissingValueWith() == null; + final RegisteredLookupExtractionFn that = (RegisteredLookupExtractionFn) o; + return retainMissingValue == that.retainMissingValue && + optimize == that.optimize && + Objects.equals(lookup, that.lookup) && + Objects.equals(replaceMissingValueWith, that.replaceMissingValueWith) && + Objects.equals(injective, that.injective); } @Override public int hashCode() { - int result = getLookup().hashCode(); - result = 31 * result + (isRetainMissingValue() ? 1 : 0); - result = 31 * result + (getReplaceMissingValueWith() != null ? getReplaceMissingValueWith().hashCode() : 0); - result = 31 * result + (isInjective() ? 1 : 0); - result = 31 * result + (isOptimize() ? 
1 : 0); - return result; + + return Objects.hash(lookup, retainMissingValue, replaceMissingValueWith, injective, optimize); } @Override diff --git a/server/src/main/java/io/druid/segment/loading/LocalDataSegmentFinder.java b/server/src/main/java/io/druid/segment/loading/LocalDataSegmentFinder.java index 018dcce3e281..177461c39f5f 100644 --- a/server/src/main/java/io/druid/segment/loading/LocalDataSegmentFinder.java +++ b/server/src/main/java/io/druid/segment/loading/LocalDataSegmentFinder.java @@ -20,24 +20,24 @@ package io.druid.segment.loading; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.Sets; import com.google.inject.Inject; - import io.druid.guice.LocalDataStorageDruidModule; +import io.druid.java.util.common.Pair; import io.druid.java.util.common.logger.Logger; import io.druid.timeline.DataSegment; import org.apache.commons.io.FileUtils; import java.io.File; import java.io.IOException; +import java.util.HashMap; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; /** */ public class LocalDataSegmentFinder implements DataSegmentFinder { - private static final Logger log = new Logger(LocalDataSegmentFinder.class); private final ObjectMapper mapper; @@ -49,25 +49,26 @@ public LocalDataSegmentFinder(ObjectMapper mapper) } @Override - public Set findSegments(String workingDirPath, boolean updateDescriptor) - throws SegmentLoadingException + public Set findSegments(String workingDirPath, boolean updateDescriptor) throws SegmentLoadingException { + final Map> timestampedSegments = new HashMap<>(); - final Set segments = Sets.newHashSet(); final File workingDir = new File(workingDirPath); if (!workingDir.isDirectory()) { throw new SegmentLoadingException("Working directory [%s] didn't exist !?", workingDir); } - recursiveSearchSegments(segments, workingDir, updateDescriptor); - return segments; + recursiveSearchSegments(timestampedSegments, workingDir, updateDescriptor); + + return timestampedSegments.values().stream().map(x -> x.lhs).collect(Collectors.toSet()); } - private void recursiveSearchSegments(Set segments, File workingDir, boolean updateDescriptor) - throws SegmentLoadingException + private void recursiveSearchSegments( + Map> timestampedSegments, File workingDir, boolean updateDescriptor + ) throws SegmentLoadingException { for (File file : workingDir.listFiles()) { if (file.isDirectory()) { - recursiveSearchSegments(segments, file, updateDescriptor); + recursiveSearchSegments(timestampedSegments, file, updateDescriptor); } else if (file.getName().equals("descriptor.json")) { final File indexZip = new File(file.getParentFile(), "index.zip"); if (indexZip.exists()) { @@ -88,7 +89,8 @@ private void recursiveSearchSegments(Set segments, File workingDir, FileUtils.writeStringToFile(file, mapper.writeValueAsString(dataSegment)); } } - segments.add(dataSegment); + + DataSegmentFinder.putInMapRetainingNewest(timestampedSegments, dataSegment, indexZip.lastModified()); } catch (IOException e) { throw new SegmentLoadingException( diff --git a/server/src/main/java/io/druid/segment/loading/LocalDataSegmentKiller.java b/server/src/main/java/io/druid/segment/loading/LocalDataSegmentKiller.java index 8c8715d1f9d9..770c89a4a023 100644 --- a/server/src/main/java/io/druid/segment/loading/LocalDataSegmentKiller.java +++ b/server/src/main/java/io/druid/segment/loading/LocalDataSegmentKiller.java @@ -52,19 +52,21 @@ public void kill(DataSegment segment) throws SegmentLoadingException try { if (path.getName().endsWith(".zip")) { - 
// path format -- > .../dataSource/interval/version/partitionNum/xxx.zip - File partitionNumDir = path.getParentFile(); - FileUtils.deleteDirectory(partitionNumDir); + // or .../dataSource/interval/version/partitionNum/UUID/xxx.zip + + File parentDir = path.getParentFile(); + FileUtils.deleteDirectory(parentDir); - //try to delete other directories if possible - File versionDir = partitionNumDir.getParentFile(); - if (versionDir.delete()) { - File intervalDir = versionDir.getParentFile(); - if (intervalDir.delete()) { - File dataSourceDir = intervalDir.getParentFile(); - dataSourceDir.delete(); + // possibly recursively delete empty parent directories up to 'dataSource' + parentDir = parentDir.getParentFile(); + int maxDepth = 4; // if for some reason there's no dataSource directory, stop recursing somewhere reasonable + while (parentDir != null && --maxDepth >= 0) { + if (!parentDir.delete() || segment.getDataSource().equals(parentDir.getName())) { + break; } + + parentDir = parentDir.getParentFile(); } } else { throw new SegmentLoadingException("Unknown file type[%s]", path); diff --git a/server/src/main/java/io/druid/segment/loading/LocalDataSegmentPusher.java b/server/src/main/java/io/druid/segment/loading/LocalDataSegmentPusher.java index b6e52bd551d3..830af44be3d9 100644 --- a/server/src/main/java/io/druid/segment/loading/LocalDataSegmentPusher.java +++ b/server/src/main/java/io/druid/segment/loading/LocalDataSegmentPusher.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; import io.druid.java.util.common.CompressionUtils; +import io.druid.java.util.common.IOE; import io.druid.java.util.common.logger.Logger; import io.druid.segment.SegmentUtils; import io.druid.timeline.DataSegment; @@ -31,25 +32,23 @@ import java.io.File; import java.io.IOException; import java.net.URI; -import java.nio.file.FileAlreadyExistsException; import java.nio.file.Files; +import java.nio.file.StandardOpenOption; import java.util.Map; import java.util.UUID; -/** - */ public class LocalDataSegmentPusher implements DataSegmentPusher { private static final Logger log = new Logger(LocalDataSegmentPusher.class); + private static final String INDEX_FILENAME = "index.zip"; + private static final String DESCRIPTOR_FILENAME = "descriptor.json"; + private final LocalDataSegmentPusherConfig config; private final ObjectMapper jsonMapper; @Inject - public LocalDataSegmentPusher( - LocalDataSegmentPusherConfig config, - ObjectMapper jsonMapper - ) + public LocalDataSegmentPusher(LocalDataSegmentPusherConfig config, ObjectMapper jsonMapper) { this.config = config; this.jsonMapper = jsonMapper; @@ -71,11 +70,11 @@ public String getPathForHadoop(String dataSource) } @Override - public DataSegment push(File dataSegmentFile, DataSegment segment) throws IOException + public DataSegment push(final File dataSegmentFile, final DataSegment segment, final boolean useUniquePath) + throws IOException { - final String storageDir = this.getStorageDir(segment); final File baseStorageDir = config.getStorageDirectory(); - final File outDir = new File(baseStorageDir, storageDir); + final File outDir = new File(baseStorageDir, this.getStorageDir(segment, useUniquePath)); log.info("Copying segment[%s] to local filesystem at location[%s]", segment.getIdentifier(), outDir.toString()); @@ -93,29 +92,39 @@ public DataSegment push(File dataSegmentFile, DataSegment segment) throws IOExce ); } - final File tmpOutDir = new File(baseStorageDir, intermediateDirFor(storageDir)); + final File tmpOutDir 
= new File(baseStorageDir, makeIntermediateDir()); log.info("Creating intermediate directory[%s] for segment[%s]", tmpOutDir.toString(), segment.getIdentifier()); - final long size = compressSegment(dataSegmentFile, tmpOutDir); - - final DataSegment dataSegment = createDescriptorFile( - segment.withLoadSpec(makeLoadSpec(new File(outDir, "index.zip").toURI())) - .withSize(size) - .withBinaryVersion(SegmentUtils.getVersionFromDir(dataSegmentFile)), - tmpOutDir - ); + FileUtils.forceMkdir(tmpOutDir); - // moving the temporary directory to the final destination, once success the potentially concurrent push operations - // will be failed and will read the descriptor.json created by current push operation directly - FileUtils.forceMkdir(outDir.getParentFile()); try { - Files.move(tmpOutDir.toPath(), outDir.toPath()); + final File tmpIndexFile = new File(tmpOutDir, INDEX_FILENAME); + final long size = compressSegment(dataSegmentFile, tmpIndexFile); + + final File tmpDescriptorFile = new File(tmpOutDir, DESCRIPTOR_FILENAME); + DataSegment dataSegment = createDescriptorFile( + segment.withLoadSpec(makeLoadSpec(new File(outDir, INDEX_FILENAME).toURI())) + .withSize(size) + .withBinaryVersion(SegmentUtils.getVersionFromDir(dataSegmentFile)), + tmpDescriptorFile + ); + + FileUtils.forceMkdir(outDir); + final File indexFileTarget = new File(outDir, tmpIndexFile.getName()); + final File descriptorFileTarget = new File(outDir, tmpDescriptorFile.getName()); + + if (!tmpIndexFile.renameTo(indexFileTarget)) { + throw new IOE("Failed to rename [%s] to [%s]", tmpIndexFile, indexFileTarget); + } + + if (!tmpDescriptorFile.renameTo(descriptorFileTarget)) { + throw new IOE("Failed to rename [%s] to [%s]", tmpDescriptorFile, descriptorFileTarget); + } + + return dataSegment; } - catch (FileAlreadyExistsException e) { - log.warn("Push destination directory[%s] exists, ignore this message if replication is configured.", outDir); + finally { FileUtils.deleteDirectory(tmpOutDir); - return jsonMapper.readValue(new File(outDir, "descriptor.json"), DataSegment.class); } - return dataSegment; } @Override @@ -124,26 +133,26 @@ public Map makeLoadSpec(URI finalIndexZipFilePath) return ImmutableMap.of("type", "local", "path", finalIndexZipFilePath.getPath()); } - private String intermediateDirFor(String storageDir) + private String makeIntermediateDir() { - return "intermediate_pushes/" + storageDir + "." 
+ UUID.randomUUID().toString(); } - private long compressSegment(File dataSegmentFile, File outDir) throws IOException + private long compressSegment(File dataSegmentFile, File dest) throws IOException { - FileUtils.forceMkdir(outDir); - File outFile = new File(outDir, "index.zip"); - log.info("Compressing files from[%s] to [%s]", dataSegmentFile, outFile); - return CompressionUtils.zip(dataSegmentFile, outFile); + log.info("Compressing files from[%s] to [%s]", dataSegmentFile, dest); + return CompressionUtils.zip(dataSegmentFile, dest, true); } - private DataSegment createDescriptorFile(DataSegment segment, File outDir) throws IOException + private DataSegment createDescriptorFile(DataSegment segment, File dest) throws IOException { - File descriptorFile = new File(outDir, "descriptor.json"); - log.info("Creating descriptor file at[%s]", descriptorFile); + log.info("Creating descriptor file at[%s]", dest); // Avoid using Guava in DataSegmentPushers because they might be used with very diverse Guava versions in // runtime, and because Guava deletes methods over time, that causes incompatibilities. - Files.write(descriptorFile.toPath(), jsonMapper.writeValueAsBytes(segment)); + Files.write( + dest.toPath(), jsonMapper.writeValueAsBytes(segment), StandardOpenOption.CREATE, StandardOpenOption.SYNC + ); + return segment; } } diff --git a/server/src/main/java/io/druid/segment/loading/SegmentLoaderLocalCacheManager.java b/server/src/main/java/io/druid/segment/loading/SegmentLoaderLocalCacheManager.java index fbdb31cbcc8e..75377a463ec0 100644 --- a/server/src/main/java/io/druid/segment/loading/SegmentLoaderLocalCacheManager.java +++ b/server/src/main/java/io/druid/segment/loading/SegmentLoaderLocalCacheManager.java @@ -23,7 +23,7 @@ import com.google.common.collect.Lists; import com.google.common.primitives.Longs; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.guice.annotations.Json; import io.druid.segment.IndexIO; import io.druid.segment.Segment; @@ -91,10 +91,10 @@ public boolean isSegmentLoaded(final DataSegment segment) return findStorageLocationIfLoaded(segment) != null; } - public StorageLocation findStorageLocationIfLoaded(final DataSegment segment) + private StorageLocation findStorageLocationIfLoaded(final DataSegment segment) { for (StorageLocation location : getSortedList(locations)) { - File localStorageDir = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment)); + File localStorageDir = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment, false)); if (localStorageDir.exists()) { return location; } @@ -127,7 +127,7 @@ public Segment getSegment(DataSegment segment) throws SegmentLoadingException public File getSegmentFiles(DataSegment segment) throws SegmentLoadingException { StorageLocation loc = findStorageLocationIfLoaded(segment); - String storageDir = DataSegmentPusher.getDefaultStorageDir(segment); + String storageDir = DataSegmentPusher.getDefaultStorageDir(segment, false); if (loc == null) { loc = loadSegmentWithRetry(segment, storageDir); @@ -232,11 +232,11 @@ public void cleanup(DataSegment segment) throws SegmentLoadingException // in this case, findStorageLocationIfLoaded() will think the segment is located in the failed storageDir even though it is not. // So we should always clean all possible locations here
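Every cache-side call above passes false for the unique-path flag because a cache can only find a segment again if its directory is a pure function of the segment's coordinates. As a rough sketch of the conventional dataSource/interval/version/partitionNum shape (illustrative only, not the actual DataSegmentPusher.getDefaultStorageDir implementation):

// Sketch: derive a deterministic (non-unique) storage directory from a
// segment's coordinates, using DataSegment's standard accessors.
static String defaultStorageDir(DataSegment segment)
{
  return String.join(
      "/",
      segment.getDataSource(),
      segment.getInterval().getStart() + "_" + segment.getInterval().getEnd(),
      segment.getVersion(),
      String.valueOf(segment.getShardSpec().getPartitionNum())
  );
}

Since every process derives the same path for the same segment, isSegmentLoaded(), getSegmentFiles(), and cleanup() can all agree on where a locally cached copy must live; the extra UUID level that a unique-path push adds would break that agreement, which is why the cache always resolves the non-unique form.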
for (StorageLocation location : getSortedList(locations)) { - File localStorageDir = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment)); + File localStorageDir = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment, false)); if (localStorageDir.exists()) { // Druid creates folders of the form dataSource/interval/version/partitionNum. // We need to clean up all these directories if they are all empty. - File cacheFile = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment)); + File cacheFile = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment, false)); cleanupCacheFiles(location.getPath(), cacheFile); location.removeSegment(segment); } @@ -272,7 +272,7 @@ public void cleanupCacheFiles(File baseFile, File cacheFile) throws IOException } } - public List getSortedList(List locs) + private List getSortedList(List locs) { List locations = new ArrayList<>(locs); Collections.sort(locations, COMPARATOR); diff --git a/server/src/main/java/io/druid/segment/realtime/RealtimeManager.java b/server/src/main/java/io/druid/segment/realtime/RealtimeManager.java index 73bf34fb0b28..f054ad94aaa9 100644 --- a/server/src/main/java/io/druid/segment/realtime/RealtimeManager.java +++ b/server/src/main/java/io/druid/segment/realtime/RealtimeManager.java @@ -29,7 +29,7 @@ import com.google.common.collect.Maps; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.concurrent.Execs; import io.druid.data.input.Committer; import io.druid.data.input.Firehose; diff --git a/server/src/main/java/io/druid/segment/realtime/RealtimeMetricsMonitor.java b/server/src/main/java/io/druid/segment/realtime/RealtimeMetricsMonitor.java index 376181e11a1f..f65ee0627c78 100644 --- a/server/src/main/java/io/druid/segment/realtime/RealtimeMetricsMonitor.java +++ b/server/src/main/java/io/druid/segment/realtime/RealtimeMetricsMonitor.java @@ -22,11 +22,11 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; -import com.metamx.metrics.MonitorUtils; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; +import io.druid.java.util.metrics.MonitorUtils; import io.druid.query.DruidMetrics; import java.util.List; diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/Appenderator.java b/server/src/main/java/io/druid/segment/realtime/appenderator/Appenderator.java index 564ed4b9181d..7f06f86269e3 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/Appenderator.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/Appenderator.java @@ -26,6 +26,7 @@ import io.druid.query.QuerySegmentWalker; import io.druid.segment.incremental.IndexSizeExceededException; +import javax.annotation.Nullable; import java.io.Closeable; import java.util.Collection; import java.util.List; @@ -35,9 +36,9 @@ * both of those. It can also push data to deep storage. 
But, it does not decide which segments data should go into. * It also doesn't publish segments to the metadata store or monitor handoff; you have to do that yourself! *

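Before the Committer contract below, a rough caller-side sketch of the whole lifecycle may help, using the signatures this patch introduces (identifier, row, and committerSupplier are caller-supplied stand-ins; error handling and result checks omitted):

// Sketch only. The final boolean arguments are allowIncrementalPersists and
// useUniquePath, both discussed later in this interface.
final Object restoredCommitMetadata = appenderator.startJob();
appenderator.add(identifier, row, committerSupplier, true);
appenderator.persistAll(committerSupplier.get()).get();
appenderator.push(appenderator.getSegments(), committerSupplier.get(), true).get();
appenderator.close();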
- * Any time you call one of the methods that adds, persists, or pushes data, you must provide a Committer, or a - * Supplier of one, that represents all data you have given to the Appenderator so far. The Committer will be used when - * that data has been persisted to disk. + * You can provide a {@link Committer} or a Supplier of one when you call one of the methods that add, persist, or + * push data ({@link #add}, {@link #persistAll}, or {@link #push}). The Committer should represent all data you have given to the Appenderator so + * far. This Committer will be used when that data has been persisted to disk. */ public interface Appenderator extends QuerySegmentWalker, Closeable { @@ -72,8 +73,9 @@ default AppenderatorAddResult add(SegmentIdentifier identifier, InputRow row, Su * Committer is guaranteed to be *created* synchronously with the call to add, but will actually be used * asynchronously. *

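Because the Committer is created synchronously with add but consumed asynchronously, a supplier will typically snapshot its metadata eagerly. A hedged illustration; the offset map is a hypothetical example of caller metadata, not part of this interface:

// Each Committer captures the ingestion position at the moment add() is
// called, even though run() executes only after that data is durably persisted.
final Map<Integer, Long> offsets = new ConcurrentHashMap<>();
final Supplier<Committer> committerSupplier = () -> {
  final Map<Integer, Long> snapshot = new HashMap<>(offsets);
  return new Committer()
  {
    @Override
    public Object getMetadata()
    {
      return snapshot;
    }

    @Override
    public void run()
    {
      // invoked once the snapshotted data is durable; nothing to do in this sketch
    }
  };
};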
    - * The add, clear, persist, persistAll, and push methods should all be called from the same thread to keep the - * metadata committed by Committer in sync. + * If committer is not provided, no metadata is persisted. If it's provided, {@link #add}, {@link #clear}, + * {@link #persistAll}, and {@link #push} methods should all be called from the same thread to keep the metadata + * committed by Committer in sync. * * @param identifier the segment into which this row should be added * @param row the row to add @@ -94,7 +96,7 @@ default AppenderatorAddResult add(SegmentIdentifier identifier, InputRow row, Su AppenderatorAddResult add( SegmentIdentifier identifier, InputRow row, - Supplier committerSupplier, + @Nullable Supplier committerSupplier, boolean allowIncrementalPersists ) throws IndexSizeExceededException, SegmentNotWritableException; @@ -127,8 +129,8 @@ AppenderatorAddResult add( * for some reason, rows have been added that we do not actually want to hand off. Blocks until all data has been * cleared. This may take some time, since all pending persists must finish first. *

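A caller that decides to rewind, for example after an upstream offset reset, can discard unpublished work with a single call; a minimal sketch:

// Blocks until pending persists finish, then drops all unpublished segments
// and their commit metadata.
appenderator.clear();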
    - * The add, clear, persist, persistAll, and push methods should all be called from the same thread to keep the - * metadata committed by Committer in sync. + * {@link #add}, {@link #clear}, {@link #persistAll}, and {@link #push} methods should all be called from the same + * thread to keep the metadata committed by Committer in sync. */ void clear() throws InterruptedException; @@ -146,58 +148,44 @@ AppenderatorAddResult add( */ ListenableFuture drop(SegmentIdentifier identifier); - /** - * Persist any in-memory indexed data for segments of the given identifiers to durable storage. This may be only - * somewhat durable, e.g. the machine's local disk. The Committer will be made synchronously with the call to - * persist, but will actually be used asynchronously. Any metadata returned by the committer will be associated with - * the data persisted to disk. - *

    - * The add, clear, persist, persistAll, and push methods should all be called from the same thread to keep the - * metadata committed by Committer in sync. - * - * @param identifiers segment identifiers to be persisted - * @param committer a committer associated with all data that has been added to segments of the given identifiers so - * far - * - * @return future that resolves when all pending data to segments of the identifiers has been persisted, contains - * commit metadata for this persist - */ - ListenableFuture persist(Collection identifiers, Committer committer); - /** * Persist any in-memory indexed data to durable storage. This may be only somewhat durable, e.g. the * machine's local disk. The Committer will be made synchronously with the call to persistAll, but will actually * be used asynchronously. Any metadata returned by the committer will be associated with the data persisted to * disk. *

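Callers that need a durability barrier usually block on the returned future; with the @Nullable committer introduced by this patch, data can also be persisted without committing any metadata. A small sketch:

// Resolves to the committer's metadata once all indexed data is on local disk.
final Object commitMetadata = appenderator.persistAll(committer).get();

// New in this patch: a null committer persists the data but commits no metadata.
appenderator.persistAll(null).get();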
    - * The add, clear, persist, persistAll, and push methods should all be called from the same thread to keep the - * metadata committed by Committer in sync. + * If committer is not provided, no metadata is persisted. If it's provided, {@link #add}, {@link #clear}, + * {@link #persistAll}, and {@link #push} methods should all be called from the same thread to keep the metadata + * committed by Committer in sync. * * @param committer a committer associated with all data that has been added so far * * @return future that resolves when all pending data has been persisted, contains commit metadata for this persist */ - default ListenableFuture persistAll(Committer committer) - { - return persist(getSegments(), committer); - } + ListenableFuture persistAll(@Nullable Committer committer); /** * Merge and push particular segments to deep storage. This will trigger an implicit - * {@link #persist(Collection, Committer)} using the provided Committer. + * {@link #persistAll(Committer)} using the provided Committer. *

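The practical effect of the useUniquePath flag added to push below is purely on directory layout, matching the LocalDataSegmentKiller comment earlier in this patch. Roughly, assuming the local deep-storage naming used elsewhere in this change:

// useUniquePath == false: deterministic location, a re-push overwrites in place
//   dataSource/interval/version/partitionNum/index.zip
// useUniquePath == true: an extra UUID level, so concurrent or retried pushes
// never collide
//   dataSource/interval/version/partitionNum/<UUID>/index.zip
appenderator.push(identifiers, committer, /* useUniquePath */ true);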
    * After this method is called, you cannot add new data to any segments that were previously under construction. *

    - * The add, clear, persist, persistAll, and push methods should all be called from the same thread to keep the - * metadata committed by Committer in sync. + * If committer is not provided, no metadata is persisted. If it's provided, {@link #add}, {@link #clear}, + * {@link #persistAll}, and {@link #push} methods should all be called from the same thread to keep the metadata + * committed by Committer in sync. * * @param identifiers list of segments to push * @param committer a committer associated with all data that has been added so far + * @param useUniquePath true if the segment should be written to a path with a unique identifier * * @return future that resolves when all segments have been pushed. The segment list will be the list of segments * that have been pushed and the commit metadata from the Committer. */ - ListenableFuture push(Collection identifiers, Committer committer); + ListenableFuture push( + Collection identifiers, + @Nullable Committer committer, + boolean useUniquePath + ); /** * Stop any currently-running processing and clean up after ourselves. This allows currently running persists and pushes diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriver.java b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriver.java deleted file mode 100644 index 82dc9ef6759a..000000000000 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriver.java +++ /dev/null @@ -1,776 +0,0 @@ -/* - * Licensed to Metamarkets Group Inc. (Metamarkets) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Metamarkets licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package io.druid.segment.realtime.appenderator; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Function; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.base.Supplier; -import com.google.common.base.Throwables; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.SettableFuture; -import io.druid.data.input.Committer; -import io.druid.data.input.InputRow; -import io.druid.java.util.common.ISE; -import io.druid.java.util.common.concurrent.Execs; -import io.druid.java.util.common.logger.Logger; -import io.druid.query.SegmentDescriptor; -import io.druid.segment.realtime.FireDepartmentMetrics; -import io.druid.segment.realtime.plumber.SegmentHandoffNotifier; -import io.druid.segment.realtime.plumber.SegmentHandoffNotifierFactory; -import org.joda.time.DateTime; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; -import java.util.TreeMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; - -/** - * A AppenderatorDriver drives an Appenderator to index a finite stream of data. This class does not help you - * index unbounded streams. All handoff is done at the end of indexing. - *

    - * This class helps with doing things that Appenderators don't, including deciding which segments to use (with a - * SegmentAllocator), publishing segments to the metadata store (with a SegmentPublisher), and monitoring handoff (with - * a SegmentHandoffNotifier). - *

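Although the class is deleted here, with its role taken over by the BaseAppenderatorDriver hierarchy referenced later in this patch, the workflow this javadoc describes is useful context while reading the removal. A rough sketch against the deleted signatures:

// Sketch only; mirrors the public methods of the class being removed.
final Object restoredMetadata = driver.startJob();
driver.add(row, sequenceName, committerSupplier);
driver.persist(committer);
driver.publishAndRegisterHandoff(publisher, committer, sequenceNames).get();
driver.close();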
    - * Note that the commit metadata stored by this class via the underlying Appenderator is not the same metadata as - * you pass in. It's wrapped in some extra metadata needed by the driver. - */ -public class AppenderatorDriver implements Closeable -{ - private static final Logger log = new Logger(AppenderatorDriver.class); - - private final Appenderator appenderator; - private final SegmentAllocator segmentAllocator; - private final SegmentHandoffNotifier handoffNotifier; - private final UsedSegmentChecker usedSegmentChecker; - private final ObjectMapper objectMapper; - private final FireDepartmentMetrics metrics; - - enum SegmentState - { - ACTIVE, - INACTIVE, - PUBLISHING - } - - static class SegmentWithState - { - private SegmentIdentifier segmentIdentifier; - private SegmentState state; - - @JsonCreator - SegmentWithState( - @JsonProperty("segmentIdentifier") SegmentIdentifier segmentIdentifier, - @JsonProperty("state") SegmentState state - ) - { - this.segmentIdentifier = segmentIdentifier; - this.state = state; - } - - @JsonProperty - public SegmentIdentifier getSegmentIdentifier() - { - return segmentIdentifier; - } - - @JsonProperty - public SegmentState getState() - { - return state; - } - - @Override - public String toString() - { - return "SegmentWithState{" + - "segmentIdentifier=" + segmentIdentifier + - ", state=" + state + - '}'; - } - } - - // sequenceName -> {Interval Start millis -> List of Segments for this interval} - // there might be multiple segments for a start interval, for example one segment - // can be in ACTIVE state and others might be in PUBLISHING state - private final Map>> segments = new TreeMap<>(); - - private final Set publishingSequences = new HashSet<>(); - - // sequenceName -> most recently allocated segment - private final Map lastSegmentIds = Maps.newHashMap(); - - private final ListeningExecutorService publishExecutor; - - /** - * Create a driver. - * - * @param appenderator appenderator - * @param segmentAllocator segment allocator - * @param handoffNotifierFactory handoff notifier factory - * @param usedSegmentChecker used segment checker - * @param objectMapper object mapper, used for serde of commit metadata - * @param metrics Firedepartment metrics - */ - public AppenderatorDriver( - Appenderator appenderator, - SegmentAllocator segmentAllocator, - SegmentHandoffNotifierFactory handoffNotifierFactory, - UsedSegmentChecker usedSegmentChecker, - ObjectMapper objectMapper, - FireDepartmentMetrics metrics - ) - { - this.appenderator = Preconditions.checkNotNull(appenderator, "appenderator"); - this.segmentAllocator = Preconditions.checkNotNull(segmentAllocator, "segmentAllocator"); - this.handoffNotifier = Preconditions.checkNotNull(handoffNotifierFactory, "handoffNotifierFactory") - .createSegmentHandoffNotifier(appenderator.getDataSource()); - this.usedSegmentChecker = Preconditions.checkNotNull(usedSegmentChecker, "usedSegmentChecker"); - this.objectMapper = Preconditions.checkNotNull(objectMapper, "objectMapper"); - this.metrics = Preconditions.checkNotNull(metrics, "metrics"); - this.publishExecutor = MoreExecutors.listeningDecorator(Execs.singleThreaded("publish-%d")); - } - - @VisibleForTesting - Map>> getSegments() - { - return segments; - } - - /** - * Perform any initial setup and return currently persisted commit metadata. - *

    - * Note that this method returns the same metadata you've passed in with your Committers, even though this class - * stores extra metadata on disk. - * - * @return currently persisted commit metadata - */ - public Object startJob() - { - handoffNotifier.start(); - - final AppenderatorDriverMetadata metadata = objectMapper.convertValue( - appenderator.startJob(), - AppenderatorDriverMetadata.class - ); - - log.info("Restored metadata[%s].", metadata); - - if (metadata != null) { - synchronized (segments) { - for (Map.Entry> entry : metadata.getSegments().entrySet()) { - final String sequenceName = entry.getKey(); - final TreeMap> segmentMap = Maps.newTreeMap(); - - segments.put(sequenceName, segmentMap); - - for (SegmentWithState segmentWithState : entry.getValue()) { - segmentMap.computeIfAbsent( - segmentWithState.getSegmentIdentifier().getInterval().getStartMillis(), - k -> new LinkedList<>() - ); - LinkedList segmentList = segmentMap.get(segmentWithState.getSegmentIdentifier() - .getInterval() - .getStartMillis()); - // always keep the ACTIVE segment for an interval start millis in the front - if (segmentWithState.getState() == SegmentState.ACTIVE) { - segmentList.addFirst(segmentWithState); - } else { - segmentList.addLast(segmentWithState); - } - } - } - lastSegmentIds.putAll(metadata.getLastSegmentIds()); - } - - return metadata.getCallerMetadata(); - } else { - return null; - } - } - - private void addSegment(String sequenceName, SegmentIdentifier identifier) - { - synchronized (segments) { - segments.computeIfAbsent(sequenceName, k -> new TreeMap<>()) - .computeIfAbsent(identifier.getInterval().getStartMillis(), k -> new LinkedList<>()) - .addFirst(new SegmentWithState(identifier, SegmentState.ACTIVE)); - lastSegmentIds.put(sequenceName, identifier.getIdentifierAsString()); - } - } - - /** - * Clears out all our state and also calls {@link Appenderator#clear()} on the underlying Appenderator. - */ - public void clear() throws InterruptedException - { - synchronized (segments) { - segments.clear(); - } - appenderator.clear(); - } - - public AppenderatorDriverAddResult add( - final InputRow row, - final String sequenceName, - final Supplier committerSupplier - ) throws IOException - { - return add(row, sequenceName, committerSupplier, false, true); - } - - /** - * Add a row. Must not be called concurrently from multiple threads. - * - * @param row the row to add - * @param sequenceName sequenceName for this row's segment - * @param committerSupplier supplier of a committer associated with all data that has been added, including this row - * if {@param allowIncrementalPersists} is set to false then this will not be used - * @param skipSegmentLineageCheck if true, perform lineage validation using previousSegmentId for this sequence. 
- * Should be set to false if replica tasks would index events in same order - * @param allowIncrementalPersists whether to allow persist to happen when maxRowsInMemory or intermediate persist period - * threshold is hit - * - * @return {@link AppenderatorDriverAddResult} - * - * @throws IOException if there is an I/O error while allocating or writing to a segment - */ - - public AppenderatorDriverAddResult add( - final InputRow row, - final String sequenceName, - final Supplier committerSupplier, - final boolean skipSegmentLineageCheck, - final boolean allowIncrementalPersists - ) throws IOException - { - Preconditions.checkNotNull(row, "row"); - Preconditions.checkNotNull(sequenceName, "sequenceName"); - Preconditions.checkNotNull(committerSupplier, "committerSupplier"); - - final SegmentIdentifier identifier = getSegment(row, sequenceName, skipSegmentLineageCheck); - - if (identifier != null) { - try { - final Appenderator.AppenderatorAddResult result = appenderator.add( - identifier, - row, - wrapCommitterSupplier(committerSupplier), - allowIncrementalPersists - ); - return AppenderatorDriverAddResult.ok( - identifier, - result.getNumRowsInSegment(), - appenderator.getTotalRowCount(), - result.isPersistRequired() - ); - } - catch (SegmentNotWritableException e) { - throw new ISE(e, "WTF?! Segment[%s] not writable when it should have been.", identifier); - } - } else { - return AppenderatorDriverAddResult.fail(); - } - } - - /** - * Persist all data indexed through this driver so far. Blocks until complete. - *

    - * Should be called after all data has been added through {@link #add(InputRow, String, Supplier, boolean, boolean)}. - * - * @param committer committer representing all data that has been added so far - * - * @return commitMetadata persisted - */ - public Object persist(final Committer committer) throws InterruptedException - { - try { - log.info("Persisting data."); - final long start = System.currentTimeMillis(); - final Object commitMetadata = appenderator.persistAll(wrapCommitter(committer)).get(); - log.info("Persisted pending data in %,dms.", System.currentTimeMillis() - start); - return commitMetadata; - } - catch (InterruptedException e) { - throw e; - } - catch (Exception e) { - throw Throwables.propagate(e); - } - } - - /** - * Persist all data indexed through this driver so far. Returns a future of persisted commitMetadata. - *

    - * Should be called after all data has been added through {@link #add(InputRow, String, Supplier, boolean, boolean)}. - * - * @param committer committer representing all data that has been added so far - * - * @return future containing commitMetadata persisted - */ - public ListenableFuture persistAsync(final Committer committer) - throws InterruptedException, ExecutionException - { - log.info("Persisting data asynchronously"); - return appenderator.persistAll(wrapCommitter(committer)); - } - - /** - * Register the segments in the given {@link SegmentsAndMetadata} to be handed off and execute a background task which - * waits until the hand off completes. - * - * @param segmentsAndMetadata the result segments and metadata of - * {@link #publish(TransactionalSegmentPublisher, Committer, Collection)} - * - * @return null if the input segmentsAndMetadata is null. Otherwise, a {@link ListenableFuture} for the submitted task - * which returns {@link SegmentsAndMetadata} containing the segments successfully handed off and the metadata - * of the caller of {@link AppenderatorDriverMetadata} - */ - public ListenableFuture registerHandoff(SegmentsAndMetadata segmentsAndMetadata) - { - if (segmentsAndMetadata == null) { - return Futures.immediateFuture(null); - - } else { - final List waitingSegmentIdList = segmentsAndMetadata.getSegments().stream() - .map(SegmentIdentifier::fromDataSegment) - .collect(Collectors.toList()); - - if (waitingSegmentIdList.isEmpty()) { - return Futures.immediateFuture( - new SegmentsAndMetadata( - segmentsAndMetadata.getSegments(), - ((AppenderatorDriverMetadata) segmentsAndMetadata.getCommitMetadata()) - .getCallerMetadata() - ) - ); - } - - log.info("Register handoff of segments: [%s]", waitingSegmentIdList); - - final SettableFuture resultFuture = SettableFuture.create(); - final AtomicInteger numRemainingHandoffSegments = new AtomicInteger(waitingSegmentIdList.size()); - - for (final SegmentIdentifier segmentIdentifier : waitingSegmentIdList) { - handoffNotifier.registerSegmentHandoffCallback( - new SegmentDescriptor( - segmentIdentifier.getInterval(), - segmentIdentifier.getVersion(), - segmentIdentifier.getShardSpec().getPartitionNum() - ), - MoreExecutors.sameThreadExecutor(), - () -> { - log.info("Segment[%s] successfully handed off, dropping.", segmentIdentifier); - metrics.incrementHandOffCount(); - - final ListenableFuture dropFuture = appenderator.drop(segmentIdentifier); - Futures.addCallback( - dropFuture, - new FutureCallback() - { - @Override - public void onSuccess(Object result) - { - if (numRemainingHandoffSegments.decrementAndGet() == 0) { - log.info("Successfully handed off [%d] segments.", segmentsAndMetadata.getSegments().size()); - resultFuture.set( - new SegmentsAndMetadata( - segmentsAndMetadata.getSegments(), - ((AppenderatorDriverMetadata) segmentsAndMetadata.getCommitMetadata()) - .getCallerMetadata() - ) - ); - } - } - - @Override - public void onFailure(Throwable e) - { - log.warn(e, "Failed to drop segment[%s]?!", segmentIdentifier); - numRemainingHandoffSegments.decrementAndGet(); - resultFuture.setException(e); - } - } - ); - } - ); - } - - return resultFuture; - } - } - - /** - * Closes this driver. Does not close the underlying Appenderator; you should do that yourself. 
- */ - @Override - public void close() - { - publishExecutor.shutdownNow(); - handoffNotifier.close(); - } - - private SegmentIdentifier getActiveSegment(final DateTime timestamp, final String sequenceName) - { - synchronized (segments) { - final NavigableMap> segmentsForSequence = segments.get(sequenceName); - - if (segmentsForSequence == null) { - return null; - } - - final Map.Entry> candidateEntry = segmentsForSequence.floorEntry(timestamp.getMillis()); - if (candidateEntry != null - && candidateEntry.getValue().getFirst().getSegmentIdentifier().getInterval().contains(timestamp) - && candidateEntry.getValue().getFirst().getState().equals(SegmentState.ACTIVE)) { - return candidateEntry.getValue().getFirst().getSegmentIdentifier(); - } else { - return null; - } - } - } - - /** - * Return a segment usable for "timestamp". May return null if no segment can be allocated. - * - * @param row input row - * @param sequenceName sequenceName for potential segment allocation - * @param skipSegmentLineageCheck if false, perform lineage validation using previousSegmentId for this sequence. - * Should be set to false if replica tasks would index events in same order - * - * @return identifier, or null - * - * @throws IOException if an exception occurs while allocating a segment - */ - private SegmentIdentifier getSegment( - final InputRow row, - final String sequenceName, - final boolean skipSegmentLineageCheck - ) throws IOException - { - synchronized (segments) { - final DateTime timestamp = row.getTimestamp(); - final SegmentIdentifier existing = getActiveSegment(timestamp, sequenceName); - if (existing != null) { - return existing; - } else { - // Allocate new segment. - final SegmentIdentifier newSegment = segmentAllocator.allocate( - row, - sequenceName, - lastSegmentIds.get(sequenceName), - // send lastSegmentId irrespective of skipSegmentLineageCheck so that - // unique constraint for sequence_name_prev_id_sha1 does not fail for - // allocatePendingSegment in IndexerSQLMetadataStorageCoordinator - skipSegmentLineageCheck - ); - - if (newSegment != null) { - for (SegmentIdentifier identifier : appenderator.getSegments()) { - if (identifier.equals(newSegment)) { - throw new ISE( - "WTF?! Allocated segment[%s] which conflicts with existing segment[%s].", - newSegment, - identifier - ); - } - } - - log.info("New segment[%s] for sequenceName[%s].", newSegment, sequenceName); - addSegment(sequenceName, newSegment); - } else { - // Well, we tried. - log.warn("Cannot allocate segment for timestamp[%s], sequenceName[%s]. ", timestamp, sequenceName); - } - - return newSegment; - } - } - } - - /** - * Move a set of identifiers out from "active", making way for newer segments. - */ - public void moveSegmentOut(final String sequenceName, final List identifiers) - { - synchronized (segments) { - final NavigableMap> activeSegmentsForSequence = segments.get(sequenceName); - if (activeSegmentsForSequence == null) { - throw new ISE("WTF?! 
Asked to remove segments for sequenceName[%s] which doesn't exist...", sequenceName); - } - - for (final SegmentIdentifier identifier : identifiers) { - log.info("Moving segment[%s] out of active list.", identifier); - final long key = identifier.getInterval().getStartMillis(); - if (activeSegmentsForSequence.get(key) == null || activeSegmentsForSequence.get(key).stream().noneMatch( - segmentWithState -> { - if (segmentWithState.getSegmentIdentifier().equals(identifier)) { - segmentWithState.state = SegmentState.INACTIVE; - return true; - } else { - return false; - } - } - )) { - throw new ISE("WTF?! Asked to remove segment[%s] that didn't exist...", identifier); - } - } - } - } - - /** - * Publish all pending segments. - * - * @param publisher segment publisher - * @param committer committer - * - * @return a {@link ListenableFuture} for the publish task which removes published {@code sequenceNames} from - * {@code activeSegments} and {@code publishPendingSegments} - */ - public ListenableFuture publishAll( - final TransactionalSegmentPublisher publisher, - final Committer committer - ) - { - final List theSequences; - synchronized (segments) { - theSequences = ImmutableList.copyOf(segments.keySet()); - } - return publish(publisher, wrapCommitter(committer), theSequences); - } - - /** - * Execute a task in background to publish all segments corresponding to the given sequence names. The task - * internally pushes the segments to the deep storage first, and then publishes the metadata to the metadata storage. - * - * @param publisher segment publisher - * @param committer committer - * @param sequenceNames a collection of sequence names to be published - * - * @return a {@link ListenableFuture} for the submitted task which removes published {@code sequenceNames} from - * {@code activeSegments} and {@code publishPendingSegments} - */ - public ListenableFuture publish( - final TransactionalSegmentPublisher publisher, - final Committer committer, - final Collection sequenceNames - ) - { - final List theSegments = new ArrayList<>(); - synchronized (segments) { - sequenceNames.stream() - .filter(sequenceName -> !publishingSequences.contains(sequenceName)) - .forEach(sequenceName -> { - if (segments.containsKey(sequenceName)) { - publishingSequences.add(sequenceName); - segments.get(sequenceName) - .values() - .stream() - .flatMap(Collection::stream) - .forEach(segmentWithState -> { - segmentWithState.state = SegmentState.PUBLISHING; - theSegments.add(segmentWithState.getSegmentIdentifier()); - }); - } - }); - } - - final ListenableFuture publishFuture = publish( - publisher, - wrapCommitter(committer), - theSegments - ); - - Futures.addCallback( - publishFuture, - new FutureCallback() - { - @Override - public void onSuccess(SegmentsAndMetadata result) - { - if (result != null) { - publishingSequences.removeAll(sequenceNames); - sequenceNames.forEach(segments::remove); - } - } - - @Override - public void onFailure(Throwable t) - { - // Do nothing, caller should handle the exception - log.error("Error publishing sequences [%s]", sequenceNames); - } - } - ); - - return publishFuture; - } - - /** - * Execute a task in background to publish the given segments. The task blocks until complete. - * Retries forever on transient failures, but may exit early on permanent failures. - *

    - * Should be called after all data has been added through {@link #add(InputRow, String, Supplier, boolean, boolean)}. - * - * @param publisher publisher to use for this set of segments - * @param wrappedCommitter committer representing all data that has been added so far - * - * @return segments and metadata published if successful, or null if segments could not be handed off due to - * transaction failure with commit metadata. - */ - private ListenableFuture publish( - final TransactionalSegmentPublisher publisher, - final WrappedCommitter wrappedCommitter, - final List segmentIdentifiers - ) - { - log.info("Pushing segments: [%s]", Joiner.on(", ").join(segmentIdentifiers)); - - return Futures.transform( - appenderator.push(segmentIdentifiers, wrappedCommitter), - (Function) segmentsAndMetadata -> { - // Sanity check - final Set pushedSegments = segmentsAndMetadata.getSegments().stream() - .map(SegmentIdentifier::fromDataSegment) - .collect(Collectors.toSet()); - if (!pushedSegments.equals(Sets.newHashSet(segmentIdentifiers))) { - throw new ISE( - "WTF?! Pushed different segments than requested. Pushed[%s], requested[%s].", - pushedSegments, - segmentIdentifiers - ); - } - - if (segmentsAndMetadata.getSegments().isEmpty()) { - log.info("Nothing to publish, skipping publish step."); - } else { - log.info( - "Publishing segments with commitMetadata[%s]: [%s]", - segmentsAndMetadata.getCommitMetadata(), - Joiner.on(", ").join(segmentsAndMetadata.getSegments()) - ); - - try { - final boolean published = publisher.publishSegments( - ImmutableSet.copyOf(segmentsAndMetadata.getSegments()), - ((AppenderatorDriverMetadata) segmentsAndMetadata.getCommitMetadata()).getCallerMetadata() - ); - - if (published) { - log.info("Published segments."); - } else { - log.info("Transaction failure while publishing segments, checking if someone else beat us to it."); - if (usedSegmentChecker.findUsedSegments(pushedSegments) - .equals(Sets.newHashSet(segmentsAndMetadata.getSegments()))) { - log.info("Our segments really do exist, awaiting handoff."); - } else { - throw new ISE("Failed to publish segments[%s]", segmentIdentifiers); - } - } - } - catch (IOException e) { - throw Throwables.propagate(e); - } - } - - return segmentsAndMetadata; - }, - publishExecutor - ); - } - - public ListenableFuture publishAndRegisterHandoff( - final TransactionalSegmentPublisher publisher, - final Committer committer, - final Collection sequenceNames - ) - { - return Futures.transform( - publish(publisher, committer, sequenceNames), - this::registerHandoff - ); - } - - private interface WrappedCommitter extends Committer - { - } - - private Supplier wrapCommitterSupplier(final Supplier committerSupplier) - { - return () -> wrapCommitter(committerSupplier.get()); - } - - private WrappedCommitter wrapCommitter(final Committer committer) - { - final AppenderatorDriverMetadata wrappedMetadata; - synchronized (segments) { - wrappedMetadata = new AppenderatorDriverMetadata( - ImmutableMap.copyOf( - Maps.transformValues( - segments, - (Function>, List>) input -> ImmutableList - .copyOf(input.values().stream().flatMap(Collection::stream).collect(Collectors.toList())) - ) - ), - ImmutableMap.copyOf(lastSegmentIds), - committer.getMetadata() - ); - } - - return new WrappedCommitter() - { - @Override - public Object getMetadata() - { - return wrappedMetadata; - } - - @Override - public void run() - { - committer.run(); - } - }; - } -} diff --git 
a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriverAddResult.java b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriverAddResult.java index 4838d58a227f..c5afa3a78205 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriverAddResult.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriverAddResult.java @@ -25,7 +25,7 @@ import javax.annotation.Nullable; /** - * Result of {@link AppenderatorDriver#add(InputRow, String, Supplier, boolean)}. It contains the identifier of the + * Result of {@link BaseAppenderatorDriver#add(InputRow, String, Supplier, boolean)}. It contains the identifier of the * segment which the InputRow is added to, the number of rows in that segment and if persist is required because either * maxRowsInMemory or intermediate persist period threshold is hit. */ diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriverMetadata.java b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriverMetadata.java index 0e8722c7d087..d1e002a66dab 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriverMetadata.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorDriverMetadata.java @@ -24,6 +24,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import io.druid.segment.realtime.appenderator.SegmentWithState.SegmentState; import java.util.ArrayList; import java.util.List; @@ -33,13 +34,13 @@ public class AppenderatorDriverMetadata { - private final Map> segments; + private final Map> segments; private final Map lastSegmentIds; private final Object callerMetadata; @JsonCreator public AppenderatorDriverMetadata( - @JsonProperty("segments") Map> segments, + @JsonProperty("segments") Map> segments, @JsonProperty("lastSegmentIds") Map lastSegmentIds, @JsonProperty("callerMetadata") Object callerMetadata, // Next two properties are for backwards compatibility, should be removed on versions greater than 0.12.x @@ -57,7 +58,7 @@ public AppenderatorDriverMetadata( ); if (segments == null) { // convert old metadata to new one - final Map> newMetadata = Maps.newHashMap(); + final Map> newMetadata = Maps.newHashMap(); final Set activeSegmentsAlreadySeen = Sets.newHashSet(); // temp data structure activeSegments.entrySet() @@ -67,10 +68,7 @@ public AppenderatorDriverMetadata( .stream() .map(segmentIdentifier -> { activeSegmentsAlreadySeen.add(segmentIdentifier.toString()); - return new AppenderatorDriver.SegmentWithState( - segmentIdentifier, - AppenderatorDriver.SegmentState.ACTIVE - ); + return SegmentWithState.newSegment(segmentIdentifier); }) .collect(Collectors.toList()) )); @@ -84,9 +82,9 @@ public AppenderatorDriverMetadata( .stream() .filter(segmentIdentifier -> !activeSegmentsAlreadySeen.contains( segmentIdentifier.toString())) - .map(segmentIdentifier -> new AppenderatorDriver.SegmentWithState( + .map(segmentIdentifier -> SegmentWithState.newSegment( segmentIdentifier, - AppenderatorDriver.SegmentState.INACTIVE + SegmentState.APPEND_FINISHED )) .collect(Collectors.toList()) )); @@ -99,7 +97,7 @@ public AppenderatorDriverMetadata( } public AppenderatorDriverMetadata( - Map> segments, + Map> segments, Map lastSegmentIds, Object callerMetadata ) @@ -108,7 +106,7 @@ public AppenderatorDriverMetadata( } @JsonProperty - public Map> getSegments() + public Map> getSegments() { return 
segments; } diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorImpl.java b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorImpl.java index aec0f751a3b6..70c22187d8dd 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorImpl.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorImpl.java @@ -37,8 +37,8 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.common.guava.ThreadRenamingCallable; @@ -202,7 +202,7 @@ public Object startJob() public AppenderatorAddResult add( final SegmentIdentifier identifier, final InputRow row, - final Supplier committerSupplier, + @Nullable final Supplier committerSupplier, final boolean allowIncrementalPersists ) throws IndexSizeExceededException, SegmentNotWritableException { @@ -244,7 +244,7 @@ public AppenderatorAddResult add( || rowsCurrentlyInMemory.get() >= tuningConfig.getMaxRowsInMemory()) { if (allowIncrementalPersists) { // persistAll clears rowsCurrentlyInMemory, no need to update it. - persistAll(committerSupplier.get()); + persistAll(committerSupplier == null ? null : committerSupplier.get()); } else { isPersistRequired = true; } @@ -340,35 +340,37 @@ public void clear() throws InterruptedException // Drop commit metadata, then abandon all segments. try { - final ListenableFuture uncommitFuture = persistExecutor.submit( - new Callable() - { - @Override - public Object call() throws Exception + if (persistExecutor != null) { + final ListenableFuture uncommitFuture = persistExecutor.submit( + new Callable() { - try { - commitLock.lock(); - objectMapper.writeValue(computeCommitFile(), Committed.nil()); - } - finally { - commitLock.unlock(); + @Override + public Object call() throws Exception + { + try { + commitLock.lock(); + objectMapper.writeValue(computeCommitFile(), Committed.nil()); + } + finally { + commitLock.unlock(); + } + return null; } - return null; } - } - ); + ); - // Await uncommit. - uncommitFuture.get(); + // Await uncommit. + uncommitFuture.get(); - // Drop everything. - final List> futures = Lists.newArrayList(); - for (Map.Entry entry : sinks.entrySet()) { - futures.add(abandonSegment(entry.getKey(), entry.getValue(), true)); - } + // Drop everything. + final List> futures = Lists.newArrayList(); + for (Map.Entry entry : sinks.entrySet()) { + futures.add(abandonSegment(entry.getKey(), entry.getValue(), true)); + } - // Await dropping. - Futures.allAsList(futures).get(); + // Await dropping. 
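One detail of the clear() change above is easy to miss: before any sinks are abandoned (the drop futures are awaited just below), the driver first "uncommits" by overwriting the commit file with Committed.nil() under commitLock, so a restarted process cannot restore metadata for segments that are about to disappear. A minimal sketch of that pattern, where commitFile and the empty-map payload are hypothetical stand-ins for computeCommitFile() and Committed.nil():

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.locks.ReentrantLock;

class UncommitSketch
{
  private final ObjectMapper objectMapper = new ObjectMapper();
  private final ReentrantLock commitLock = new ReentrantLock();

  // Replace the on-disk commit metadata with an empty commit while holding the commit lock,
  // mirroring the writeValue(computeCommitFile(), Committed.nil()) call in the patch above.
  void uncommit(File commitFile) throws IOException
  {
    commitLock.lock();
    try {
      objectMapper.writeValue(commitFile, Collections.emptyMap());
    }
    finally {
      commitLock.unlock();
    }
  }
}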
+ Futures.allAsList(futures).get(); + } } catch (ExecutionException e) { throw Throwables.propagate(e); @@ -387,12 +389,12 @@ public ListenableFuture drop(final SegmentIdentifier identifier) } @Override - public ListenableFuture persist(Collection identifiers, Committer committer) + public ListenableFuture persistAll(@Nullable final Committer committer) { final Map currentHydrants = Maps.newHashMap(); final List> indexesToPersist = Lists.newArrayList(); int numPersistedRows = 0; - for (SegmentIdentifier identifier : identifiers) { + for (SegmentIdentifier identifier : sinks.keySet()) { final Sink sink = sinks.get(identifier); if (sink == null) { throw new ISE("No sink for identifier: %s", identifier); @@ -418,7 +420,7 @@ public ListenableFuture persist(Collection identifier log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource()); final String threadName = StringUtils.format("%s-incremental-persist", schema.getDataSource()); - final Object commitMetadata = committer.getMetadata(); + final Object commitMetadata = committer == null ? null : committer.getMetadata(); final Stopwatch runExecStopwatch = Stopwatch.createStarted(); final Stopwatch persistStopwatch = Stopwatch.createStarted(); final ListenableFuture future = persistExecutor.submit( @@ -432,37 +434,39 @@ public Object doCall() metrics.incrementRowOutputCount(persistHydrant(pair.lhs, pair.rhs)); } - log.info( - "Committing metadata[%s] for sinks[%s].", commitMetadata, Joiner.on(", ").join( - currentHydrants.entrySet() - .stream() - .map(entry -> StringUtils.format( - "%s:%d", - entry.getKey(), - entry.getValue() - )) - .collect(Collectors.toList()) - ) - ); - - committer.run(); - - try { - commitLock.lock(); - final File commitFile = computeCommitFile(); - final Map commitHydrants = Maps.newHashMap(); - if (commitFile.exists()) { - // merge current hydrants with existing hydrants - final Committed oldCommitted = objectMapper.readValue(commitFile, Committed.class); - commitHydrants.putAll(oldCommitted.getHydrants()); + if (committer != null) { + log.info( + "Committing metadata[%s] for sinks[%s].", commitMetadata, Joiner.on(", ").join( + currentHydrants.entrySet() + .stream() + .map(entry -> StringUtils.format( + "%s:%d", + entry.getKey(), + entry.getValue() + )) + .collect(Collectors.toList()) + ) + ); + + committer.run(); + + try { + commitLock.lock(); + final Map commitHydrants = Maps.newHashMap(); + final Committed oldCommit = readCommit(); + if (oldCommit != null) { + // merge current hydrants with existing hydrants + commitHydrants.putAll(oldCommit.getHydrants()); + } + commitHydrants.putAll(currentHydrants); + writeCommit(new Committed(commitHydrants, commitMetadata)); + } + finally { + commitLock.unlock(); } - commitHydrants.putAll(currentHydrants); - objectMapper.writeValue(commitFile, new Committed(commitHydrants, commitMetadata)); - } - finally { - commitLock.unlock(); } + // return null if committer is null return commitMetadata; } catch (Exception e) { @@ -492,17 +496,11 @@ public Object doCall() return future; } - @Override - public ListenableFuture persistAll(final Committer committer) - { - // Submit persistAll task to the persistExecutor - return persist(sinks.keySet(), committer); - } - @Override public ListenableFuture push( final Collection identifiers, - final Committer committer + @Nullable final Committer committer, + final boolean useUniquePath ) { final Map theSinks = Maps.newHashMap(); @@ -516,7 +514,9 @@ public ListenableFuture push( } return Futures.transform( - 
persist(identifiers, committer), + // We should always persist all segments regardless of the input because metadata should be committed for all + // segments. + persistAll(committer), (Function) commitMetadata -> { final List dataSegments = Lists.newArrayList(); @@ -526,7 +526,7 @@ public ListenableFuture push( continue; } - final DataSegment dataSegment = mergeAndPush(entry.getKey(), entry.getValue()); + final DataSegment dataSegment = mergeAndPush(entry.getKey(), entry.getValue(), useUniquePath); if (dataSegment != null) { dataSegments.add(dataSegment); } else { @@ -558,11 +558,11 @@ private ListenableFuture pushBarrier() * * @param identifier sink identifier * @param sink sink to push + * @param useUniquePath true if the segment should be written to a path with a unique identifier * * @return segment descriptor, or null if the sink is no longer valid */ - - private DataSegment mergeAndPush(final SegmentIdentifier identifier, final Sink sink) + private DataSegment mergeAndPush(final SegmentIdentifier identifier, final Sink sink, final boolean useUniquePath) { // Bail out if this sink is null or otherwise not what we expect. if (sinks.get(identifier) != sink) { @@ -633,9 +633,13 @@ private DataSegment mergeAndPush(final SegmentIdentifier identifier, final Sink // Retry pushing segments because uploading to deep storage might fail especially for cloud storage types final DataSegment segment = RetryUtils.retry( + // The appenderator is currently being used for the local indexing task and the Kafka indexing task. For the + // Kafka indexing task, pushers must use unique file paths in deep storage in order to maintain exactly-once + // semantics. () -> dataSegmentPusher.push( mergedFile, - sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes)) + sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes)), + useUniquePath ), exception -> exception instanceof Exception, 5 @@ -694,6 +698,9 @@ public void close() intermediateTempExecutor == null || intermediateTempExecutor.awaitTermination(365, TimeUnit.DAYS), "intermediateTempExecutor not terminated" ); + persistExecutor = null; + pushExecutor = null; + intermediateTempExecutor = null; } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -733,6 +740,7 @@ public void closeNow() } try { shutdownExecutors(); + // We don't wait for pushExecutor to be terminated. See Javadoc for more details. Preconditions.checkState( persistExecutor == null || persistExecutor.awaitTermination(365, TimeUnit.DAYS), "persistExecutor not terminated" @@ -741,6 +749,8 @@ public void closeNow() intermediateTempExecutor == null || intermediateTempExecutor.awaitTermination(365, TimeUnit.DAYS), "intermediateTempExecutor not terminated" ); + persistExecutor = null; + intermediateTempExecutor = null; } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -1020,7 +1030,7 @@ public Object apply(@Nullable Object input) { if (sinks.get(identifier) != sink) { // Only abandon sink if it is the same one originally requested to be abandoned. 
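Stepping back to the mergeAndPush(...) change above: the RetryUtils.retry(...) call bounds deep-storage pushes at five attempts and retries on any Exception, which rides out transient cloud-storage failures without retrying a permanently broken push forever. A standalone sketch of that control flow (RetryUtils is Druid's own utility; this illustrative version only mimics its shape):

import java.util.concurrent.Callable;
import java.util.function.Predicate;

final class RetrySketch
{
  // Run the task up to maxTries times; rethrow immediately if the failure is not retryable
  // or the attempt budget is exhausted.
  static <T> T retry(Callable<T> task, Predicate<Throwable> shouldRetry, int maxTries) throws Exception
  {
    for (int attempt = 1; ; attempt++) {
      try {
        return task.call();
      }
      catch (Exception e) {
        if (!shouldRetry.test(e) || attempt >= maxTries) {
          throw e;
        }
      }
    }
  }
}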
- log.warn("Sink for segment[%s] no longer valid, not abandoning."); + log.warn("Sink for segment[%s] no longer valid, not abandoning.", identifier); return null; } @@ -1029,10 +1039,9 @@ public Object apply(@Nullable Object input) log.info("Removing commit metadata for segment[%s].", identifier); try { commitLock.lock(); - final File commitFile = computeCommitFile(); - if (commitFile.exists()) { - final Committed oldCommitted = objectMapper.readValue(commitFile, Committed.class); - objectMapper.writeValue(commitFile, oldCommitted.without(identifier.getIdentifierAsString())); + final Committed oldCommit = readCommit(); + if (oldCommit != null) { + writeCommit(oldCommit.without(identifier.getIdentifierAsString())); } } catch (Exception e) { @@ -1085,6 +1094,23 @@ public Object apply(@Nullable Object input) ); } + private Committed readCommit() throws IOException + { + final File commitFile = computeCommitFile(); + if (commitFile.exists()) { + // merge current hydrants with existing hydrants + return objectMapper.readValue(commitFile, Committed.class); + } else { + return null; + } + } + + private void writeCommit(Committed newCommit) throws IOException + { + final File commitFile = computeCommitFile(); + objectMapper.writeValue(commitFile, newCommit); + } + private File computeCommitFile() { return new File(tuningConfig.getBasePersistDirectory(), "commit.json"); diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorPlumber.java b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorPlumber.java index 2d34065d5023..a3683d0950a2 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorPlumber.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/AppenderatorPlumber.java @@ -29,7 +29,7 @@ import com.google.common.collect.Lists; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.common.guava.ThreadRenamingCallable; import io.druid.java.util.common.concurrent.Execs; import io.druid.data.input.Committer; @@ -463,7 +463,7 @@ public String apply(SegmentIdentifier input) // WARNING: Committers.nil() here means that on-disk data can get out of sync with committing. 
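On the WARNING above (and the push(segmentsToPush, Committers.nil(), false) call just below): a "nil" committer carries no metadata and does nothing when run, so persists triggered through it advance segment data on disk without recording anything for a restarted task to restore. Roughly, such a committer looks like the sketch below, which assumes only Committer's shape (a Runnable with getMetadata()); it is not the actual Committers.nil() source:

import io.druid.data.input.Committer;

final class NilCommitterSketch
{
  static Committer nilCommitter()
  {
    return new Committer()
    {
      @Override
      public Object getMetadata()
      {
        return null; // nothing for a restarted task to restore
      }

      @Override
      public void run()
      {
        // no-op: nothing is marked as durably committed
      }
    };
  }
}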
Futures.addCallback( - appenderator.push(segmentsToPush, Committers.nil()), + appenderator.push(segmentsToPush, Committers.nil(), false), new FutureCallback() { @Override diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/Appenderators.java b/server/src/main/java/io/druid/segment/realtime/appenderator/Appenderators.java index 1014f1380a74..e3fabbc0b410 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/Appenderators.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/Appenderators.java @@ -20,7 +20,7 @@ package io.druid.segment.realtime.appenderator; import com.fasterxml.jackson.databind.ObjectMapper; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.java.util.common.StringUtils; diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java b/server/src/main/java/io/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java new file mode 100644 index 000000000000..1f04236c41c7 --- /dev/null +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/BaseAppenderatorDriver.java @@ -0,0 +1,685 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.segment.realtime.appenderator; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import io.druid.data.input.Committer; +import io.druid.data.input.InputRow; +import io.druid.java.util.common.ISE; +import io.druid.java.util.common.concurrent.Execs; +import io.druid.java.util.common.logger.Logger; +import io.druid.segment.loading.DataSegmentKiller; +import io.druid.segment.realtime.appenderator.SegmentWithState.SegmentState; +import org.joda.time.DateTime; +import org.joda.time.Interval; + +import javax.annotation.Nullable; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NavigableMap; +import java.util.Objects; +import java.util.Set; +import java.util.TreeMap; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * A BaseAppenderatorDriver drives an Appenderator to index a finite stream of data. This class does not help you + * index unbounded streams. All handoff is done at the end of indexing. + *

    + * This class helps with doing things that Appenderators don't, including deciding which segments to use (with a + * SegmentAllocator), publishing segments to the metadata store (with a SegmentPublisher). + *

    + * This class has two child classes, i.e., {@link BatchAppenderatorDriver} and {@link StreamAppenderatorDriver}, + * which are for batch and streaming ingestion, respectively. This class provides some fundamental methods for making + * the child classes' life easier like {@link #pushInBackground}, {@link #dropInBackground}, or + * {@link #publishInBackground}. The child classes can use these methods to achieve their goal. + *

+ * Note that the commit metadata stored by this class via the underlying Appenderator is not the same metadata as + * you pass in. It's wrapped in some extra metadata needed by the driver. + */ +public abstract class BaseAppenderatorDriver implements Closeable +{ + /** + * Segments allocated for an interval. + * There should be at most a single active (appending) segment at any time. + */ + static class SegmentsOfInterval + { + private final Interval interval; + private final List appendFinishedSegments = new ArrayList<>(); + + @Nullable + private SegmentWithState appendingSegment; + + SegmentsOfInterval(Interval interval) + { + this.interval = interval; + } + + SegmentsOfInterval( + Interval interval, + @Nullable SegmentWithState appendingSegment, + List appendFinishedSegments + ) + { + this.interval = interval; + this.appendingSegment = appendingSegment; + this.appendFinishedSegments.addAll(appendFinishedSegments); + + if (appendingSegment != null) { + Preconditions.checkArgument( + appendingSegment.getState() == SegmentState.APPENDING, + "appendingSegment[%s] is not in the APPENDING state", + appendingSegment.getSegmentIdentifier() + ); + } + if (appendFinishedSegments + .stream() + .anyMatch(segmentWithState -> segmentWithState.getState() == SegmentState.APPENDING)) { + throw new ISE("Some appendFinishedSegments[%s] are in the APPENDING state", appendFinishedSegments); + } + } + + void setAppendingSegment(SegmentWithState appendingSegment) + { + Preconditions.checkArgument( + appendingSegment.getState() == SegmentState.APPENDING, + "segment[%s] is not in the APPENDING state", + appendingSegment.getSegmentIdentifier() + ); + // There should be only one appending segment at any time + Preconditions.checkState( + this.appendingSegment == null, + "WTF?! Current appendingSegment[%s] is not null. 
" + + "Its state must be changed before setting a new appendingSegment[%s]", + this.appendingSegment, + appendingSegment + ); + this.appendingSegment = appendingSegment; + } + + void finishAppendingToCurrentActiveSegment(Consumer stateTransitionFn) + { + Preconditions.checkNotNull(appendingSegment, "appendingSegment"); + stateTransitionFn.accept(appendingSegment); + appendFinishedSegments.add(appendingSegment); + appendingSegment = null; + } + + Interval getInterval() + { + return interval; + } + + SegmentWithState getAppendingSegment() + { + return appendingSegment; + } + + List getAllSegments() + { + final List allSegments = new ArrayList<>(appendFinishedSegments.size() + 1); + if (appendingSegment != null) { + allSegments.add(appendingSegment); + } + allSegments.addAll(appendFinishedSegments); + return allSegments; + } + } + + /** + * Allocated segments for a sequence + */ + static class SegmentsForSequence + { + // Interval Start millis -> List of Segments for this interval + // there might be multiple segments for a start interval, for example one segment + // can be in APPENDING state and others might be in PUBLISHING state + private final NavigableMap intervalToSegmentStates; + + // most recently allocated segment + private String lastSegmentId; + + SegmentsForSequence() + { + this.intervalToSegmentStates = new TreeMap<>(); + } + + SegmentsForSequence( + NavigableMap intervalToSegmentStates, + String lastSegmentId + ) + { + this.intervalToSegmentStates = intervalToSegmentStates; + this.lastSegmentId = lastSegmentId; + } + + void add(SegmentIdentifier identifier) + { + intervalToSegmentStates.computeIfAbsent( + identifier.getInterval().getStartMillis(), + k -> new SegmentsOfInterval(identifier.getInterval()) + ).setAppendingSegment(SegmentWithState.newSegment(identifier)); + lastSegmentId = identifier.getIdentifierAsString(); + } + + Entry floor(long timestamp) + { + return intervalToSegmentStates.floorEntry(timestamp); + } + + SegmentsOfInterval get(long timestamp) + { + return intervalToSegmentStates.get(timestamp); + } + + Stream allSegmentStateStream() + { + return intervalToSegmentStates + .values() + .stream() + .flatMap(segmentsOfInterval -> segmentsOfInterval.getAllSegments().stream()); + } + + Stream getAllSegmentsOfInterval() + { + return intervalToSegmentStates.values().stream(); + } + } + + private static final Logger log = new Logger(BaseAppenderatorDriver.class); + + private final SegmentAllocator segmentAllocator; + private final UsedSegmentChecker usedSegmentChecker; + private final DataSegmentKiller dataSegmentKiller; + + protected final Appenderator appenderator; + // sequenceName -> segmentsForSequence + // This map should be locked with itself before accessing it. + // Note: BatchAppenderatorDriver currently doesn't need to lock this map because it doesn't do anything concurrently. + // However, it's desired to do some operations like indexing and pushing at the same time. Locking this map is also + // required in BatchAppenderatorDriver once this feature is supported. 
+ protected final Map segments = new TreeMap<>(); + protected final ListeningExecutorService executor; + + BaseAppenderatorDriver( + Appenderator appenderator, + SegmentAllocator segmentAllocator, + UsedSegmentChecker usedSegmentChecker, + DataSegmentKiller dataSegmentKiller + ) + { + this.appenderator = Preconditions.checkNotNull(appenderator, "appenderator"); + this.segmentAllocator = Preconditions.checkNotNull(segmentAllocator, "segmentAllocator"); + this.usedSegmentChecker = Preconditions.checkNotNull(usedSegmentChecker, "usedSegmentChecker"); + this.dataSegmentKiller = Preconditions.checkNotNull(dataSegmentKiller, "dataSegmentKiller"); + this.executor = MoreExecutors.listeningDecorator(Execs.singleThreaded("publish-%d")); + } + + @VisibleForTesting + Map getSegments() + { + return segments; + } + + /** + * Perform any initial setup and return currently persisted commit metadata. + *

    + * Note that this method returns the same metadata you've passed in with your Committers, even though this class + * stores extra metadata on disk. + * + * @return currently persisted commit metadata + */ + @Nullable + public abstract Object startJob(); + + /** + * Find a segment in the {@link SegmentState#APPENDING} state for the given timestamp and sequenceName. + */ + private SegmentIdentifier getAppendableSegment(final DateTime timestamp, final String sequenceName) + { + synchronized (segments) { + final SegmentsForSequence segmentsForSequence = segments.get(sequenceName); + + if (segmentsForSequence == null) { + return null; + } + + final Map.Entry candidateEntry = segmentsForSequence.floor( + timestamp.getMillis() + ); + + if (candidateEntry != null) { + final SegmentsOfInterval segmentsOfInterval = candidateEntry.getValue(); + if (segmentsOfInterval.interval.contains(timestamp)) { + return segmentsOfInterval.appendingSegment == null ? + null : + segmentsOfInterval.appendingSegment.getSegmentIdentifier(); + } else { + return null; + } + } else { + return null; + } + } + } + + /** + * Return a segment usable for "timestamp". May return null if no segment can be allocated. + * + * @param row input row + * @param sequenceName sequenceName for potential segment allocation + * @param skipSegmentLineageCheck if false, perform lineage validation using previousSegmentId for this sequence. + * Should be set to false if replica tasks would index events in same order + * + * @return identifier, or null + * + * @throws IOException if an exception occurs while allocating a segment + */ + private SegmentIdentifier getSegment( + final InputRow row, + final String sequenceName, + final boolean skipSegmentLineageCheck + ) throws IOException + { + synchronized (segments) { + final DateTime timestamp = row.getTimestamp(); + final SegmentIdentifier existing = getAppendableSegment(timestamp, sequenceName); + if (existing != null) { + return existing; + } else { + // Allocate new segment. + final SegmentsForSequence segmentsForSequence = segments.get(sequenceName); + final SegmentIdentifier newSegment = segmentAllocator.allocate( + row, + sequenceName, + segmentsForSequence == null ? null : segmentsForSequence.lastSegmentId, + // send lastSegmentId irrespective of skipSegmentLineageCheck so that + // unique constraint for sequence_name_prev_id_sha1 does not fail for + // allocatePendingSegment in IndexerSQLMetadataStorageCoordinator + skipSegmentLineageCheck + ); + + if (newSegment != null) { + for (SegmentIdentifier identifier : appenderator.getSegments()) { + if (identifier.equals(newSegment)) { + throw new ISE( + "WTF?! Allocated segment[%s] which conflicts with existing segment[%s].", + newSegment, + identifier + ); + } + } + + log.info("New segment[%s] for row[%s] sequenceName[%s].", newSegment, row, sequenceName); + addSegment(sequenceName, newSegment); + } else { + // Well, we tried. + log.warn("Cannot allocate segment for timestamp[%s], sequenceName[%s]. ", timestamp, sequenceName); + } + + return newSegment; + } + } + } + + private void addSegment(String sequenceName, SegmentIdentifier identifier) + { + synchronized (segments) { + segments.computeIfAbsent(sequenceName, k -> new SegmentsForSequence()) + .add(identifier); + } + } + + /** + * Add a row. Must not be called concurrently from multiple threads. 
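The heart of getAppendableSegment(...) above is a NavigableMap floor lookup: take the latest interval starting at or before the row's timestamp, then verify that the interval actually contains the timestamp. A toy version under the simplifying assumption of fixed-width intervals and String segment ids (all names here are hypothetical):

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

final class FloorLookupSketch
{
  // intervalStartMillis -> id of the segment currently open for appending in that interval
  private final NavigableMap<Long, String> appendingByStart = new TreeMap<>();
  private final long intervalWidthMillis;

  FloorLookupSketch(long intervalWidthMillis)
  {
    this.intervalWidthMillis = intervalWidthMillis;
  }

  // Returns the appendable segment id for the timestamp, or null to signal "allocate a new one".
  String findAppendable(long timestampMillis)
  {
    final Map.Entry<Long, String> candidate = appendingByStart.floorEntry(timestampMillis);
    if (candidate != null && timestampMillis < candidate.getKey() + intervalWidthMillis) {
      return candidate.getValue();
    }
    return null;
  }
}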
+ * + * @param row the row to add + * @param sequenceName sequenceName for this row's segment + * @param committerSupplier supplier of a committer associated with all data that has been added, including this row + * if {@param allowIncrementalPersists} is set to false then this will not be used + * @param skipSegmentLineageCheck if true, perform lineage validation using previousSegmentId for this sequence. + * Should be set to false if replica tasks would index events in same order + * @param allowIncrementalPersists whether to allow persist to happen when maxRowsInMemory or intermediate persist period + * threshold is hit + * + * @return {@link AppenderatorDriverAddResult} + * + * @throws IOException if there is an I/O error while allocating or writing to a segment + */ + protected AppenderatorDriverAddResult append( + final InputRow row, + final String sequenceName, + @Nullable final Supplier committerSupplier, + final boolean skipSegmentLineageCheck, + final boolean allowIncrementalPersists + ) throws IOException + { + Preconditions.checkNotNull(row, "row"); + Preconditions.checkNotNull(sequenceName, "sequenceName"); + + final SegmentIdentifier identifier = getSegment(row, sequenceName, skipSegmentLineageCheck); + + if (identifier != null) { + try { + final Appenderator.AppenderatorAddResult result = appenderator.add( + identifier, + row, + committerSupplier == null ? null : wrapCommitterSupplier(committerSupplier), + allowIncrementalPersists + ); + return AppenderatorDriverAddResult.ok( + identifier, + result.getNumRowsInSegment(), + appenderator.getTotalRowCount(), + result.isPersistRequired() + ); + } + catch (SegmentNotWritableException e) { + throw new ISE(e, "WTF?! Segment[%s] not writable when it should have been.", identifier); + } + } else { + return AppenderatorDriverAddResult.fail(); + } + } + + /** + * Returns a stream of {@link SegmentWithState} for the given sequenceNames. + */ + Stream getSegmentWithStates(Collection sequenceNames) + { + synchronized (segments) { + return sequenceNames + .stream() + .map(segments::get) + .filter(Objects::nonNull) + .flatMap(segmentsForSequence -> segmentsForSequence.intervalToSegmentStates.values().stream()) + .flatMap(segmentsOfInterval -> segmentsOfInterval.getAllSegments().stream()); + } + } + + Stream getAppendingSegments(Collection sequenceNames) + { + synchronized (segments) { + return sequenceNames + .stream() + .map(segments::get) + .filter(Objects::nonNull) + .flatMap(segmentsForSequence -> segmentsForSequence.intervalToSegmentStates.values().stream()) + .map(segmentsOfInterval -> segmentsOfInterval.appendingSegment) + .filter(Objects::nonNull); + } + } + + /** + * Push the given segments in background. 
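getSegmentWithStates(...) and getAppendingSegments(...) above share one stream shape: resolve each requested sequence name, skip sequences that don't exist, and flatten the per-interval segment collections into a single stream. The same pipeline in isolation, with the element types simplified to plain strings for illustration:

import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Stream;

final class SequenceStreamSketch
{
  // Mirrors the lookup-filter-flatten pipeline used by the driver's getters.
  static Stream<String> segmentsFor(Map<String, Collection<String>> segments, Collection<String> sequenceNames)
  {
    return sequenceNames.stream()
                        .map(segments::get)
                        .filter(Objects::nonNull)     // unknown sequences are silently skipped
                        .flatMap(Collection::stream); // flatten per-sequence segment lists
  }
}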
+ * + * @param wrappedCommitter should not be null if you want to persist intermediate states + * @param segmentIdentifiers identifiers of the segments to be pushed + * @param useUniquePath true if the segment should be written to a path with a unique identifier + * + * @return a future for pushing segments + */ + ListenableFuture pushInBackground( + @Nullable final WrappedCommitter wrappedCommitter, + final Collection segmentIdentifiers, + final boolean useUniquePath + ) + { + log.info("Pushing segments in background: [%s]", Joiner.on(", ").join(segmentIdentifiers)); + + return Futures.transform( + appenderator.push(segmentIdentifiers, wrappedCommitter, useUniquePath), + (Function) segmentsAndMetadata -> { + // Sanity check + final Set pushedSegments = segmentsAndMetadata.getSegments().stream() + .map(SegmentIdentifier::fromDataSegment) + .collect(Collectors.toSet()); + if (!pushedSegments.equals(Sets.newHashSet(segmentIdentifiers))) { + log.warn( + "Removing segments from deep storage because sanity check failed: %s", segmentsAndMetadata.getSegments() + ); + + segmentsAndMetadata.getSegments().forEach(dataSegmentKiller::killQuietly); + + throw new ISE( + "WTF?! Pushed different segments than requested. Pushed[%s], requested[%s].", + pushedSegments, + segmentIdentifiers + ); + } + + return segmentsAndMetadata; + }, + executor + ); + } + + /** + * Drop segments in background. The segments should be pushed (in batch ingestion) or published (in streaming + * ingestion) before being dropped. + * + * @param segmentsAndMetadata result of pushing or publishing + * + * @return a future for dropping segments + */ + ListenableFuture dropInBackground(SegmentsAndMetadata segmentsAndMetadata) + { + log.info("Dropping segments[%s]", segmentsAndMetadata.getSegments()); + final ListenableFuture dropFuture = Futures.allAsList( + segmentsAndMetadata + .getSegments() + .stream() + .map(segment -> appenderator.drop(SegmentIdentifier.fromDataSegment(segment))) + .collect(Collectors.toList()) + ); + + return Futures.transform( + dropFuture, + (Function) x -> { + final Object metadata = segmentsAndMetadata.getCommitMetadata(); + return new SegmentsAndMetadata( + segmentsAndMetadata.getSegments(), + metadata == null ? null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata() + ); + } + ); + } + + /** + * Publish segments in background. The segments should be dropped (in batch ingestion) or pushed (in streaming + * ingestion) before being published. + * + * @param segmentsAndMetadata result of dropping or pushing + * @param publisher transactional segment publisher + * + * @return a future for publishing segments + */ + ListenableFuture publishInBackground( + SegmentsAndMetadata segmentsAndMetadata, + TransactionalSegmentPublisher publisher + ) + { + return executor.submit( + () -> { + if (segmentsAndMetadata.getSegments().isEmpty()) { + log.info("Nothing to publish, skipping publish step."); + } else { + log.info( + "Publishing segments with commitMetadata[%s]: [%s]", + segmentsAndMetadata.getCommitMetadata(), + Joiner.on(", ").join(segmentsAndMetadata.getSegments()) + ); + + try { + final Object metadata = segmentsAndMetadata.getCommitMetadata(); + final boolean published = publisher.publishSegments( + ImmutableSet.copyOf(segmentsAndMetadata.getSegments()), + metadata == null ? 
null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata() + ); + + if (published) { + log.info("Published segments."); + } else { + log.info("Transaction failure while publishing segments, checking if someone else beat us to it."); + final Set segmentsIdentifiers = segmentsAndMetadata + .getSegments() + .stream() + .map(SegmentIdentifier::fromDataSegment) + .collect(Collectors.toSet()); + if (usedSegmentChecker.findUsedSegments(segmentsIdentifiers) + .equals(Sets.newHashSet(segmentsAndMetadata.getSegments()))) { + log.info( + "Removing our segments from deep storage because someone else already published them: %s", + segmentsAndMetadata.getSegments() + ); + segmentsAndMetadata.getSegments().forEach(dataSegmentKiller::killQuietly); + + log.info("Our segments really do exist, awaiting handoff."); + } else { + throw new ISE("Failed to publish segments[%s]", segmentsAndMetadata.getSegments()); + } + } + } + catch (Exception e) { + log.warn( + "Removing segments from deep storage after failed publish: %s", + segmentsAndMetadata.getSegments() + ); + segmentsAndMetadata.getSegments().forEach(dataSegmentKiller::killQuietly); + + throw Throwables.propagate(e); + } + } + + return segmentsAndMetadata; + } + ); + } + + /** + * Clears out all our state and also calls {@link Appenderator#clear()} on the underlying Appenderator. + */ + public void clear() throws InterruptedException + { + synchronized (segments) { + segments.clear(); + } + appenderator.clear(); + } + + /** + * Closes this driver. Does not close the underlying Appenderator; you should do that yourself. + */ + @Override + public void close() + { + executor.shutdownNow(); + } + + /** + * Wrapped committer for BaseAppenderatorDriver. Used in only {@link StreamAppenderatorDriver} because batch ingestion + * doesn't need committing intermediate states. 
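The failure path of publishInBackground(...) above encodes an idempotence check worth spelling out: when the transactional publish reports failure, it is still treated as success if the metadata store already contains exactly the segments we tried to publish (another replica won the race); in every other failure case the pushed files are removed from deep storage. A compressed sketch of that decision, with simplified stand-in types:

import java.util.Set;

final class PublishOrVerifySketch
{
  // Simplified stand-in for the metadata-store operations used by the driver.
  interface MetadataStore
  {
    boolean publishTransactionally(Set<String> segmentIds);

    Set<String> findUsedSegments(Set<String> segmentIds);
  }

  // True if we published, or if someone else already published exactly these segments.
  static boolean publishOrVerify(MetadataStore store, Set<String> segmentIds)
  {
    if (store.publishTransactionally(segmentIds)) {
      return true;
    }
    return store.findUsedSegments(segmentIds).equals(segmentIds);
  }
}

On the verified-success branch the real driver keeps the segments and awaits handoff; only a genuine mismatch kills the pushed files.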
+ */ + static class WrappedCommitter implements Committer + { + private final Committer delegate; + private final AppenderatorDriverMetadata metadata; + + WrappedCommitter(Committer delegate, AppenderatorDriverMetadata metadata) + { + this.delegate = delegate; + this.metadata = metadata; + } + + @Override + public Object getMetadata() + { + return metadata; + } + + @Override + public void run() + { + delegate.run(); + } + } + + WrappedCommitter wrapCommitter(final Committer committer) + { + final AppenderatorDriverMetadata wrappedMetadata; + final Map snapshot; + synchronized (segments) { + snapshot = ImmutableMap.copyOf(segments); + } + + wrappedMetadata = new AppenderatorDriverMetadata( + ImmutableMap.copyOf( + Maps.transformValues( + snapshot, + (Function>) input -> ImmutableList.copyOf( + input.intervalToSegmentStates + .values() + .stream() + .flatMap(segmentsOfInterval -> segmentsOfInterval.getAllSegments().stream()) + .collect(Collectors.toList()) + ) + ) + ), + snapshot.entrySet() + .stream() + .collect( + Collectors.toMap( + Entry::getKey, + e -> e.getValue().lastSegmentId + ) + ), + committer.getMetadata() + ); + + return new WrappedCommitter(committer, wrappedMetadata); + } + + private Supplier wrapCommitterSupplier(final Supplier committerSupplier) + { + return () -> wrapCommitter(committerSupplier.get()); + } +} diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/BatchAppenderatorDriver.java b/server/src/main/java/io/druid/segment/realtime/appenderator/BatchAppenderatorDriver.java new file mode 100644 index 000000000000..7d6363796435 --- /dev/null +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/BatchAppenderatorDriver.java @@ -0,0 +1,217 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.segment.realtime.appenderator; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import io.druid.data.input.InputRow; +import io.druid.java.util.common.ISE; +import io.druid.segment.loading.DataSegmentKiller; +import io.druid.segment.realtime.appenderator.SegmentWithState.SegmentState; +import io.druid.timeline.DataSegment; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * This class is specialized for batch ingestion. In batch ingestion, the segment lifecycle is like: + *

+ * <pre>
+ * APPENDING -> PUSHED_AND_DROPPED -> PUBLISHED
+ * </pre>
+ *
+ * <ul>
+ * <li>APPENDING: Segment is available for appending.</li>
+ * <li>PUSHED_AND_DROPPED: Segment is pushed to deep storage and dropped from the local storage.</li>
+ * <li>PUBLISHED: Segment's metadata is published to metastore.</li>
+ * </ul>
    + */ +public class BatchAppenderatorDriver extends BaseAppenderatorDriver +{ + /** + * Create a driver. + * + * @param appenderator appenderator + * @param segmentAllocator segment allocator + * @param usedSegmentChecker used segment checker + */ + public BatchAppenderatorDriver( + Appenderator appenderator, + SegmentAllocator segmentAllocator, + UsedSegmentChecker usedSegmentChecker, + DataSegmentKiller dataSegmentKiller + ) + { + super(appenderator, segmentAllocator, usedSegmentChecker, dataSegmentKiller); + } + + /** + * This method always returns null because batch ingestion doesn't support restoring tasks on failures. + * + * @return always null + */ + @Override + @Nullable + public Object startJob() + { + final Object metadata = appenderator.startJob(); + if (metadata != null) { + throw new ISE("Metadata should be null because BatchAppenderatorDriver never persists it"); + } + return null; + } + + /** + * Add a row. Must not be called concurrently from multiple threads. + * + * @param row the row to add + * @param sequenceName sequenceName for this row's segment + * + * @return {@link AppenderatorDriverAddResult} + * + * @throws IOException if there is an I/O error while allocating or writing to a segment + */ + public AppenderatorDriverAddResult add( + InputRow row, + String sequenceName + ) throws IOException + { + return append(row, sequenceName, null, false, true); + } + + /** + * Push and drop all segments in the {@link SegmentState#APPENDING} state. + * + * @param pushAndClearTimeoutMs timeout for pushing and dropping segments + * + * @return {@link SegmentsAndMetadata} for pushed and dropped segments + */ + public SegmentsAndMetadata pushAllAndClear(long pushAndClearTimeoutMs) + throws InterruptedException, ExecutionException, TimeoutException + { + final Collection sequences; + synchronized (segments) { + sequences = ImmutableList.copyOf(segments.keySet()); + } + + return pushAndClear(sequences, pushAndClearTimeoutMs); + } + + private SegmentsAndMetadata pushAndClear( + Collection sequenceNames, + long pushAndClearTimeoutMs + ) throws InterruptedException, ExecutionException, TimeoutException + { + final Map requestedSegmentIdsForSequences = getAppendingSegments(sequenceNames) + .collect(Collectors.toMap(SegmentWithState::getSegmentIdentifier, Function.identity())); + + final ListenableFuture future = Futures.transform( + pushInBackground(null, requestedSegmentIdsForSequences.keySet(), false), + this::dropInBackground + ); + + final SegmentsAndMetadata segmentsAndMetadata = pushAndClearTimeoutMs == 0L ? 
+ future.get() : + future.get(pushAndClearTimeoutMs, TimeUnit.MILLISECONDS); + + // Sanity check + final Map pushedSegmentIdToSegmentMap = segmentsAndMetadata + .getSegments() + .stream() + .collect(Collectors.toMap(SegmentIdentifier::fromDataSegment, Function.identity())); + + if (!pushedSegmentIdToSegmentMap.keySet().equals(requestedSegmentIdsForSequences.keySet())) { + throw new ISE( + "Pushed segments[%s] are different from the requested ones[%s]", + pushedSegmentIdToSegmentMap.keySet(), + requestedSegmentIdsForSequences.keySet() + ); + } + + synchronized (segments) { + for (String sequenceName : sequenceNames) { + final SegmentsForSequence segmentsForSequence = segments.get(sequenceName); + if (segmentsForSequence == null) { + throw new ISE("Can't find segmentsForSequence for sequence[%s]", sequenceName); + } + + segmentsForSequence.getAllSegmentsOfInterval().forEach(segmentsOfInterval -> { + final SegmentWithState appendingSegment = segmentsOfInterval.getAppendingSegment(); + if (appendingSegment != null) { + final DataSegment pushedSegment = pushedSegmentIdToSegmentMap.get(appendingSegment.getSegmentIdentifier()); + if (pushedSegment == null) { + throw new ISE("Can't find pushedSegments for segment[%s]", appendingSegment.getSegmentIdentifier()); + } + + segmentsOfInterval.finishAppendingToCurrentActiveSegment( + segmentWithState -> segmentWithState.pushAndDrop(pushedSegment) + ); + } + }); + } + } + + return segmentsAndMetadata; + } + + /** + * Publish all segments. + * + * @param publisher segment publisher + * + * @return a {@link ListenableFuture} for the publish task + */ + public ListenableFuture publishAll(final TransactionalSegmentPublisher publisher) + { + final Map snapshot; + synchronized (segments) { + snapshot = ImmutableMap.copyOf(segments); + } + + return publishInBackground( + new SegmentsAndMetadata( + snapshot + .values() + .stream() + .flatMap(SegmentsForSequence::allSegmentStateStream) + .map(segmentWithState -> Preconditions + .checkNotNull( + segmentWithState.getDataSegment(), + "dataSegment for segmentId[%s]", + segmentWithState.getSegmentIdentifier() + ) + ) + .collect(Collectors.toList()), + null + ), + publisher + ); + } +} diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/DefaultRealtimeAppenderatorFactory.java b/server/src/main/java/io/druid/segment/realtime/appenderator/DefaultRealtimeAppenderatorFactory.java index 6be9b1b355fe..46b5f1a7afa5 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/DefaultRealtimeAppenderatorFactory.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/DefaultRealtimeAppenderatorFactory.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JacksonInject; import com.fasterxml.jackson.databind.ObjectMapper; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/SegmentWithState.java b/server/src/main/java/io/druid/segment/realtime/appenderator/SegmentWithState.java new file mode 100644 index 000000000000..3a89c1593cf9 --- /dev/null +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/SegmentWithState.java @@ -0,0 +1,153 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.segment.realtime.appenderator; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Preconditions; +import io.druid.timeline.DataSegment; + +import javax.annotation.Nullable; +import java.util.Collection; +import java.util.List; + +public class SegmentWithState +{ + /** + * Segment state transition is different in {@link BatchAppenderatorDriver} and {@link StreamAppenderatorDriver}. + * When a new segment is created, its state is {@link #APPENDING}. + * + * - In stream ingestion, the state of some segments can be changed to the {@link #APPEND_FINISHED} state. Data is + * not appended to these segments anymore, and they are waiting to be published. + * See {@link StreamAppenderatorDriver#moveSegmentOut(String, List)}. + * - In batch ingestion, the state of some segments can be changed to the {@link #PUSHED_AND_DROPPED} state. These + * segments are pushed and dropped from the local storage, but not published yet. + * See {@link BatchAppenderatorDriver#pushAndClear(Collection, long)}. + * + * Note: If you need to add more states which are used differently in batch and streaming ingestion, consider moving + * SegmentState to {@link BatchAppenderatorDriver} and {@link StreamAppenderatorDriver}. + */ + public enum SegmentState + { + APPENDING, + APPEND_FINISHED, // only used in StreamAppenderatorDriver + PUSHED_AND_DROPPED; // only used in BatchAppenderatorDriver + + @JsonCreator + public static SegmentState fromString(@JsonProperty String name) + { + if (name.equalsIgnoreCase("ACTIVE")) { + return APPENDING; + } else if (name.equalsIgnoreCase("INACTIVE")) { + return APPEND_FINISHED; + } else { + return SegmentState.valueOf(name); + } + } + } + + private final SegmentIdentifier segmentIdentifier; + private SegmentState state; + + /** + * This keeps track of which dataSegment object was created for {@link #segmentIdentifier} when + * {@link BaseAppenderatorDriver#pushInBackground} is called. + */ + @Nullable private DataSegment dataSegment; + + static SegmentWithState newSegment(SegmentIdentifier segmentIdentifier) + { + return new SegmentWithState(segmentIdentifier, SegmentState.APPENDING, null); + } + + static SegmentWithState newSegment(SegmentIdentifier segmentIdentifier, SegmentState state) + { + return new SegmentWithState(segmentIdentifier, state, null); + } + + @JsonCreator + public SegmentWithState( + @JsonProperty("segmentIdentifier") SegmentIdentifier segmentIdentifier, + @JsonProperty("state") SegmentState state, + @JsonProperty("dataSegment") @Nullable DataSegment dataSegment) + { + this.segmentIdentifier = segmentIdentifier; + this.state = state; + this.dataSegment = dataSegment; + } + + public void setState(SegmentState state) + { + this.state = state; + } + + /** + * Change the segment state to {@link SegmentState#APPEND_FINISHED}. 
The current state should be + * {@link SegmentState#APPENDING}. + */ + public void finishAppending() + { + checkStateTransition(this.state, SegmentState.APPENDING, SegmentState.APPEND_FINISHED); + this.state = SegmentState.APPEND_FINISHED; + } + + /** + * Change the segment state to {@link SegmentState#PUSHED_AND_DROPPED}. The current state should be + * {@link SegmentState#APPENDING}. This method should be called after the segment of {@link #segmentIdentifier} is + * completely pushed and dropped. + * + * @param dataSegment pushed {@link DataSegment} + */ + public void pushAndDrop(DataSegment dataSegment) + { + checkStateTransition(this.state, SegmentState.APPENDING, SegmentState.PUSHED_AND_DROPPED); + this.state = SegmentState.PUSHED_AND_DROPPED; + this.dataSegment = dataSegment; + } + + @JsonProperty + public SegmentIdentifier getSegmentIdentifier() + { + return segmentIdentifier; + } + + @JsonProperty + public SegmentState getState() + { + return state; + } + + @JsonProperty + @Nullable + public DataSegment getDataSegment() + { + return dataSegment; + } + + private static void checkStateTransition(SegmentState actualFrom, SegmentState expectedFrom, SegmentState to) + { + Preconditions.checkState( + actualFrom == expectedFrom, + "Wrong state transition from [%s] to [%s]", + actualFrom, + to + ); + } +} diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/SegmentsAndMetadata.java b/server/src/main/java/io/druid/segment/realtime/appenderator/SegmentsAndMetadata.java index f5d7ae1e1902..72a7f8b325c4 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/SegmentsAndMetadata.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/SegmentsAndMetadata.java @@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableList; import io.druid.timeline.DataSegment; +import javax.annotation.Nullable; import java.util.List; import java.util.Objects; @@ -34,13 +35,14 @@ public class SegmentsAndMetadata public SegmentsAndMetadata( List segments, - Object commitMetadata + @Nullable Object commitMetadata ) { this.segments = ImmutableList.copyOf(segments); this.commitMetadata = commitMetadata; } + @Nullable public Object getCommitMetadata() { return commitMetadata; diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/SinkQuerySegmentWalker.java b/server/src/main/java/io/druid/segment/realtime/appenderator/SinkQuerySegmentWalker.java index 3905adbbcbab..9d758553e71d 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/SinkQuerySegmentWalker.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/SinkQuerySegmentWalker.java @@ -24,8 +24,8 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Iterables; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.CachingQueryRunner; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java b/server/src/main/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java new file mode 100644 index 000000000000..6d1109202eaa --- /dev/null +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriver.java @@ -0,0 +1,438 @@ +/* + * Licensed to Metamarkets 
Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.segment.realtime.appenderator; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Function; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import com.google.common.base.Throwables; +import com.google.common.util.concurrent.AsyncFunction; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.SettableFuture; +import io.druid.data.input.Committer; +import io.druid.data.input.InputRow; +import io.druid.java.util.common.ISE; +import io.druid.java.util.common.Pair; +import io.druid.java.util.common.guava.Comparators; +import io.druid.java.util.common.logger.Logger; +import io.druid.query.SegmentDescriptor; +import io.druid.segment.loading.DataSegmentKiller; +import io.druid.segment.realtime.FireDepartmentMetrics; +import io.druid.segment.realtime.appenderator.SegmentWithState.SegmentState; +import io.druid.segment.realtime.plumber.SegmentHandoffNotifier; +import io.druid.segment.realtime.plumber.SegmentHandoffNotifierFactory; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NavigableMap; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +/** + * This class is specialized for streaming ingestion. In streaming ingestion, the segment lifecycle is like: + *

+ * <pre>
+ * APPENDING -> APPEND_FINISHED -> PUBLISHED
+ * </pre>
+ *
+ * <ul>
+ * <li>APPENDING: Segment is available for appending.</li>
+ * <li>APPEND_FINISHED: Segment cannot be updated (data cannot be added anymore) and is waiting to be published.</li>
+ * <li>PUBLISHED: Segment is pushed to deep storage, its metadata is published to metastore, and finally the segment is
+ * dropped from local storage.</li>
+ * </ul>
    + */ +public class StreamAppenderatorDriver extends BaseAppenderatorDriver +{ + private static final Logger log = new Logger(StreamAppenderatorDriver.class); + + private final SegmentHandoffNotifier handoffNotifier; + private final FireDepartmentMetrics metrics; + private final ObjectMapper objectMapper; + + /** + * Create a driver. + * + * @param appenderator appenderator + * @param segmentAllocator segment allocator + * @param handoffNotifierFactory handoff notifier factory + * @param usedSegmentChecker used segment checker + * @param objectMapper object mapper, used for serde of commit metadata + * @param metrics Firedepartment metrics + */ + public StreamAppenderatorDriver( + Appenderator appenderator, + SegmentAllocator segmentAllocator, + SegmentHandoffNotifierFactory handoffNotifierFactory, + UsedSegmentChecker usedSegmentChecker, + DataSegmentKiller dataSegmentKiller, + ObjectMapper objectMapper, + FireDepartmentMetrics metrics + ) + { + super(appenderator, segmentAllocator, usedSegmentChecker, dataSegmentKiller); + + this.handoffNotifier = Preconditions.checkNotNull(handoffNotifierFactory, "handoffNotifierFactory") + .createSegmentHandoffNotifier(appenderator.getDataSource()); + this.metrics = Preconditions.checkNotNull(metrics, "metrics"); + this.objectMapper = Preconditions.checkNotNull(objectMapper, "objectMapper"); + } + + @Override + @Nullable + public Object startJob() + { + handoffNotifier.start(); + + final AppenderatorDriverMetadata metadata = objectMapper.convertValue( + appenderator.startJob(), + AppenderatorDriverMetadata.class + ); + + log.info("Restored metadata[%s].", metadata); + + if (metadata != null) { + synchronized (segments) { + final Map lastSegmentIds = metadata.getLastSegmentIds(); + Preconditions.checkState( + metadata.getSegments().keySet().equals(lastSegmentIds.keySet()), + "Sequences for segment states and last segment IDs are not same" + ); + + final Map builders = new TreeMap<>(); + + for (Entry> entry : metadata.getSegments().entrySet()) { + final String sequenceName = entry.getKey(); + final SegmentsForSequenceBuilder builder = new SegmentsForSequenceBuilder(lastSegmentIds.get(sequenceName)); + builders.put(sequenceName, builder); + entry.getValue().forEach(builder::add); + } + + builders.forEach((sequence, builder) -> segments.put(sequence, builder.build())); + } + + return metadata.getCallerMetadata(); + } else { + return null; + } + } + + /** + * Add a row. Must not be called concurrently from multiple threads. + * + * @param row the row to add + * @param sequenceName sequenceName for this row's segment + * @param committerSupplier supplier of a committer associated with all data that has been added, including this row + * if {@param allowIncrementalPersists} is set to false then this will not be used + * @param skipSegmentLineageCheck if true, perform lineage validation using previousSegmentId for this sequence. 
+ * Should be set to false if replica tasks would index events in same order + * @param allowIncrementalPersists whether to allow persist to happen when maxRowsInMemory or intermediate persist period + * threshold is hit + * + * @return {@link AppenderatorDriverAddResult} + * + * @throws IOException if there is an I/O error while allocating or writing to a segment + */ + public AppenderatorDriverAddResult add( + final InputRow row, + final String sequenceName, + final Supplier committerSupplier, + final boolean skipSegmentLineageCheck, + final boolean allowIncrementalPersists + ) throws IOException + { + return append(row, sequenceName, committerSupplier, skipSegmentLineageCheck, allowIncrementalPersists); + } + + /** + * Move a set of identifiers out from "active", making way for newer segments. + * This method is to support KafkaIndexTask's legacy mode and will be removed in the future. + * See KafkaIndexTask.runLegacy(). + */ + public void moveSegmentOut(final String sequenceName, final List identifiers) + { + synchronized (segments) { + final SegmentsForSequence activeSegmentsForSequence = segments.get(sequenceName); + if (activeSegmentsForSequence == null) { + throw new ISE("WTF?! Asked to remove segments for sequenceName[%s] which doesn't exist...", sequenceName); + } + + for (final SegmentIdentifier identifier : identifiers) { + log.info("Moving segment[%s] out of active list.", identifier); + final long key = identifier.getInterval().getStartMillis(); + final SegmentsOfInterval segmentsOfInterval = activeSegmentsForSequence.get(key); + if (segmentsOfInterval == null || + segmentsOfInterval.getAppendingSegment() == null || + !segmentsOfInterval.getAppendingSegment().getSegmentIdentifier().equals(identifier)) { + throw new ISE("WTF?! Asked to remove segment[%s] that didn't exist...", identifier); + } + segmentsOfInterval.finishAppendingToCurrentActiveSegment(SegmentWithState::finishAppending); + } + } + } + + /** + * Persist all data indexed through this driver so far. Blocks until complete. + *
+ + /** + * Persist all data indexed through this driver so far. Blocks until complete. + *
+ * Should be called after all data has been added through {@link #add(InputRow, String, Supplier, boolean, boolean)}. + * + * @param committer committer representing all data that has been added so far + * + * @return commitMetadata persisted + */ + public Object persist(final Committer committer) throws InterruptedException + { + try { + log.info("Persisting data."); + final long start = System.currentTimeMillis(); + final Object commitMetadata = appenderator.persistAll(wrapCommitter(committer)).get(); + log.info("Persisted pending data in %,dms.", System.currentTimeMillis() - start); + return commitMetadata; + } + catch (InterruptedException e) { + throw e; + } + catch (Exception e) { + throw Throwables.propagate(e); + } + }
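[Editor's sketch] Where persist() blocks the calling thread until the appenderator finishes, the persistAsync() variant below returns a future, which suits callers that want to keep ingesting while a persist runs. A small sketch of consuming that future; the callback bodies are illustrative:

    final ListenableFuture<Object> persistFuture = driver.persistAsync(committerSupplier.get());
    Futures.addCallback(
        persistFuture,
        new FutureCallback<Object>()
        {
          @Override
          public void onSuccess(Object commitMetadata)
          {
            // The same commitMetadata that the blocking persist() would have returned.
            log.info("Persisted with metadata[%s].", commitMetadata);
          }

          @Override
          public void onFailure(Throwable t)
          {
            log.warn(t, "Persist failed.");
          }
        }
    );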
+ + /** + * Persist all data indexed through this driver so far. Returns a future of persisted commitMetadata. + *
+ * Should be called after all data has been added through {@link #add(InputRow, String, Supplier, boolean, boolean)}. + * + * @param committer committer representing all data that has been added so far + * + * @return future containing commitMetadata persisted + */ + public ListenableFuture<Object> persistAsync(final Committer committer) + { + return appenderator.persistAll(wrapCommitter(committer)); + } + + /** + * Execute a task in the background to publish all segments corresponding to the given sequence names. The task + * internally pushes the segments to deep storage first, and then publishes the metadata to the metadata storage. + * + * @param publisher segment publisher + * @param committer committer + * @param sequenceNames a collection of sequence names to be published + * + * @return a {@link ListenableFuture} for the submitted task which removes published {@code sequenceNames} from + * {@code activeSegments} and {@code publishPendingSegments} + */ + public ListenableFuture<SegmentsAndMetadata> publish( + final TransactionalSegmentPublisher publisher, + final Committer committer, + final Collection<String> sequenceNames + ) + { + final List<SegmentIdentifier> theSegments = getSegmentWithStates(sequenceNames) + .map(SegmentWithState::getSegmentIdentifier) + .collect(Collectors.toList()); + + final ListenableFuture<SegmentsAndMetadata> publishFuture = Futures.transform( + // useUniquePath=true prevents inconsistencies in segment data when task failures or replicas lead to a second + // version of a segment with the same identifier containing different data; see DataSegmentPusher.push() docs + pushInBackground(wrapCommitter(committer), theSegments, true), + (AsyncFunction<SegmentsAndMetadata, SegmentsAndMetadata>) segmentsAndMetadata -> publishInBackground( + segmentsAndMetadata, + publisher + ) + ); + + return Futures.transform( + publishFuture, + (Function<SegmentsAndMetadata, SegmentsAndMetadata>) segmentsAndMetadata -> { + synchronized (segments) { + sequenceNames.forEach(segments::remove); + } + return segmentsAndMetadata; + } + ); + } + + /** + * Register the segments in the given {@link SegmentsAndMetadata} to be handed off and execute a background task which + * waits until the hand off completes. + * + * @param segmentsAndMetadata the result segments and metadata of + * {@link #publish(TransactionalSegmentPublisher, Committer, Collection)} + * + * @return null if the input segmentsAndMetadata is null.
Otherwise, a {@link ListenableFuture} for the submitted task + * which returns {@link SegmentsAndMetadata} containing the segments successfully handed off and the metadata + * of the caller of {@link AppenderatorDriverMetadata} + */ + public ListenableFuture registerHandoff(SegmentsAndMetadata segmentsAndMetadata) + { + if (segmentsAndMetadata == null) { + return Futures.immediateFuture(null); + + } else { + final List waitingSegmentIdList = segmentsAndMetadata.getSegments().stream() + .map(SegmentIdentifier::fromDataSegment) + .collect(Collectors.toList()); + final Object metadata = Preconditions.checkNotNull(segmentsAndMetadata.getCommitMetadata(), "commitMetadata"); + + if (waitingSegmentIdList.isEmpty()) { + return Futures.immediateFuture( + new SegmentsAndMetadata( + segmentsAndMetadata.getSegments(), + ((AppenderatorDriverMetadata) metadata).getCallerMetadata() + ) + ); + } + + log.info("Register handoff of segments: [%s]", waitingSegmentIdList); + + final SettableFuture resultFuture = SettableFuture.create(); + final AtomicInteger numRemainingHandoffSegments = new AtomicInteger(waitingSegmentIdList.size()); + + for (final SegmentIdentifier segmentIdentifier : waitingSegmentIdList) { + handoffNotifier.registerSegmentHandoffCallback( + new SegmentDescriptor( + segmentIdentifier.getInterval(), + segmentIdentifier.getVersion(), + segmentIdentifier.getShardSpec().getPartitionNum() + ), + MoreExecutors.sameThreadExecutor(), + () -> { + log.info("Segment[%s] successfully handed off, dropping.", segmentIdentifier); + metrics.incrementHandOffCount(); + + final ListenableFuture dropFuture = appenderator.drop(segmentIdentifier); + Futures.addCallback( + dropFuture, + new FutureCallback() + { + @Override + public void onSuccess(Object result) + { + if (numRemainingHandoffSegments.decrementAndGet() == 0) { + log.info("Successfully handed off [%d] segments.", segmentsAndMetadata.getSegments().size()); + resultFuture.set( + new SegmentsAndMetadata( + segmentsAndMetadata.getSegments(), + ((AppenderatorDriverMetadata) metadata).getCallerMetadata() + ) + ); + } + } + + @Override + public void onFailure(Throwable e) + { + log.warn(e, "Failed to drop segment[%s]?!", segmentIdentifier); + numRemainingHandoffSegments.decrementAndGet(); + resultFuture.setException(e); + } + } + ); + } + ); + } + + return resultFuture; + } + } + + public ListenableFuture publishAndRegisterHandoff( + final TransactionalSegmentPublisher publisher, + final Committer committer, + final Collection sequenceNames + ) + { + return Futures.transform( + publish(publisher, committer, sequenceNames), + this::registerHandoff + ); + } + + @Override + public void close() + { + super.close(); + handoffNotifier.close(); + } + + private static class SegmentsForSequenceBuilder + { + // segmentId -> (appendingSegment, appendFinishedSegments) + private final NavigableMap>> intervalToSegments = + new TreeMap<>(Comparator.comparing(SegmentIdentifier::getInterval, Comparators.intervalsByStartThenEnd())); + private final String lastSegmentId; + + SegmentsForSequenceBuilder(String lastSegmentId) + { + this.lastSegmentId = lastSegmentId; + } + + void add(SegmentWithState segmentWithState) + { + final SegmentIdentifier identifier = segmentWithState.getSegmentIdentifier(); + final Pair> pair = intervalToSegments.get(identifier); + final List appendFinishedSegments = pair == null || pair.rhs == null ? 
+ new ArrayList<>() : + pair.rhs; + + // always keep APPENDING segments for an interval start millis in the front + if (segmentWithState.getState() == SegmentState.APPENDING) { + if (pair != null && pair.lhs != null) { + throw new ISE( + "WTF?! there was already an appendingSegment[%s] before adding an appendingSegment[%s]", + pair.lhs, + segmentWithState + ); + } + + intervalToSegments.put(identifier, Pair.of(segmentWithState, appendFinishedSegments)); + } else { + final SegmentWithState appendingSegment = pair == null ? null : pair.lhs; + appendFinishedSegments.add(segmentWithState); + intervalToSegments.put(identifier, Pair.of(appendingSegment, appendFinishedSegments)); + } + } + + SegmentsForSequence build() + { + final NavigableMap map = new TreeMap<>(); + for (Entry>> entry : + intervalToSegments.entrySet()) { + map.put( + entry.getKey().getInterval().getStartMillis(), + new SegmentsOfInterval(entry.getKey().getInterval(), entry.getValue().lhs, entry.getValue().rhs) + ); + } + return new SegmentsForSequence(map, lastSegmentId); + } + } +} diff --git a/server/src/main/java/io/druid/segment/realtime/appenderator/TransactionalSegmentPublisher.java b/server/src/main/java/io/druid/segment/realtime/appenderator/TransactionalSegmentPublisher.java index de74f58966ac..359708a78c0a 100644 --- a/server/src/main/java/io/druid/segment/realtime/appenderator/TransactionalSegmentPublisher.java +++ b/server/src/main/java/io/druid/segment/realtime/appenderator/TransactionalSegmentPublisher.java @@ -21,6 +21,7 @@ import io.druid.timeline.DataSegment; +import javax.annotation.Nullable; import java.io.IOException; import java.util.Set; @@ -35,6 +36,6 @@ public interface TransactionalSegmentPublisher */ boolean publishSegments( Set segments, - Object commitMetadata + @Nullable Object commitMetadata ) throws IOException; } diff --git a/server/src/main/java/io/druid/segment/realtime/firehose/CombiningFirehoseFactory.java b/server/src/main/java/io/druid/segment/realtime/firehose/CombiningFirehoseFactory.java index 4267f6409a29..ce292eddb64c 100644 --- a/server/src/main/java/io/druid/segment/realtime/firehose/CombiningFirehoseFactory.java +++ b/server/src/main/java/io/druid/segment/realtime/firehose/CombiningFirehoseFactory.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.data.input.Firehose; import io.druid.data.input.FirehoseFactory; import io.druid.data.input.InputRow; diff --git a/server/src/main/java/io/druid/segment/realtime/firehose/EventReceiverFirehoseFactory.java b/server/src/main/java/io/druid/segment/realtime/firehose/EventReceiverFirehoseFactory.java index 4343e873a564..9624f8bbcb0d 100644 --- a/server/src/main/java/io/druid/segment/realtime/firehose/EventReceiverFirehoseFactory.java +++ b/server/src/main/java/io/druid/segment/realtime/firehose/EventReceiverFirehoseFactory.java @@ -32,7 +32,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.io.CountingInputStream; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.concurrent.Execs; import io.druid.data.input.Firehose; import io.druid.data.input.FirehoseFactory; diff --git a/server/src/main/java/io/druid/segment/realtime/firehose/LocalFirehoseFactory.java 
b/server/src/main/java/io/druid/segment/realtime/firehose/LocalFirehoseFactory.java index 6b80f2e96fbc..6db1e8c30941 100644 --- a/server/src/main/java/io/druid/segment/realtime/firehose/LocalFirehoseFactory.java +++ b/server/src/main/java/io/druid/segment/realtime/firehose/LocalFirehoseFactory.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.data.input.impl.AbstractTextFilesFirehoseFactory; import io.druid.data.input.impl.StringInputRowParser; import io.druid.java.util.common.CompressionUtils; diff --git a/server/src/main/java/io/druid/segment/realtime/firehose/TimedShutoffFirehoseFactory.java b/server/src/main/java/io/druid/segment/realtime/firehose/TimedShutoffFirehoseFactory.java index 68a19c4482e9..8c87ececff60 100644 --- a/server/src/main/java/io/druid/segment/realtime/firehose/TimedShutoffFirehoseFactory.java +++ b/server/src/main/java/io/druid/segment/realtime/firehose/TimedShutoffFirehoseFactory.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.concurrent.Execs; import io.druid.data.input.Firehose; import io.druid.data.input.FirehoseFactory; diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumber.java b/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumber.java index 546181b1fb02..0c01d99d66ab 100644 --- a/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumber.java +++ b/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumber.java @@ -21,8 +21,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.common.guava.ThreadRenamingCallable; diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumberSchool.java b/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumberSchool.java index 11416650ff9a..c2ef8344cada 100644 --- a/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumberSchool.java +++ b/server/src/main/java/io/druid/segment/realtime/plumber/FlushingPlumberSchool.java @@ -24,7 +24,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.guice.annotations.Processing; diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java index 3a19ed632161..91ccae79407a 100644 --- a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java +++ b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java @@ -30,8 +30,8 @@ import 
com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.primitives.Ints; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.common.guava.ThreadRenamingCallable; @@ -446,7 +446,8 @@ public void doRun() DataSegment segment = dataSegmentPusher.push( mergedFile, - sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes)) + sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes)), + false ); log.info("Inserting [%s] to the metadata store", sink.getSegment().getIdentifier()); segmentPublisher.publishSegment(segment); diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java index 8aaf0f2bf7a2..989cf29ea798 100644 --- a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java +++ b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.guice.annotations.Processing; diff --git a/server/src/main/java/io/druid/server/AsyncQueryForwardingServlet.java b/server/src/main/java/io/druid/server/AsyncQueryForwardingServlet.java index b6c5f287235f..23d60b82e985 100644 --- a/server/src/main/java/io/druid/server/AsyncQueryForwardingServlet.java +++ b/server/src/main/java/io/druid/server/AsyncQueryForwardingServlet.java @@ -26,14 +26,15 @@ import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; import com.google.inject.Provider; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; import io.druid.client.selector.Server; import io.druid.guice.annotations.Json; import io.druid.guice.annotations.Smile; import io.druid.guice.http.DruidHttpClientConfig; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.IAE; +import io.druid.java.util.common.jackson.JacksonUtils; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.query.DruidMetrics; import io.druid.query.GenericQueryMetricsFactory; import io.druid.query.Query; @@ -44,7 +45,10 @@ import io.druid.server.router.QueryHostFinder; import io.druid.server.router.Router; import io.druid.server.security.AuthConfig; -import io.druid.server.security.Escalator; +import io.druid.server.security.AuthenticationResult; +import io.druid.server.security.Authenticator; +import io.druid.server.security.AuthenticatorMapper; +import org.apache.http.client.utils.URIBuilder; import org.eclipse.jetty.client.HttpClient; import org.eclipse.jetty.client.api.Request; import org.eclipse.jetty.client.api.Response; @@ -58,10 +62,8 @@ import javax.servlet.http.HttpServletResponse; import javax.ws.rs.core.MediaType; import java.io.IOException; -import java.io.UnsupportedEncodingException; import java.net.URI; import 
java.net.URISyntaxException; -import java.net.URLDecoder; import java.util.Map; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -113,7 +115,7 @@ private static void handleException(HttpServletResponse response, ObjectMapper o private final ServiceEmitter emitter; private final RequestLogger requestLogger; private final GenericQueryMetricsFactory queryMetricsFactory; - private final Escalator escalator; + private final AuthenticatorMapper authenticatorMapper; private HttpClient broadcastClient; @@ -128,7 +130,7 @@ public AsyncQueryForwardingServlet( ServiceEmitter emitter, RequestLogger requestLogger, GenericQueryMetricsFactory queryMetricsFactory, - Escalator escalator + AuthenticatorMapper authenticatorMapper ) { this.warehouse = warehouse; @@ -140,7 +142,7 @@ public AsyncQueryForwardingServlet( this.emitter = emitter; this.requestLogger = requestLogger; this.queryMetricsFactory = queryMetricsFactory; - this.escalator = escalator; + this.authenticatorMapper = authenticatorMapper; } @Override @@ -179,31 +181,34 @@ protected void service(HttpServletRequest request, HttpServletResponse response) final ObjectMapper objectMapper = isSmile ? smileMapper : jsonMapper; request.setAttribute(OBJECTMAPPER_ATTRIBUTE, objectMapper); - final Server defaultServer = hostFinder.getDefaultServer(); - request.setAttribute(HOST_ATTRIBUTE, defaultServer.getHost()); - request.setAttribute(SCHEME_ATTRIBUTE, defaultServer.getScheme()); + final String requestURI = request.getRequestURI(); + final String method = request.getMethod(); + final Server targetServer; // The Router does not have the ability to look inside SQL queries and route them intelligently, so just treat // them as a generic request. - final boolean isQueryEndpoint = request.getRequestURI().startsWith("/druid/v2") - && !request.getRequestURI().startsWith("/druid/v2/sql"); + final boolean isQueryEndpoint = requestURI.startsWith("/druid/v2") + && !requestURI.startsWith("/druid/v2/sql"); - final boolean isAvatica = request.getRequestURI().startsWith("/druid/v2/sql/avatica"); + final boolean isAvatica = requestURI.startsWith("/druid/v2/sql/avatica"); if (isAvatica) { - Map requestMap = objectMapper.readValue(request.getInputStream(), Map.class); + Map requestMap = objectMapper.readValue( + request.getInputStream(), + JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT + ); String connectionId = getAvaticaConnectionId(requestMap); - Server targetServer = hostFinder.findServerAvatica(connectionId); + targetServer = hostFinder.findServerAvatica(connectionId); byte[] requestBytes = objectMapper.writeValueAsBytes(requestMap); - request.setAttribute(HOST_ATTRIBUTE, targetServer.getHost()); - request.setAttribute(SCHEME_ATTRIBUTE, targetServer.getScheme()); request.setAttribute(AVATICA_QUERY_ATTRIBUTE, requestBytes); - } else if (isQueryEndpoint && HttpMethod.DELETE.is(request.getMethod())) { + } else if (isQueryEndpoint && HttpMethod.DELETE.is(method)) { // query cancellation request - for (final Server server: hostFinder.getAllServers()) { + targetServer = hostFinder.pickDefaultServer(); + + for (final Server server : hostFinder.getAllServers()) { // send query cancellation to all brokers this query may have gone to - // to keep the code simple, the proxy servlet will also send a request to one of the default brokers - if (!server.getHost().equals(defaultServer.getHost())) { + // to keep the code simple, the proxy servlet will also send a request to the default targetServer. 
+ if (!server.getHost().equals(targetServer.getHost())) { // issue async requests Response.CompleteListener completeListener = result -> { if (result.isFailed()) { @@ -214,25 +219,28 @@ protected void service(HttpServletRequest request, HttpServletResponse response) ); } }; + + Request broadcastReq = broadcastClient .newRequest(rewriteURI(request, server.getScheme(), server.getHost())) .method(HttpMethod.DELETE) - .timeout(CANCELLATION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS) - .send(completeListener); + .timeout(CANCELLATION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS); + + copyRequestHeaders(request, broadcastReq); + broadcastReq.send(completeListener); } interruptedQueryCount.incrementAndGet(); } - } else if (isQueryEndpoint && HttpMethod.POST.is(request.getMethod())) { + } else if (isQueryEndpoint && HttpMethod.POST.is(method)) { // query request try { Query inputQuery = objectMapper.readValue(request.getInputStream(), Query.class); if (inputQuery != null) { - final Server server = hostFinder.getServer(inputQuery); - request.setAttribute(HOST_ATTRIBUTE, server.getHost()); - request.setAttribute(SCHEME_ATTRIBUTE, server.getScheme()); + targetServer = hostFinder.pickServer(inputQuery); if (inputQuery.getId() == null) { inputQuery = inputQuery.withId(UUID.randomUUID().toString()); } + } else { + targetServer = hostFinder.pickDefaultServer(); } request.setAttribute(QUERY_ATTRIBUTE, inputQuery); } @@ -260,8 +268,22 @@ protected void service(HttpServletRequest request, HttpServletResponse response) handleException(response, objectMapper, e); return; } + } else { + targetServer = hostFinder.pickDefaultServer(); } + request.setAttribute(HOST_ATTRIBUTE, targetServer.getHost()); + request.setAttribute(SCHEME_ATTRIBUTE, targetServer.getScheme()); + + doService(request, response); + } + + protected void doService( + HttpServletRequest request, + HttpServletResponse response + ) throws ServletException, IOException + { + // Just call the superclass service method. Overridden in tests. + super.service(request, response); } @@ -297,6 +319,22 @@ protected void sendProxyRequest( // will log that on the remote node. clientRequest.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); + // Check if there is an authentication result and use it to decorate the proxy request if needed.
+ AuthenticationResult authenticationResult = (AuthenticationResult) clientRequest.getAttribute( + AuthConfig.DRUID_AUTHENTICATION_RESULT); + if (authenticationResult != null && authenticationResult.getAuthenticatedBy() != null) { + Authenticator authenticator = authenticatorMapper.getAuthenticatorMap() + .get(authenticationResult.getAuthenticatedBy()); + if (authenticator != null) { + authenticator.decorateProxyRequest( + clientRequest, + proxyResponse, + proxyRequest + ); + } else { + log.error("Can not find Authenticator with Name [%s]", authenticationResult.getAuthenticatedBy()); + } + } super.sendProxyRequest( clientRequest, proxyResponse, @@ -320,7 +358,11 @@ protected Response.Listener newProxyResponseListener( @Override protected String rewriteTarget(HttpServletRequest request) { - return rewriteURI(request, (String) request.getAttribute(SCHEME_ATTRIBUTE), (String) request.getAttribute(HOST_ATTRIBUTE)).toString(); + return rewriteURI( + request, + (String) request.getAttribute(SCHEME_ATTRIBUTE), + (String) request.getAttribute(HOST_ATTRIBUTE) + ).toString(); } protected URI rewriteURI(HttpServletRequest request, String scheme, String host) @@ -331,15 +373,15 @@ protected URI rewriteURI(HttpServletRequest request, String scheme, String host) protected static URI makeURI(String scheme, String host, String requestURI, String rawQueryString) { try { - return new URI( - scheme, - host, - requestURI, - rawQueryString == null ? null : URLDecoder.decode(rawQueryString, "UTF-8"), - null - ); + return new URIBuilder() + .setScheme(scheme) + .setHost(host) + .setPath(requestURI) + // No need to encode-decode queryString, it is already encoded + .setQuery(rawQueryString) + .build(); } - catch (UnsupportedEncodingException | URISyntaxException e) { + catch (URISyntaxException e) { log.error(e, "Unable to rewrite URI [%s]", e.getMessage()); throw Throwables.propagate(e); } @@ -348,7 +390,7 @@ protected static URI makeURI(String scheme, String host, String requestURI, Stri @Override protected HttpClient newHttpClient() { - return escalator.createEscalatedJettyClient(httpClientProvider.get()); + return httpClientProvider.get(); } @Override diff --git a/server/src/main/java/io/druid/server/ClientQuerySegmentWalker.java b/server/src/main/java/io/druid/server/ClientQuerySegmentWalker.java index 993747b7a3e2..768e284ca1c5 100644 --- a/server/src/main/java/io/druid/server/ClientQuerySegmentWalker.java +++ b/server/src/main/java/io/druid/server/ClientQuerySegmentWalker.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.CachingClusteredClient; import io.druid.query.FluentQueryRunnerBuilder; import io.druid.query.PostProcessingOperator; diff --git a/server/src/main/java/io/druid/server/QueryLifecycle.java b/server/src/main/java/io/druid/server/QueryLifecycle.java index 1f25f370e279..3c03314cd595 100644 --- a/server/src/main/java/io/druid/server/QueryLifecycle.java +++ b/server/src/main/java/io/druid/server/QueryLifecycle.java @@ -21,7 +21,6 @@ import com.google.common.base.Strings; import com.google.common.collect.Iterables; -import com.metamx.emitter.service.ServiceEmitter; import io.druid.client.DirectDruidClient; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.ISE; @@ -29,6 +28,7 @@ import 
io.druid.java.util.common.guava.SequenceWrapper; import io.druid.java.util.common.guava.Sequences; import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.query.DruidMetrics; import io.druid.query.GenericQueryMetricsFactory; import io.druid.query.Query; @@ -249,10 +249,7 @@ public QueryResponse execute() { transition(State.AUTHORIZED, State.EXECUTING); - final Map responseContext = DirectDruidClient.makeResponseContextForQuery( - queryPlus.getQuery(), - System.currentTimeMillis() - ); + final Map responseContext = DirectDruidClient.makeResponseContextForQuery(); final Sequence res = queryPlus.run(texasRanger, responseContext); diff --git a/server/src/main/java/io/druid/server/QueryLifecycleFactory.java b/server/src/main/java/io/druid/server/QueryLifecycleFactory.java index 9b8b1c5f147b..e22d1ba7ccb3 100644 --- a/server/src/main/java/io/druid/server/QueryLifecycleFactory.java +++ b/server/src/main/java/io/druid/server/QueryLifecycleFactory.java @@ -20,7 +20,7 @@ package io.druid.server; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.guice.LazySingleton; import io.druid.query.GenericQueryMetricsFactory; import io.druid.query.QuerySegmentWalker; diff --git a/server/src/main/java/io/druid/server/QueryResource.java b/server/src/main/java/io/druid/server/QueryResource.java index 8c1208709116..cd8cc14a6000 100644 --- a/server/src/main/java/io/druid/server/QueryResource.java +++ b/server/src/main/java/io/druid/server/QueryResource.java @@ -30,7 +30,7 @@ import com.google.common.collect.Sets; import com.google.common.io.CountingOutputStream; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.client.DirectDruidClient; import io.druid.guice.LazySingleton; import io.druid.guice.annotations.Json; diff --git a/server/src/main/java/io/druid/server/SegmentManager.java b/server/src/main/java/io/druid/server/SegmentManager.java index 3a7ba3cdbfd1..c93528327315 100644 --- a/server/src/main/java/io/druid/server/SegmentManager.java +++ b/server/src/main/java/io/druid/server/SegmentManager.java @@ -22,7 +22,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Ordering; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.common.guava.SettableSupplier; import io.druid.segment.ReferenceCountingSegment; import io.druid.segment.Segment; diff --git a/server/src/main/java/io/druid/server/SetAndVerifyContextQueryRunner.java b/server/src/main/java/io/druid/server/SetAndVerifyContextQueryRunner.java index 637b9dd14fb0..7901e3674b09 100644 --- a/server/src/main/java/io/druid/server/SetAndVerifyContextQueryRunner.java +++ b/server/src/main/java/io/druid/server/SetAndVerifyContextQueryRunner.java @@ -19,6 +19,8 @@ package io.druid.server; +import com.google.common.collect.ImmutableMap; +import io.druid.client.DirectDruidClient; import io.druid.java.util.common.guava.Sequence; import io.druid.query.Query; import io.druid.query.QueryContexts; @@ -35,11 +37,13 @@ public class SetAndVerifyContextQueryRunner implements QueryRunner { private final ServerConfig serverConfig; private final QueryRunner baseRunner; + private final long startTimeMillis; public SetAndVerifyContextQueryRunner(ServerConfig serverConfig, QueryRunner 
baseRunner) { this.serverConfig = serverConfig; this.baseRunner = baseRunner; + this.startTimeMillis = System.currentTimeMillis(); } @Override @@ -54,12 +58,12 @@ public Sequence run(QueryPlus queryPlus, Map responseContext) ); } - public static > QueryType withTimeoutAndMaxScatterGatherBytes( + public > QueryType withTimeoutAndMaxScatterGatherBytes( final QueryType query, ServerConfig serverConfig ) { - return (QueryType) QueryContexts.verifyMaxQueryTimeout( + Query newQuery = QueryContexts.verifyMaxQueryTimeout( QueryContexts.withMaxScatterGatherBytes( QueryContexts.withDefaultTimeout( (Query) query, @@ -69,5 +73,6 @@ public static > QueryType withTimeoutAndMaxScatter ), serverConfig.getMaxQueryTimeout() ); + return (QueryType) newQuery.withOverriddenContext(ImmutableMap.of(DirectDruidClient.QUERY_FAIL_TIME, this.startTimeMillis + QueryContexts.getTimeout(newQuery))); } } diff --git a/server/src/main/java/io/druid/server/audit/SQLAuditManager.java b/server/src/main/java/io/druid/server/audit/SQLAuditManager.java index da221868960a..de556c349c43 100644 --- a/server/src/main/java/io/druid/server/audit/SQLAuditManager.java +++ b/server/src/main/java/io/druid/server/audit/SQLAuditManager.java @@ -22,8 +22,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Supplier; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.audit.AuditEntry; import io.druid.audit.AuditManager; diff --git a/server/src/main/java/io/druid/server/audit/SQLAuditManagerProvider.java b/server/src/main/java/io/druid/server/audit/SQLAuditManagerProvider.java index 4ecbc602cf5c..864c81de1c43 100644 --- a/server/src/main/java/io/druid/server/audit/SQLAuditManagerProvider.java +++ b/server/src/main/java/io/druid/server/audit/SQLAuditManagerProvider.java @@ -23,7 +23,7 @@ import com.google.common.base.Supplier; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.audit.AuditManager; import io.druid.guice.annotations.Json; import io.druid.java.util.common.lifecycle.Lifecycle; diff --git a/server/src/main/java/io/druid/server/coordination/SegmentLoadDropHandler.java b/server/src/main/java/io/druid/server/coordination/SegmentLoadDropHandler.java index 1e84da5e20a9..5774c4bc8e61 100644 --- a/server/src/main/java/io/druid/server/coordination/SegmentLoadDropHandler.java +++ b/server/src/main/java/io/druid/server/coordination/SegmentLoadDropHandler.java @@ -35,7 +35,7 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.guice.ManageLifecycle; import io.druid.java.util.common.ISE; import io.druid.java.util.common.concurrent.Execs; @@ -73,7 +73,11 @@ public class SegmentLoadDropHandler implements DataSegmentChangeHandler { private static final EmittingLogger log = new EmittingLogger(SegmentLoadDropHandler.class); - private final Object lock = new Object(); + // Synchronizes removals from segmentsToDelete + private final Object segmentDeleteLock = new Object(); + + // Synchronizes start/stop of this object. 
+ private final Object startStopLock = new Object(); private final ObjectMapper jsonMapper; private final SegmentLoaderConfig config; @@ -137,7 +141,7 @@ public SegmentLoadDropHandler( @LifecycleStart public void start() throws IOException { - synchronized (lock) { + synchronized (startStopLock) { if (started) { return; } @@ -159,7 +163,7 @@ public void start() throws IOException @LifecycleStop public void stop() { - synchronized (lock) { + synchronized (startStopLock) { if (!started) { return; } @@ -296,7 +300,7 @@ each time when addSegment() is called, it has to wait for the lock in order to m things slow. Given that in most cases segmentsToDelete.contains(segment) returns false, it will save a lot of cost of acquiring lock by doing the "contains" check outside the synchronized block. */ - synchronized (lock) { + synchronized (segmentDeleteLock) { segmentsToDelete.remove(segment); } } @@ -423,7 +427,7 @@ private void removeSegment( public void run() { try { - synchronized (lock) { + synchronized (segmentDeleteLock) { if (segmentsToDelete.remove(segment)) { segmentManager.dropSegment(segment); diff --git a/server/src/main/java/io/druid/server/coordination/ServerManager.java b/server/src/main/java/io/druid/server/coordination/ServerManager.java index 39da89d03fd3..7cee07003cbb 100644 --- a/server/src/main/java/io/druid/server/coordination/ServerManager.java +++ b/server/src/main/java/io/druid/server/coordination/ServerManager.java @@ -23,8 +23,8 @@ import com.google.common.base.Function; import com.google.common.collect.Iterables; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.CachingQueryRunner; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; diff --git a/server/src/main/java/io/druid/server/coordination/ZkCoordinator.java b/server/src/main/java/io/druid/server/coordination/ZkCoordinator.java index 98d226e99448..ea02415b9d23 100644 --- a/server/src/main/java/io/druid/server/coordination/ZkCoordinator.java +++ b/server/src/main/java/io/druid/server/coordination/ZkCoordinator.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Throwables; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.concurrent.Execs; import io.druid.java.util.common.lifecycle.LifecycleStart; import io.druid.java.util.common.lifecycle.LifecycleStop; diff --git a/server/src/main/java/io/druid/server/coordinator/CachingCostBalancerStrategyFactory.java b/server/src/main/java/io/druid/server/coordinator/CachingCostBalancerStrategyFactory.java index 6a206b0f87a1..a0bd835d1839 100644 --- a/server/src/main/java/io/druid/server/coordinator/CachingCostBalancerStrategyFactory.java +++ b/server/src/main/java/io/druid/server/coordinator/CachingCostBalancerStrategyFactory.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.JacksonInject; import com.fasterxml.jackson.annotation.JsonCreator; import com.google.common.util.concurrent.ListeningExecutorService; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.client.ServerInventoryView; import io.druid.client.ServerView; import io.druid.java.util.common.concurrent.Execs; diff --git 
a/server/src/main/java/io/druid/server/coordinator/CostBalancerStrategy.java b/server/src/main/java/io/druid/server/coordinator/CostBalancerStrategy.java index b9a82fe5468e..c4cd0f7cf882 100644 --- a/server/src/main/java/io/druid/server/coordinator/CostBalancerStrategy.java +++ b/server/src/main/java/io/druid/server/coordinator/CostBalancerStrategy.java @@ -25,7 +25,7 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.Pair; import io.druid.timeline.DataSegment; diff --git a/server/src/main/java/io/druid/server/coordinator/CuratorLoadQueuePeon.java b/server/src/main/java/io/druid/server/coordinator/CuratorLoadQueuePeon.java index 99761cd43a28..597495493b64 100644 --- a/server/src/main/java/io/druid/server/coordinator/CuratorLoadQueuePeon.java +++ b/server/src/main/java/io/druid/server/coordinator/CuratorLoadQueuePeon.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.ISE; import io.druid.java.util.common.concurrent.ScheduledExecutors; import io.druid.server.coordination.DataSegmentChangeRequest; diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinator.java b/server/src/main/java/io/druid/server/coordinator/DruidCoordinator.java index 7239aca5b223..428fc5588cef 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinator.java +++ b/server/src/main/java/io/druid/server/coordinator/DruidCoordinator.java @@ -30,8 +30,8 @@ import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.DruidDataSource; import io.druid.client.DruidServer; import io.druid.client.ImmutableDruidDataSource; @@ -254,6 +254,7 @@ public Map getLoadManagementPeons() .computeIfAbsent(tier, ignored -> new Object2LongOpenHashMap<>()) .addTo(segment.getDataSource(), Math.max(ruleReplicants - currentReplicants, 0)); }); + break; // only the first matching rule applies } } @@ -299,7 +300,11 @@ public Map getLoadStatus() for (DruidServer druidServer : serverInventoryView.getInventory()) { final DruidDataSource loadedView = druidServer.getDataSource(dataSource.getName()); if (loadedView != null) { - segments.removeAll(loadedView.getSegments()); + // This does not use segments.removeAll(loadedView.getSegments()) for performance reasons. + // Please see https://github.com/druid-io/druid/pull/5632 and LoadStatusBenchmark for more info. 
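+ // (When the argument collection is at least as large as the set, AbstractSet.removeAll iterates the set and calls + // contains() on the argument; for a non-Set argument each contains() is a linear scan, so per-element remove() is cheaper.)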
+ for (DataSegment serverSegment : loadedView.getSegments()) { + segments.remove(serverSegment); + } } } final int unloadedSegmentSize = segments.size(); diff --git a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorRuntimeParams.java b/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorRuntimeParams.java index e2b51771c175..936dd93a8c55 100644 --- a/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorRuntimeParams.java +++ b/server/src/main/java/io/druid/server/coordinator/DruidCoordinatorRuntimeParams.java @@ -21,7 +21,7 @@ import com.google.common.collect.Maps; import com.google.common.collect.Sets; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.ImmutableDruidDataSource; import io.druid.java.util.common.DateTimes; import io.druid.metadata.MetadataRuleManager; diff --git a/server/src/main/java/io/druid/server/coordinator/HttpLoadQueuePeon.java b/server/src/main/java/io/druid/server/coordinator/HttpLoadQueuePeon.java index 17e65ce804c9..dbeeb7386018 100644 --- a/server/src/main/java/io/druid/server/coordinator/HttpLoadQueuePeon.java +++ b/server/src/main/java/io/druid/server/coordinator/HttpLoadQueuePeon.java @@ -28,14 +28,14 @@ import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import com.metamx.common.RE; -import com.metamx.common.StringUtils; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.io.AppendableByteArrayInputStream; -import com.metamx.http.client.response.ClientResponse; -import com.metamx.http.client.response.InputStreamResponseHandler; +import io.druid.java.util.common.RE; +import io.druid.java.util.common.StringUtils; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.io.AppendableByteArrayInputStream; +import io.druid.java.util.http.client.response.ClientResponse; +import io.druid.java.util.http.client.response.InputStreamResponseHandler; import io.druid.java.util.common.ISE; import io.druid.java.util.common.concurrent.ScheduledExecutors; import io.druid.server.coordination.DataSegmentChangeCallback; @@ -135,7 +135,7 @@ public HttpLoadQueuePeon( try { this.changeRequestURL = new URL( new URL(baseUrl), - StringUtils.safeFormat( + StringUtils.nonStrictFormat( "druid-internal/v1/segments/changeRequests?timeout=%d", config.getHttpLoadQueuePeonHostTimeout().getMillis() ) @@ -272,7 +272,7 @@ private void logRequestFailure(Throwable t) { log.error( t, - "Request[%s] Failed with code[%s] and status[%s]. Reason[%s].", + "Request[%s] Failed with status[%s]. 
Reason[%s].", changeRequestURL, responseHandler.status, responseHandler.description diff --git a/server/src/main/java/io/druid/server/coordinator/LoadQueueTaskMaster.java b/server/src/main/java/io/druid/server/coordinator/LoadQueueTaskMaster.java index c24a98a2ff2f..f395e09be17a 100644 --- a/server/src/main/java/io/druid/server/coordinator/LoadQueueTaskMaster.java +++ b/server/src/main/java/io/druid/server/coordinator/LoadQueueTaskMaster.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.inject.Inject; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.druid.client.ImmutableDruidServer; import io.druid.guice.annotations.Global; import io.druid.guice.annotations.Json; diff --git a/server/src/main/java/io/druid/server/coordinator/ReplicationThrottler.java b/server/src/main/java/io/druid/server/coordinator/ReplicationThrottler.java index dcf69b5381c1..68f48ece8dc9 100644 --- a/server/src/main/java/io/druid/server/coordinator/ReplicationThrottler.java +++ b/server/src/main/java/io/druid/server/coordinator/ReplicationThrottler.java @@ -21,7 +21,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.StringUtils; import java.util.List; diff --git a/server/src/main/java/io/druid/server/coordinator/SegmentReplicantLookup.java b/server/src/main/java/io/druid/server/coordinator/SegmentReplicantLookup.java index a713c8d181c1..357eeb99be7c 100644 --- a/server/src/main/java/io/druid/server/coordinator/SegmentReplicantLookup.java +++ b/server/src/main/java/io/druid/server/coordinator/SegmentReplicantLookup.java @@ -36,6 +36,7 @@ public class SegmentReplicantLookup public static SegmentReplicantLookup make(DruidCluster cluster) { final Table segmentsInCluster = HashBasedTable.create(); + final Table loadingSegments = HashBasedTable.create(); for (SortedSet serversByType : cluster.getSortedHistoricalsByTier()) { for (ServerHolder serverHolder : serversByType) { @@ -48,17 +49,29 @@ public static SegmentReplicantLookup make(DruidCluster cluster) } segmentsInCluster.put(segment.getIdentifier(), server.getTier(), ++numReplicants); } + + // Also account for queued segments + for (DataSegment segment : serverHolder.getPeon().getSegmentsToLoad()) { + Integer numReplicants = loadingSegments.get(segment.getIdentifier(), server.getTier()); + if (numReplicants == null) { + numReplicants = 0; + } + loadingSegments.put(segment.getIdentifier(), server.getTier(), ++numReplicants); + } } } - return new SegmentReplicantLookup(segmentsInCluster); + return new SegmentReplicantLookup(segmentsInCluster, loadingSegments); } private final Table segmentsInCluster; - private SegmentReplicantLookup(Table segmentsInCluster) + private final Table loadingSegments; + + private SegmentReplicantLookup(Table segmentsInCluster, Table loadingSegments) { this.segmentsInCluster = segmentsInCluster; + this.loadingSegments = loadingSegments; } public Map getClusterTiers(String segmentId) @@ -82,4 +95,30 @@ public int getLoadedReplicants(String segmentId, String tier) Integer retVal = segmentsInCluster.get(segmentId, tier); return (retVal == null) ? 0 : retVal; } + + public int getLoadingReplicants(String segmentId, String tier) + { + Integer retVal = loadingSegments.get(segmentId, tier); + return (retVal == null) ? 
0 : retVal; + } + + public int getLoadingReplicants(String segmentId) + { + Map allTiers = loadingSegments.row(segmentId); + int retVal = 0; + for (Integer replicants : allTiers.values()) { + retVal += replicants; + } + return retVal; + } + + public int getTotalReplicants(String segmentId) + { + return getLoadedReplicants(segmentId) + getLoadingReplicants(segmentId); + } + + public int getTotalReplicants(String segmentId, String tier) + { + return getLoadedReplicants(segmentId, tier) + getLoadingReplicants(segmentId, tier); + } } diff --git a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorBalancer.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorBalancer.java index 88cd9b50a128..cc266d705df6 100644 --- a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorBalancer.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorBalancer.java @@ -20,7 +20,7 @@ package io.druid.server.coordinator.helper; import com.google.common.collect.Lists; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.client.ImmutableDruidServer; import io.druid.java.util.common.StringUtils; import io.druid.server.coordinator.BalancerSegmentHolder; diff --git a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorLogger.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorLogger.java index ced5efb1dc78..190a9fb0d507 100644 --- a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorLogger.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorLogger.java @@ -19,8 +19,8 @@ package io.druid.server.coordinator.helper; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.client.ImmutableDruidServer; import io.druid.java.util.common.logger.Logger; import io.druid.query.DruidMetrics; diff --git a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorRuleRunner.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorRuleRunner.java index 7924e45adbb5..5fa0ff17b220 100644 --- a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorRuleRunner.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorRuleRunner.java @@ -20,8 +20,8 @@ package io.druid.server.coordinator.helper; import com.google.common.collect.Lists; -import com.metamx.common.guava.Comparators; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.common.guava.Comparators; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.DateTimes; import io.druid.metadata.MetadataRuleManager; import io.druid.server.coordinator.CoordinatorStats; diff --git a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentMerger.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentMerger.java index 708e73287a68..9a15c6a3d154 100644 --- a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentMerger.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorSegmentMerger.java @@ -29,7 +29,7 @@ import com.google.common.collect.Multiset; import com.google.common.collect.Ordering; import com.google.inject.Inject; -import 
com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.client.indexing.IndexingServiceClient; import io.druid.common.config.JacksonConfigManager; import io.druid.java.util.common.DateTimes; diff --git a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorVersionConverter.java b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorVersionConverter.java index d37d0e660f51..f14bf0dbbce1 100644 --- a/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorVersionConverter.java +++ b/server/src/main/java/io/druid/server/coordinator/helper/DruidCoordinatorVersionConverter.java @@ -20,7 +20,7 @@ package io.druid.server.coordinator.helper; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.client.indexing.IndexingServiceClient; import io.druid.common.config.JacksonConfigManager; import io.druid.segment.IndexIO; diff --git a/server/src/main/java/io/druid/server/coordinator/rules/BroadcastDistributionRule.java b/server/src/main/java/io/druid/server/coordinator/rules/BroadcastDistributionRule.java index e32268044b4c..6ce76f7f421f 100644 --- a/server/src/main/java/io/druid/server/coordinator/rules/BroadcastDistributionRule.java +++ b/server/src/main/java/io/druid/server/coordinator/rules/BroadcastDistributionRule.java @@ -19,7 +19,7 @@ package io.druid.server.coordinator.rules; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.server.coordinator.CoordinatorStats; import io.druid.server.coordinator.DruidCoordinator; import io.druid.server.coordinator.DruidCoordinatorRuntimeParams; diff --git a/server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java b/server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java index 2d6c163ced81..dc615f800518 100644 --- a/server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java +++ b/server/src/main/java/io/druid/server/coordinator/rules/LoadRule.java @@ -19,7 +19,7 @@ package io.druid.server.coordinator.rules; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.IAE; import io.druid.server.coordinator.CoordinatorStats; import io.druid.server.coordinator.DruidCluster; @@ -95,8 +95,9 @@ private void assign( final CoordinatorStats stats ) { - // if primary replica already exists - if (!currentReplicants.isEmpty()) { + // if primary replica already exists or is loading + final int loading = params.getSegmentReplicantLookup().getTotalReplicants(segment.getIdentifier()); + if (!currentReplicants.isEmpty() || loading > 0) { assignReplicas(params, segment, stats, null); } else { final ServerHolder primaryHolderToLoad = assignPrimary(params, segment); @@ -169,7 +170,6 @@ private ServerHolder assignPrimary( if (targetReplicantsInTier <= 0) { continue; } - final String tier = entry.getKey(); final List holders = getFilteredHolders( @@ -228,7 +228,7 @@ private void assignReplicas( final int numAssigned = assignReplicasForTier( tier, entry.getIntValue(), - currentReplicants.getOrDefault(tier, 0), + params.getSegmentReplicantLookup().getTotalReplicants(segment.getIdentifier(), tier), params, createLoadQueueSizeLimitingPredicate(params), segment diff --git a/server/src/main/java/io/druid/server/emitter/ComposingEmitterModule.java b/server/src/main/java/io/druid/server/emitter/ComposingEmitterModule.java 
index ee014f1d69b9..ce8b34e8c36d 100644 --- a/server/src/main/java/io/druid/server/emitter/ComposingEmitterModule.java +++ b/server/src/main/java/io/druid/server/emitter/ComposingEmitterModule.java @@ -28,8 +28,8 @@ import com.google.inject.Provides; import com.google.inject.name.Named; import com.google.inject.name.Names; -import com.metamx.emitter.core.ComposingEmitter; -import com.metamx.emitter.core.Emitter; +import io.druid.java.util.emitter.core.ComposingEmitter; +import io.druid.java.util.emitter.core.Emitter; import io.druid.guice.JsonConfigProvider; import io.druid.guice.ManageLifecycle; import io.druid.initialization.DruidModule; diff --git a/server/src/main/java/io/druid/server/emitter/EmitterModule.java b/server/src/main/java/io/druid/server/emitter/EmitterModule.java index a0e08225ee1a..7df0137b59c3 100644 --- a/server/src/main/java/io/druid/server/emitter/EmitterModule.java +++ b/server/src/main/java/io/druid/server/emitter/EmitterModule.java @@ -34,9 +34,9 @@ import com.google.inject.multibindings.MapBinder; import com.google.inject.name.Named; import com.google.inject.name.Names; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.guice.LazySingleton; import io.druid.guice.ManageLifecycle; import io.druid.guice.annotations.Self; diff --git a/server/src/main/java/io/druid/server/emitter/HttpEmitterModule.java b/server/src/main/java/io/druid/server/emitter/HttpEmitterModule.java index 36e5f46ab16d..b09ba21d63f1 100644 --- a/server/src/main/java/io/druid/server/emitter/HttpEmitterModule.java +++ b/server/src/main/java/io/druid/server/emitter/HttpEmitterModule.java @@ -26,9 +26,9 @@ import com.google.inject.Provides; import com.google.inject.name.Named; import com.google.inject.util.Providers; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.HttpEmitterConfig; -import com.metamx.emitter.core.HttpPostEmitter; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.HttpEmitterConfig; +import io.druid.java.util.emitter.core.HttpPostEmitter; import io.druid.guice.JsonConfigProvider; import io.druid.guice.LazySingleton; import io.druid.guice.ManageLifecycle; diff --git a/server/src/main/java/io/druid/server/emitter/HttpEmittingMonitor.java b/server/src/main/java/io/druid/server/emitter/HttpEmittingMonitor.java index 0bb85ef55ce8..f55a7837ab4a 100644 --- a/server/src/main/java/io/druid/server/emitter/HttpEmittingMonitor.java +++ b/server/src/main/java/io/druid/server/emitter/HttpEmittingMonitor.java @@ -21,14 +21,14 @@ import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.HttpPostEmitter; -import com.metamx.emitter.core.ParametrizedUriEmitter; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.AbstractMonitor; -import com.metamx.metrics.FeedDefiningMonitor; -import com.metamx.metrics.HttpPostEmitterMonitor; -import com.metamx.metrics.ParametrizedUriEmitterMonitor; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.HttpPostEmitter; +import io.druid.java.util.emitter.core.ParametrizedUriEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.AbstractMonitor; +import 
io.druid.java.util.metrics.FeedDefiningMonitor; +import io.druid.java.util.metrics.HttpPostEmitterMonitor; +import io.druid.java.util.metrics.ParametrizedUriEmitterMonitor; /** * Able to monitor {@link HttpPostEmitter} or {@link ParametrizedUriEmitter}, which is based on the former. diff --git a/server/src/main/java/io/druid/server/emitter/LogEmitterModule.java b/server/src/main/java/io/druid/server/emitter/LogEmitterModule.java index 6372321d70fb..a0b5877e3e5d 100644 --- a/server/src/main/java/io/druid/server/emitter/LogEmitterModule.java +++ b/server/src/main/java/io/druid/server/emitter/LogEmitterModule.java @@ -25,9 +25,9 @@ import com.google.inject.Module; import com.google.inject.Provides; import com.google.inject.name.Named; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.LoggingEmitter; -import com.metamx.emitter.core.LoggingEmitterConfig; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.LoggingEmitter; +import io.druid.java.util.emitter.core.LoggingEmitterConfig; import io.druid.guice.JsonConfigProvider; import io.druid.guice.ManageLifecycle; diff --git a/server/src/main/java/io/druid/server/emitter/NoopEmitterModule.java b/server/src/main/java/io/druid/server/emitter/NoopEmitterModule.java index ea5a838fe65d..3ed5d96c8247 100644 --- a/server/src/main/java/io/druid/server/emitter/NoopEmitterModule.java +++ b/server/src/main/java/io/druid/server/emitter/NoopEmitterModule.java @@ -23,8 +23,8 @@ import com.google.inject.Module; import com.google.inject.Provides; import com.google.inject.name.Named; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.NoopEmitter; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.NoopEmitter; import io.druid.guice.ManageLifecycle; /** diff --git a/server/src/main/java/io/druid/server/emitter/ParametrizedUriEmitterModule.java b/server/src/main/java/io/druid/server/emitter/ParametrizedUriEmitterModule.java index 6cba3bea7696..3ea1f672eb9b 100644 --- a/server/src/main/java/io/druid/server/emitter/ParametrizedUriEmitterModule.java +++ b/server/src/main/java/io/druid/server/emitter/ParametrizedUriEmitterModule.java @@ -25,9 +25,9 @@ import com.google.inject.Module; import com.google.inject.Provides; import com.google.inject.name.Named; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.ParametrizedUriEmitter; -import com.metamx.emitter.core.ParametrizedUriEmitterConfig; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.ParametrizedUriEmitter; +import io.druid.java.util.emitter.core.ParametrizedUriEmitterConfig; import io.druid.guice.JsonConfigProvider; import io.druid.guice.ManageLifecycle; import io.druid.java.util.common.lifecycle.Lifecycle; diff --git a/server/src/main/java/io/druid/server/http/DatasourcesResource.java b/server/src/main/java/io/druid/server/http/DatasourcesResource.java index c53e37158fbd..a763c613cc56 100644 --- a/server/src/main/java/io/druid/server/http/DatasourcesResource.java +++ b/server/src/main/java/io/druid/server/http/DatasourcesResource.java @@ -65,12 +65,15 @@ import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; import java.util.stream.Collectors; /** @@ -143,7 
+146,7 @@ public Response getTheDataSource( @QueryParam("full") final String full ) { - ImmutableDruidDataSource dataSource = getDataSource(dataSourceName); + final ImmutableDruidDataSource dataSource = getDataSource(dataSourceName); if (dataSource == null) { return Response.noContent().build(); @@ -508,7 +511,7 @@ private ImmutableDruidDataSource getDataSource(final String dataSourceName) return null; } - Map segmentMap = Maps.newHashMap(); + final SortedMap segmentMap = new TreeMap<>(); for (ImmutableDruidDataSource dataSource : dataSources) { Iterable segments = dataSource.getSegments(); for (DataSegment segment : segments) { @@ -516,11 +519,7 @@ private ImmutableDruidDataSource getDataSource(final String dataSourceName) } } - return new ImmutableDruidDataSource( - dataSourceName, - ImmutableMap.of(), - ImmutableMap.copyOf(segmentMap) - ); + return new ImmutableDruidDataSource(dataSourceName, Collections.emptyMap(), segmentMap); } private Pair> getSegment(String segmentId) diff --git a/server/src/main/java/io/druid/server/http/SegmentListerResource.java b/server/src/main/java/io/druid/server/http/SegmentListerResource.java index 600806cc7e13..20f6512e2cd7 100644 --- a/server/src/main/java/io/druid/server/http/SegmentListerResource.java +++ b/server/src/main/java/io/druid/server/http/SegmentListerResource.java @@ -25,7 +25,7 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import com.sun.jersey.spi.container.ResourceFilters; import io.druid.guice.annotations.Json; import io.druid.guice.annotations.Smile; diff --git a/server/src/main/java/io/druid/server/initialization/AuthorizerMapperModule.java b/server/src/main/java/io/druid/server/initialization/AuthorizerMapperModule.java index 4ba019550833..7a5b17a484b2 100644 --- a/server/src/main/java/io/druid/server/initialization/AuthorizerMapperModule.java +++ b/server/src/main/java/io/druid/server/initialization/AuthorizerMapperModule.java @@ -95,11 +95,20 @@ public AuthorizerMapper get() // Default is allow all if (authorizers == null) { + AllowAllAuthorizer allowAllAuthorizer = new AllowAllAuthorizer(); + authorizerMap.put(AuthConfig.ALLOW_ALL_NAME, allowAllAuthorizer); + return new AuthorizerMapper(null) { @Override public Authorizer getAuthorizer(String name) { - return new AllowAllAuthorizer(); + return allowAllAuthorizer; + } + + @Override + public Map getAuthorizerMap() + { + return authorizerMap; } }; } diff --git a/server/src/main/java/io/druid/server/initialization/jetty/JettyServerModule.java b/server/src/main/java/io/druid/server/initialization/jetty/JettyServerModule.java index f482e91f547b..4eb92b253edd 100644 --- a/server/src/main/java/io/druid/server/initialization/jetty/JettyServerModule.java +++ b/server/src/main/java/io/druid/server/initialization/jetty/JettyServerModule.java @@ -33,10 +33,10 @@ import com.google.inject.Scopes; import com.google.inject.Singleton; import com.google.inject.multibindings.Multibinder; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; -import com.metamx.metrics.MonitorUtils; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; +import io.druid.java.util.metrics.MonitorUtils; import 
com.sun.jersey.api.core.DefaultResourceConfig; import com.sun.jersey.api.core.ResourceConfig; import com.sun.jersey.guice.JerseyServletModule; diff --git a/server/src/main/java/io/druid/server/log/EmittingRequestLogger.java b/server/src/main/java/io/druid/server/log/EmittingRequestLogger.java index 2e57f747f467..108636a3ddd1 100644 --- a/server/src/main/java/io/druid/server/log/EmittingRequestLogger.java +++ b/server/src/main/java/io/druid/server/log/EmittingRequestLogger.java @@ -22,9 +22,9 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonValue; import com.google.common.collect.ImmutableMap; -import com.metamx.emitter.core.Event; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceEventBuilder; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEventBuilder; import io.druid.query.Query; import io.druid.server.QueryStats; import io.druid.server.RequestLogLine; diff --git a/server/src/main/java/io/druid/server/log/EmittingRequestLoggerProvider.java b/server/src/main/java/io/druid/server/log/EmittingRequestLoggerProvider.java index 534a744e42d0..0b130d8e3002 100644 --- a/server/src/main/java/io/druid/server/log/EmittingRequestLoggerProvider.java +++ b/server/src/main/java/io/druid/server/log/EmittingRequestLoggerProvider.java @@ -24,7 +24,7 @@ import com.fasterxml.jackson.annotation.JsonTypeName; import com.google.inject.Inject; import com.google.inject.Injector; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.logger.Logger; import javax.validation.constraints.NotNull; diff --git a/server/src/main/java/io/druid/server/lookup/cache/LookupCoordinatorManager.java b/server/src/main/java/io/druid/server/lookup/cache/LookupCoordinatorManager.java index 613c9b0d1663..8e7424c8f252 100644 --- a/server/src/main/java/io/druid/server/lookup/cache/LookupCoordinatorManager.java +++ b/server/src/main/java/io/druid/server/lookup/cache/LookupCoordinatorManager.java @@ -36,15 +36,8 @@ import com.google.common.util.concurrent.ListeningScheduledExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.ClientResponse; -import com.metamx.http.client.response.HttpResponseHandler; -import com.metamx.http.client.response.SequenceInputStreamResponseHandler; import io.druid.audit.AuditInfo; import io.druid.common.config.JacksonConfigManager; -import io.druid.java.util.common.concurrent.Execs; import io.druid.concurrent.LifecycleLock; import io.druid.discovery.DruidNodeDiscoveryProvider; import io.druid.guice.annotations.EscalatedGlobal; @@ -54,6 +47,13 @@ import io.druid.java.util.common.ISE; import io.druid.java.util.common.StreamUtils; import io.druid.java.util.common.StringUtils; +import io.druid.java.util.common.concurrent.Execs; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.ClientResponse; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.response.SequenceInputStreamResponseHandler; import 
io.druid.query.lookup.LookupsState; import io.druid.server.http.HostAndPortWithScheme; import io.druid.server.listener.resource.ListenerResource; @@ -124,8 +124,7 @@ public class LookupCoordinatorManager // Updated by config watching service private AtomicReference>> lookupMapConfigRef; - @VisibleForTesting - final LifecycleLock lifecycleLock = new LifecycleLock(); + private final LifecycleLock lifecycleLock = new LifecycleLock(); private ListeningScheduledExecutorService executorService; private ListenableScheduledFuture backgroundManagerFuture; @@ -333,6 +332,17 @@ public LookupExtractorFactoryMapContainer getLookup(final String tier, final Str return tierLookups.get(lookupName); } + public boolean isStarted() + { + return lifecycleLock.isStarted(); + } + + @VisibleForTesting + boolean awaitStarted(long waitTimeMs) + { + return lifecycleLock.awaitStarted(waitTimeMs, TimeUnit.MILLISECONDS); + } + // start() and stop() are synchronized so that they never run in parallel, in case ZK acts funny or a Druid bug makes the // coordinator gain and drop leadership in quick succession. public void start() @@ -439,8 +449,7 @@ public void stop() } finally { // so that a subsequent start() can happen, even if stop() failed with an exception - lifecycleLock.exitStop(); - lifecycleLock.reset(); + lifecycleLock.exitStopAndReset(); } } } diff --git a/server/src/main/java/io/druid/server/metrics/DruidMonitorSchedulerConfig.java b/server/src/main/java/io/druid/server/metrics/DruidMonitorSchedulerConfig.java index 65838149d48c..770d6bf5d1b9 100644 --- a/server/src/main/java/io/druid/server/metrics/DruidMonitorSchedulerConfig.java +++ b/server/src/main/java/io/druid/server/metrics/DruidMonitorSchedulerConfig.java @@ -20,7 +20,7 @@ package io.druid.server.metrics; import com.fasterxml.jackson.annotation.JsonProperty; -import com.metamx.metrics.MonitorSchedulerConfig; +import io.druid.java.util.metrics.MonitorSchedulerConfig; import org.joda.time.Duration; import org.joda.time.Period; diff --git a/server/src/main/java/io/druid/server/metrics/DruidSysMonitor.java b/server/src/main/java/io/druid/server/metrics/DruidSysMonitor.java index 83f23040519f..9fe9778b9bd2 100644 --- a/server/src/main/java/io/druid/server/metrics/DruidSysMonitor.java +++ b/server/src/main/java/io/druid/server/metrics/DruidSysMonitor.java @@ -21,7 +21,7 @@ import com.google.common.collect.Lists; import com.google.inject.Inject; -import com.metamx.metrics.SysMonitor; +import io.druid.java.util.metrics.SysMonitor; import io.druid.segment.loading.SegmentLoaderConfig; import io.druid.segment.loading.StorageLocationConfig; diff --git a/server/src/main/java/io/druid/server/metrics/EventReceiverFirehoseMonitor.java b/server/src/main/java/io/druid/server/metrics/EventReceiverFirehoseMonitor.java index 66fd4c1a6fda..ad5b36b5bf90 100644 --- a/server/src/main/java/io/druid/server/metrics/EventReceiverFirehoseMonitor.java +++ b/server/src/main/java/io/druid/server/metrics/EventReceiverFirehoseMonitor.java @@ -22,11 +22,11 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; -import com.metamx.metrics.KeyedDiff; -import com.metamx.metrics.MonitorUtils; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; +import 
io.druid.java.util.metrics.KeyedDiff; +import io.druid.java.util.metrics.MonitorUtils; import io.druid.query.DruidMetrics; import java.util.Map; diff --git a/server/src/main/java/io/druid/server/metrics/HistoricalMetricsMonitor.java b/server/src/main/java/io/druid/server/metrics/HistoricalMetricsMonitor.java index 3d775984a135..b5954d579fd2 100644 --- a/server/src/main/java/io/druid/server/metrics/HistoricalMetricsMonitor.java +++ b/server/src/main/java/io/druid/server/metrics/HistoricalMetricsMonitor.java @@ -20,9 +20,9 @@ package io.druid.server.metrics; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; import io.druid.client.DruidServerConfig; import io.druid.query.DruidMetrics; import io.druid.server.SegmentManager; diff --git a/server/src/main/java/io/druid/server/metrics/MetricsModule.java b/server/src/main/java/io/druid/server/metrics/MetricsModule.java index 1f24942c9351..6ad8ef43b59f 100644 --- a/server/src/main/java/io/druid/server/metrics/MetricsModule.java +++ b/server/src/main/java/io/druid/server/metrics/MetricsModule.java @@ -28,12 +28,12 @@ import com.google.inject.Module; import com.google.inject.Provides; import com.google.inject.name.Names; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.JvmCpuMonitor; -import com.metamx.metrics.JvmMonitor; -import com.metamx.metrics.Monitor; -import com.metamx.metrics.MonitorScheduler; -import com.metamx.metrics.SysMonitor; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.JvmCpuMonitor; +import io.druid.java.util.metrics.JvmMonitor; +import io.druid.java.util.metrics.Monitor; +import io.druid.java.util.metrics.MonitorScheduler; +import io.druid.java.util.metrics.SysMonitor; import io.druid.guice.DruidBinders; import io.druid.guice.JsonConfigProvider; import io.druid.guice.LazySingleton; diff --git a/server/src/main/java/io/druid/server/metrics/MonitorsConfig.java b/server/src/main/java/io/druid/server/metrics/MonitorsConfig.java index b7ba7912462f..05ccdc8a03fe 100644 --- a/server/src/main/java/io/druid/server/metrics/MonitorsConfig.java +++ b/server/src/main/java/io/druid/server/metrics/MonitorsConfig.java @@ -22,7 +22,8 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; -import com.metamx.metrics.Monitor; +import io.druid.java.util.common.logger.Logger; +import io.druid.java.util.metrics.Monitor; import io.druid.query.DruidMetrics; import javax.validation.constraints.NotNull; @@ -35,17 +36,37 @@ */ public class MonitorsConfig { - public final static String METRIC_DIMENSION_PREFIX = "druid.metrics.emitter.dimension."; + private static final Logger log = new Logger(MonitorsConfig.class); + + public static final String METRIC_DIMENSION_PREFIX = "druid.metrics.emitter.dimension."; + + /** + * Prior to 0.12.0, Druid used Monitor classes from the `com.metamx.metrics` package. + * In 0.12.0, these Monitor classes were moved into Druid under `io.druid.java.util.metrics`. + * See https://github.com/druid-io/druid/pull/5289 for details. 
+ * + * We automatically adjust old `com.metamx.metrics` package references to `io.druid.java.util.metrics` + * for backwards compatibility purposes, easing the upgrade process for users. + */ + public static final String OLD_METAMX_PACKAGE_NAME = "com.metamx.metrics"; + public static final String NEW_DRUID_PACKAGE_NAME = "io.druid.java.util.metrics"; @JsonProperty("monitors") @NotNull - private List> monitors = Lists.newArrayList(); + private List> monitors; public List> getMonitors() { return monitors; } + public MonitorsConfig( + @JsonProperty("monitors") List monitorNames + ) + { + monitors = getMonitorsFromNames(monitorNames); + } + @Override public String toString() { @@ -54,7 +75,6 @@ public String toString() '}'; } - public static Map mapOfDatasourceAndTaskID( final String datasource, final String taskId @@ -86,4 +106,31 @@ public static Map extractDimensions(Properties props, List> getMonitorsFromNames(List monitorNames) + { + List> monitors = Lists.newArrayList(); + if (monitorNames == null) { + return monitors; + } + try { + for (String monitorName : monitorNames) { + String effectiveMonitorName = monitorName.replace(OLD_METAMX_PACKAGE_NAME, NEW_DRUID_PACKAGE_NAME); + if (!effectiveMonitorName.equals(monitorName)) { + log.warn( + "Deprecated Monitor class name [%s] found, please use package %s instead of %s", + monitorName, + NEW_DRUID_PACKAGE_NAME, + OLD_METAMX_PACKAGE_NAME + ); + } + Class monitorClass = (Class) Class.forName(effectiveMonitorName); + monitors.add(monitorClass); + } + return monitors; + } + catch (ClassNotFoundException cnfe) { + throw new RuntimeException(cnfe); + } + } } diff --git a/server/src/main/java/io/druid/server/metrics/QueryCountStatsMonitor.java b/server/src/main/java/io/druid/server/metrics/QueryCountStatsMonitor.java index dcf09c928c83..3a30aab81280 100644 --- a/server/src/main/java/io/druid/server/metrics/QueryCountStatsMonitor.java +++ b/server/src/main/java/io/druid/server/metrics/QueryCountStatsMonitor.java @@ -20,10 +20,10 @@ import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; -import com.metamx.metrics.KeyedDiff; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; +import io.druid.java.util.metrics.KeyedDiff; import java.util.Map; diff --git a/server/src/main/java/io/druid/server/router/CoordinatorRuleManager.java b/server/src/main/java/io/druid/server/router/CoordinatorRuleManager.java index 755424475923..85d42a8e4c36 100644 --- a/server/src/main/java/io/druid/server/router/CoordinatorRuleManager.java +++ b/server/src/main/java/io/druid/server/router/CoordinatorRuleManager.java @@ -24,7 +24,7 @@ import com.google.common.base.Supplier; import com.google.common.collect.Lists; import com.google.inject.Inject; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.java.util.common.concurrent.Execs; import io.druid.discovery.DruidLeaderClient; import io.druid.guice.ManageLifecycle; diff --git a/server/src/main/java/io/druid/server/router/QueryHostFinder.java b/server/src/main/java/io/druid/server/router/QueryHostFinder.java index 55fcfd6ffc8b..fa077ef2d545 100644 --- a/server/src/main/java/io/druid/server/router/QueryHostFinder.java +++ 
b/server/src/main/java/io/druid/server/router/QueryHostFinder.java @@ -20,10 +20,10 @@ package io.druid.server.router; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; import io.druid.client.selector.Server; import io.druid.java.util.common.ISE; import io.druid.java.util.common.Pair; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.query.Query; import java.util.Collection; @@ -90,7 +90,7 @@ public Server findServerAvatica(String connectionId) return chosenServer; } - public Server getServer(Query query) + public Server pickServer(Query query) { Server server = findServer(query); @@ -107,7 +107,7 @@ public Server getServer(Query query) return server; } - public Server getDefaultServer() + public Server pickDefaultServer() { Server server = findDefaultServer(); diff --git a/server/src/main/java/io/druid/server/router/TieredBrokerHostSelector.java b/server/src/main/java/io/druid/server/router/TieredBrokerHostSelector.java index f52151fa23db..373b52f3e82b 100644 --- a/server/src/main/java/io/druid/server/router/TieredBrokerHostSelector.java +++ b/server/src/main/java/io/druid/server/router/TieredBrokerHostSelector.java @@ -25,7 +25,7 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Maps; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.client.selector.Server; import io.druid.discovery.DiscoveryDruidNode; import io.druid.discovery.DruidNodeDiscovery; diff --git a/server/src/main/java/io/druid/server/security/AllowAllAuthenticator.java b/server/src/main/java/io/druid/server/security/AllowAllAuthenticator.java index c87d67546ae2..1911a570267b 100644 --- a/server/src/main/java/io/druid/server/security/AllowAllAuthenticator.java +++ b/server/src/main/java/io/druid/server/security/AllowAllAuthenticator.java @@ -38,7 +38,7 @@ public class AllowAllAuthenticator implements Authenticator public static final AuthenticationResult ALLOW_ALL_RESULT = new AuthenticationResult( AuthConfig.ALLOW_ALL_NAME, AuthConfig.ALLOW_ALL_NAME, - null + AuthConfig.ALLOW_ALL_NAME, null ); @Override diff --git a/server/src/main/java/io/druid/server/security/AllowOptionsResourceFilter.java b/server/src/main/java/io/druid/server/security/AllowOptionsResourceFilter.java new file mode 100644 index 000000000000..1e8f488902b4 --- /dev/null +++ b/server/src/main/java/io/druid/server/security/AllowOptionsResourceFilter.java @@ -0,0 +1,84 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.server.security; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.HttpMethod; +import java.io.IOException; + +public class AllowOptionsResourceFilter implements Filter +{ + private final boolean allowUnauthenticatedHttpOptions; + + public AllowOptionsResourceFilter( + boolean allowUnauthenticatedHttpOptions + ) + { + this.allowUnauthenticatedHttpOptions = allowUnauthenticatedHttpOptions; + } + + @Override + public void init(FilterConfig filterConfig) throws ServletException + { + + } + + @Override + public void doFilter( + ServletRequest request, ServletResponse response, FilterChain chain + ) throws IOException, ServletException + { + HttpServletRequest httpReq = (HttpServletRequest) request; + + // Druid itself doesn't explicitly handle OPTIONS requests, so no resource handler will authorize such requests; + // this filter therefore catches all OPTIONS requests and authorizes them. + if (HttpMethod.OPTIONS.equals(httpReq.getMethod())) { + if (httpReq.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT) == null) { + // If the request already had credentials and authenticated successfully, keep the authenticated identity. + // Otherwise, allow the unauthenticated request. + if (allowUnauthenticatedHttpOptions) { + httpReq.setAttribute( + AuthConfig.DRUID_AUTHENTICATION_RESULT, + new AuthenticationResult(AuthConfig.ALLOW_ALL_NAME, AuthConfig.ALLOW_ALL_NAME, null, null) + ); + } else { + ((HttpServletResponse) response).sendError(HttpServletResponse.SC_UNAUTHORIZED); + } + } + + httpReq.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); + } + + chain.doFilter(request, response); + } + + @Override + public void destroy() + { + + } +} diff --git a/server/src/main/java/io/druid/server/security/AuthConfig.java b/server/src/main/java/io/druid/server/security/AuthConfig.java index af4768a4d5e0..9fe71afef9bc 100644 --- a/server/src/main/java/io/druid/server/security/AuthConfig.java +++ b/server/src/main/java/io/druid/server/security/AuthConfig.java @@ -23,6 +23,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; +import java.util.Objects; public class AuthConfig { @@ -40,17 +41,19 @@ public class AuthConfig public AuthConfig() { - this(null, null); + this(null, null, false); } @JsonCreator public AuthConfig( @JsonProperty("authenticatorChain") List authenticationChain, - @JsonProperty("authorizers") List authorizers + @JsonProperty("authorizers") List authorizers, + @JsonProperty("allowUnauthenticatedHttpOptions") boolean allowUnauthenticatedHttpOptions ) { this.authenticatorChain = authenticationChain; this.authorizers = authorizers; + this.allowUnauthenticatedHttpOptions = allowUnauthenticatedHttpOptions; } @JsonProperty @@ -59,6 +62,9 @@ public AuthConfig( @JsonProperty private List authorizers; + @JsonProperty + private final boolean allowUnauthenticatedHttpOptions; + public List getAuthenticatorChain() { return authenticatorChain; @@ -69,12 +75,18 @@ public List getAuthorizers() return authorizers; } + public boolean isAllowUnauthenticatedHttpOptions() + { + return allowUnauthenticatedHttpOptions; + } + @Override public String toString() { return "AuthConfig{" + - "authenticatorChain='" + authenticatorChain + '\'' + - ", authorizers='" + authorizers + '\'' + + 
"authenticatorChain=" + authenticatorChain + + ", authorizers=" + authorizers + + ", allowUnauthenticatedHttpOptions=" + allowUnauthenticatedHttpOptions + '}'; } @@ -87,23 +99,15 @@ public boolean equals(Object o) if (o == null || getClass() != o.getClass()) { return false; } - AuthConfig that = (AuthConfig) o; - - if (getAuthenticatorChain() != null - ? !getAuthenticatorChain().equals(that.getAuthenticatorChain()) - : that.getAuthenticatorChain() != null) { - return false; - } - return getAuthorizers() != null ? getAuthorizers().equals(that.getAuthorizers()) : that.getAuthorizers() == null; - + return isAllowUnauthenticatedHttpOptions() == that.isAllowUnauthenticatedHttpOptions() && + Objects.equals(getAuthenticatorChain(), that.getAuthenticatorChain()) && + Objects.equals(getAuthorizers(), that.getAuthorizers()); } @Override public int hashCode() { - int result = getAuthenticatorChain() != null ? getAuthenticatorChain().hashCode() : 0; - result = 31 * result + (getAuthorizers() != null ? getAuthorizers().hashCode() : 0); - return result; + return Objects.hash(getAuthenticatorChain(), getAuthorizers(), isAllowUnauthenticatedHttpOptions()); } } diff --git a/server/src/main/java/io/druid/server/security/AuthenticationResult.java b/server/src/main/java/io/druid/server/security/AuthenticationResult.java index bb9effdf8e05..a1a7d5d966ec 100644 --- a/server/src/main/java/io/druid/server/security/AuthenticationResult.java +++ b/server/src/main/java/io/druid/server/security/AuthenticationResult.java @@ -37,6 +37,15 @@ public class AuthenticationResult */ private final String authorizerName; + + /** + * Name of authenticator whom created the results + * + * If you found your self asking why the authenticatedBy field can be null please read this + * https://github.com/druid-io/druid/pull/5706#discussion_r185940889 + */ + @Nullable + private final String authenticatedBy; /** * parameter containing additional context information from an Authenticator */ @@ -46,11 +55,13 @@ public class AuthenticationResult public AuthenticationResult( final String identity, final String authorizerName, + final String authenticatedBy, final Map context ) { this.identity = identity; this.authorizerName = authorizerName; + this.authenticatedBy = authenticatedBy; this.context = context; } @@ -68,4 +79,9 @@ public Map getContext() { return context; } + + public String getAuthenticatedBy() + { + return authenticatedBy; + } } diff --git a/server/src/main/java/io/druid/server/security/AuthenticationUtils.java b/server/src/main/java/io/druid/server/security/AuthenticationUtils.java index cabaa828274c..bd6b2be7680f 100644 --- a/server/src/main/java/io/druid/server/security/AuthenticationUtils.java +++ b/server/src/main/java/io/druid/server/security/AuthenticationUtils.java @@ -27,6 +27,16 @@ public class AuthenticationUtils { + public static void addAllowOptionsFilter(ServletContextHandler root, boolean allowUnauthenticatedHttpOptions) + { + FilterHolder holder = new FilterHolder(new AllowOptionsResourceFilter(allowUnauthenticatedHttpOptions)); + root.addFilter( + holder, + "/*", + null + ); + } + public static void addAuthenticationFilterChain( ServletContextHandler root, List authenticators diff --git a/server/src/main/java/io/druid/server/security/Authenticator.java b/server/src/main/java/io/druid/server/security/Authenticator.java index 969b4497f2a5..1b81dcfc4737 100644 --- a/server/src/main/java/io/druid/server/security/Authenticator.java +++ b/server/src/main/java/io/druid/server/security/Authenticator.java @@ -22,9 
+22,12 @@ import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; import io.druid.server.initialization.jetty.ServletFilterHolder; +import org.eclipse.jetty.client.api.Request; import javax.annotation.Nullable; import javax.servlet.Filter; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; import java.util.Map; @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") @@ -93,4 +96,23 @@ public interface Authenticator extends ServletFilterHolder */ @Nullable AuthenticationResult authenticateJDBCContext(Map context); + + + /** + * This is used to add headers or authentication tokens/results that can be used by the downstream target host. + * Such a token can be used to authenticate the user downstream, in cases where the original credentials + * are not forwardable as-is and the proxy therefore needs to attach its own authentication tokens. + * + * @param clientRequest original client request processed by the upstream chain of authenticators + * @param proxyResponse the proxy response + * @param proxyRequest actual proxy request targeted to a given broker + */ + default void decorateProxyRequest( + final HttpServletRequest clientRequest, + final HttpServletResponse proxyResponse, + final Request proxyRequest + ) + { + //noop + } } diff --git a/server/src/main/java/io/druid/server/security/AuthorizationUtils.java b/server/src/main/java/io/druid/server/security/AuthorizationUtils.java index 68e35b5484c2..b14d6a788511 100644 --- a/server/src/main/java/io/druid/server/security/AuthorizationUtils.java +++ b/server/src/main/java/io/druid/server/security/AuthorizationUtils.java @@ -27,6 +27,7 @@ import io.druid.java.util.common.ISE; import javax.servlet.http.HttpServletRequest; +import java.util.List; import java.util.Map; import java.util.Set; @@ -281,6 +282,65 @@ public static Iterable filterAuthorizedResources( return filteredResources; } + /** + * Given a map of resource lists, filter each resource list by applying the resource action generator to each + * item in each resource list. + * + * The resourceActionGenerator returns an Iterable of resource-actions for each resource. + * + * If a resource list is null or has no authorized items after filtering, it will not be included in the returned + * map. + * + * This function will set the DRUID_AUTHORIZATION_CHECKED attribute in the request. + * + * If this attribute is already set when this function is called, an exception is thrown. 
+ * + * @param request HTTP request to be authorized + * @param unfilteredResources Map of resource lists to be filtered + * @param resourceActionGenerator Function that creates an iterable of resource-actions from a resource + * @param authorizerMapper authorizer mapper + * + * @return Map containing lists of resources that were authorized + */ + public static Map> filterAuthorizedResources( + final HttpServletRequest request, + final Map> unfilteredResources, + final Function> resourceActionGenerator, + final AuthorizerMapper authorizerMapper + ) + { + if (request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED) != null) { + throw new ISE("Request already had authorization check."); + } + + final AuthenticationResult authenticationResult = AuthorizationUtils.authenticationResultFromRequest(request); + + Map> filteredResources = Maps.newHashMap(); + for (Map.Entry> entry : unfilteredResources.entrySet()) { + if (entry.getValue() == null) { + continue; + } + + final List filteredList = Lists.newArrayList( + AuthorizationUtils.filterAuthorizedResources( + authenticationResult, + entry.getValue(), + resourceActionGenerator, + authorizerMapper + ) + ); + + if (filteredList.size() > 0) { + filteredResources.put( + entry.getKey(), + filteredList + ); + } + } + + request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); + return filteredResources; + } /** * Function for the common pattern of generating a resource-action for reading from a datasource, using the diff --git a/server/src/main/java/io/druid/server/security/Escalator.java b/server/src/main/java/io/druid/server/security/Escalator.java index c3a9c19aa7d5..31a77a74d07b 100644 --- a/server/src/main/java/io/druid/server/security/Escalator.java +++ b/server/src/main/java/io/druid/server/security/Escalator.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; /** * This interface provides methods needed for escalating internal system requests with privileged authentication -

    - * This HTTP client is used by the Druid Router node. - * - * @param baseClient Base Jetty HttpClient - * - * @return Jetty HttpClient that sends requests with the credentials of the internal system user - */ - org.eclipse.jetty.client.HttpClient createEscalatedJettyClient(org.eclipse.jetty.client.HttpClient baseClient); - /** * @return an AuthenticationResult representing the identity of the internal system user. */ AuthenticationResult createEscalatedAuthenticationResult(); + } diff --git a/server/src/main/java/io/druid/server/security/NoopEscalator.java b/server/src/main/java/io/druid/server/security/NoopEscalator.java index 583d12dc249a..45f22deecc05 100644 --- a/server/src/main/java/io/druid/server/security/NoopEscalator.java +++ b/server/src/main/java/io/druid/server/security/NoopEscalator.java @@ -19,7 +19,7 @@ package io.druid.server.security; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; public class NoopEscalator implements Escalator { @@ -29,12 +29,6 @@ public HttpClient createEscalatedClient(HttpClient baseClient) return baseClient; } - @Override - public org.eclipse.jetty.client.HttpClient createEscalatedJettyClient(org.eclipse.jetty.client.HttpClient baseClient) - { - return baseClient; - } - @Override public AuthenticationResult createEscalatedAuthenticationResult() { diff --git a/server/src/main/java/io/druid/server/security/PreResponseAuthorizationCheckFilter.java b/server/src/main/java/io/druid/server/security/PreResponseAuthorizationCheckFilter.java index ad0144a0b8c7..1c8e5e63914f 100644 --- a/server/src/main/java/io/druid/server/security/PreResponseAuthorizationCheckFilter.java +++ b/server/src/main/java/io/druid/server/security/PreResponseAuthorizationCheckFilter.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Sets; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.ISE; import io.druid.query.QueryInterruptedException; import io.druid.server.DruidNode; diff --git a/server/src/main/java/io/druid/server/security/UnsecuredResourceFilter.java b/server/src/main/java/io/druid/server/security/UnsecuredResourceFilter.java index dda859c1af9b..df741dccccb1 100644 --- a/server/src/main/java/io/druid/server/security/UnsecuredResourceFilter.java +++ b/server/src/main/java/io/druid/server/security/UnsecuredResourceFilter.java @@ -48,7 +48,7 @@ public void doFilter( // but the value doesn't matter since we skip authorization checks for requests that go through this filter servletRequest.setAttribute( AuthConfig.DRUID_AUTHENTICATION_RESULT, - new AuthenticationResult(AuthConfig.ALLOW_ALL_NAME, AuthConfig.ALLOW_ALL_NAME, null) + new AuthenticationResult(AuthConfig.ALLOW_ALL_NAME, AuthConfig.ALLOW_ALL_NAME, AuthConfig.ALLOW_ALL_NAME, null) ); // This request will not go to an Authorizer, so we need to set this for PreResponseAuthorizationCheckFilter diff --git a/server/src/test/java/io/druid/client/BrokerServerViewTest.java b/server/src/test/java/io/druid/client/BrokerServerViewTest.java index 27a731fb2b78..1024c3878a19 100644 --- a/server/src/test/java/io/druid/client/BrokerServerViewTest.java +++ b/server/src/test/java/io/druid/client/BrokerServerViewTest.java @@ -28,7 +28,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; 
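A short aside on the security hunks above: AuthenticationResult now carries a fourth piece of state, authenticatedBy, threaded through every constructor call in this patch (AllowAllAuthenticator, AllowOptionsResourceFilter, UnsecuredResourceFilter). An illustration of the post-patch argument order; "myBasicAuthenticator" is a hypothetical name used only for this sketch, not something defined in the patch:

// (identity, authorizerName, authenticatedBy, context); authenticatedBy may be null,
// see the PR discussion linked from the new AuthenticationResult javadoc.
AuthenticationResult result = new AuthenticationResult(
    "druid_system",         // the authenticated identity
    "allowAll",             // which Authorizer should evaluate this identity
    "myBasicAuthenticator", // hypothetical: the Authenticator that produced this result
    null                    // optional context map from the Authenticator
);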
import io.druid.client.selector.HighestPriorityTierSelectorStrategy; import io.druid.client.selector.RandomServerSelectorStrategy; import io.druid.client.selector.ServerSelector; diff --git a/server/src/test/java/io/druid/client/CachingQueryRunnerTest.java b/server/src/test/java/io/druid/client/CachingQueryRunnerTest.java index 0ba202eb0091..3fdc02ddf30d 100644 --- a/server/src/test/java/io/druid/client/CachingQueryRunnerTest.java +++ b/server/src/test/java/io/druid/client/CachingQueryRunnerTest.java @@ -25,7 +25,7 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.Cache; import io.druid.client.cache.CacheConfig; import io.druid.client.cache.CacheStats; diff --git a/server/src/test/java/io/druid/client/DirectDruidClientTest.java b/server/src/test/java/io/druid/client/DirectDruidClientTest.java index ad59b6485580..de9d1eda80ac 100644 --- a/server/src/test/java/io/druid/client/DirectDruidClientTest.java +++ b/server/src/test/java/io/druid/client/DirectDruidClientTest.java @@ -20,15 +20,12 @@ package io.druid.client; import com.fasterxml.jackson.core.JsonProcessingException; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.HttpResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; import io.druid.client.selector.ConnectionCountServerSelectorStrategy; import io.druid.client.selector.HighestPriorityTierSelectorStrategy; import io.druid.client.selector.QueryableDruidServer; @@ -39,6 +36,10 @@ import io.druid.java.util.common.StringUtils; import io.druid.java.util.common.guava.Sequence; import io.druid.java.util.common.guava.Sequences; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.query.Druids; import io.druid.query.QueryInterruptedException; import io.druid.query.QueryPlus; @@ -165,7 +166,7 @@ public void testRun() throws Exception serverSelector.addServerAndUpdateSegment(queryableDruidServer2, serverSelector.getSegment()); TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build(); - + query = query.withOverriddenContext(ImmutableMap.of(DirectDruidClient.QUERY_FAIL_TIME, Long.MAX_VALUE)); Sequence s1 = client1.run(QueryPlus.wrap(query), defaultContext); Assert.assertTrue(capturedRequest.hasCaptured()); Assert.assertEquals(url, capturedRequest.getValue().getUrl()); @@ -269,6 +270,7 @@ public void testCancel() throws Exception serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment()); TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build(); + query = query.withOverriddenContext(ImmutableMap.of(DirectDruidClient.QUERY_FAIL_TIME, Long.MAX_VALUE)); cancellationFuture.set(new StatusResponseHolder(HttpResponseStatus.OK, new StringBuilder("cancelled"))); Sequence results = 
client1.run(QueryPlus.wrap(query), defaultContext); Assert.assertEquals(HttpMethod.DELETE, capturedRequest.getValue().getMethod()); @@ -340,6 +342,7 @@ public void testQueryInterruptionExceptionLogMessage() throws JsonProcessingExce serverSelector.addServerAndUpdateSegment(queryableDruidServer, dataSegment); TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build(); + query = query.withOverriddenContext(ImmutableMap.of(DirectDruidClient.QUERY_FAIL_TIME, Long.MAX_VALUE)); interruptionFuture.set( new ByteArrayInputStream( StringUtils.toUtf8("{\"error\":\"testing1\",\"errorMessage\":\"testing2\"}") diff --git a/server/src/test/java/io/druid/client/HttpServerInventoryViewTest.java b/server/src/test/java/io/druid/client/HttpServerInventoryViewTest.java index 73b6c38aef61..7e10ad8734e5 100644 --- a/server/src/test/java/io/druid/client/HttpServerInventoryViewTest.java +++ b/server/src/test/java/io/druid/client/HttpServerInventoryViewTest.java @@ -25,9 +25,9 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.HttpResponseHandler; import io.druid.discovery.DataNodeService; import io.druid.discovery.DiscoveryDruidNode; import io.druid.discovery.DruidNodeDiscovery; diff --git a/server/src/test/java/io/druid/client/ImmutableDruidDataSourceTest.java b/server/src/test/java/io/druid/client/ImmutableDruidDataSourceTest.java new file mode 100644 index 000000000000..ad101dbeea31 --- /dev/null +++ b/server/src/test/java/io/druid/client/ImmutableDruidDataSourceTest.java @@ -0,0 +1,64 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.client; + +import com.fasterxml.jackson.databind.InjectableValues.Std; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSortedMap; +import io.druid.jackson.DefaultObjectMapper; +import io.druid.java.util.common.Intervals; +import io.druid.timeline.DataSegment; +import io.druid.timeline.DataSegment.PruneLoadSpecHolder; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; + +public class ImmutableDruidDataSourceTest +{ + @Test + public void testSerde() throws IOException + { + final DataSegment segment = new DataSegment( + "test", + Intervals.of("2017/2018"), + "version", + null, + ImmutableList.of("dim1", "dim2"), + ImmutableList.of("met1", "met2"), + null, + 1, + 100L, + PruneLoadSpecHolder.DEFAULT + ); + final ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource( + "test", + ImmutableMap.of("prop1", "val1", "prop2", "val2"), + ImmutableSortedMap.of(segment.getIdentifier(), segment) + ); + + final ObjectMapper objectMapper = new DefaultObjectMapper() + .setInjectableValues(new Std().addValue(PruneLoadSpecHolder.class, PruneLoadSpecHolder.DEFAULT)); + final String json = objectMapper.writeValueAsString(dataSource); + Assert.assertEquals(dataSource, objectMapper.readValue(json, ImmutableDruidDataSource.class)); + } +} diff --git a/server/src/test/java/io/druid/client/cache/CacheConfigTest.java b/server/src/test/java/io/druid/client/cache/CacheConfigTest.java index 096887d81b52..7643a618405a 100644 --- a/server/src/test/java/io/druid/client/cache/CacheConfigTest.java +++ b/server/src/test/java/io/druid/client/cache/CacheConfigTest.java @@ -45,7 +45,7 @@ public class CacheConfigTest static Injector injector; static JsonConfigurator configurator; JsonConfigProvider configProvider; - private static final String propertyPrefix = "io.druid.test.cache"; + private static final String propertyPrefix = "io.druid.collections.test.cache"; @BeforeClass public static void populateStatics() diff --git a/server/src/test/java/io/druid/client/cache/MemcachedCacheTest.java b/server/src/test/java/io/druid/client/cache/MemcachedCacheTest.java index 5bfbfa10038c..805cccecbb34 100644 --- a/server/src/test/java/io/druid/client/cache/MemcachedCacheTest.java +++ b/server/src/test/java/io/druid/client/cache/MemcachedCacheTest.java @@ -30,10 +30,10 @@ import com.google.inject.Injector; import com.google.inject.Module; import com.google.inject.name.Names; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.Event; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.metrics.AbstractMonitor; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.metrics.AbstractMonitor; import io.druid.collections.ResourceHolder; import io.druid.collections.StupidResourceHolder; import io.druid.guice.GuiceInjectors; diff --git a/server/src/test/java/io/druid/curator/discovery/CuratorDruidLeaderSelectorTest.java b/server/src/test/java/io/druid/curator/discovery/CuratorDruidLeaderSelectorTest.java index 66b458ef2209..8182737da63f 100644 --- a/server/src/test/java/io/druid/curator/discovery/CuratorDruidLeaderSelectorTest.java +++ b/server/src/test/java/io/druid/curator/discovery/CuratorDruidLeaderSelectorTest.java @@ -19,8 +19,8 @@ package io.druid.curator.discovery; 
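One more note, on the DirectDruidClientTest changes a few hunks up: each test now overrides the query context with DirectDruidClient.QUERY_FAIL_TIME before running the query, which suggests the client now reads a hard failure deadline from the context. Presumably the value is an absolute timestamp in millis (this patch does not spell the semantics out), so the tests pin it to Long.MAX_VALUE to keep the client from timing queries out mid-test:

// Pattern used by the test changes above: effectively "never fail this query on time".
TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
query = query.withOverriddenContext(ImmutableMap.of(DirectDruidClient.QUERY_FAIL_TIME, Long.MAX_VALUE));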
-import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.curator.CuratorTestBase; import io.druid.discovery.DruidLeaderSelector; import io.druid.java.util.common.logger.Logger; diff --git a/server/src/test/java/io/druid/discovery/DruidLeaderClientTest.java b/server/src/test/java/io/druid/discovery/DruidLeaderClientTest.java index cc88f6660ff1..f1ed409e9125 100644 --- a/server/src/test/java/io/druid/discovery/DruidLeaderClientTest.java +++ b/server/src/test/java/io/druid/discovery/DruidLeaderClientTest.java @@ -29,8 +29,8 @@ import com.google.inject.name.Named; import com.google.inject.name.Names; import com.google.inject.servlet.GuiceFilter; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; import io.druid.curator.discovery.ServerDiscoverySelector; import io.druid.guice.GuiceInjectors; import io.druid.guice.Jerseys; diff --git a/server/src/test/java/io/druid/initialization/ComposingEmitterModuleTest.java b/server/src/test/java/io/druid/initialization/ComposingEmitterModuleTest.java index 646f1fa9597f..5cc911bc8c1c 100644 --- a/server/src/test/java/io/druid/initialization/ComposingEmitterModuleTest.java +++ b/server/src/test/java/io/druid/initialization/ComposingEmitterModuleTest.java @@ -26,7 +26,7 @@ import com.google.inject.Key; import com.google.inject.Module; import com.google.inject.name.Names; -import com.metamx.emitter.core.Emitter; +import io.druid.java.util.emitter.core.Emitter; import io.druid.guice.DruidGuiceExtensions; import io.druid.guice.LifecycleModule; import io.druid.server.emitter.ComposingEmitterConfig; diff --git a/server/src/test/java/io/druid/metadata/SQLMetadataRuleManagerTest.java b/server/src/test/java/io/druid/metadata/SQLMetadataRuleManagerTest.java index 5df9c6b535ec..36cf995df3a0 100644 --- a/server/src/test/java/io/druid/metadata/SQLMetadataRuleManagerTest.java +++ b/server/src/test/java/io/druid/metadata/SQLMetadataRuleManagerTest.java @@ -82,6 +82,16 @@ public void setUp() ); } + @Test + public void testMultipleStopAndStart() + { + // Simulate successively losing and regaining the coordinator leadership + ruleManager.start(); + ruleManager.stop(); + ruleManager.start(); + ruleManager.stop(); + } + @Test public void testRuleInsert() { diff --git a/server/src/test/java/io/druid/metadata/MetadataSegmentManagerTest.java b/server/src/test/java/io/druid/metadata/SQLMetadataSegmentManagerTest.java similarity index 95% rename from server/src/test/java/io/druid/metadata/MetadataSegmentManagerTest.java rename to server/src/test/java/io/druid/metadata/SQLMetadataSegmentManagerTest.java index 82252935b4b7..927894551654 100644 --- a/server/src/test/java/io/druid/metadata/MetadataSegmentManagerTest.java +++ b/server/src/test/java/io/druid/metadata/SQLMetadataSegmentManagerTest.java @@ -25,7 +25,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.java.util.common.Intervals; import io.druid.java.util.common.StringUtils; import io.druid.segment.TestHelper; @@ -41,7 +41,7 @@ import java.io.IOException; -public class MetadataSegmentManagerTest +public class SQLMetadataSegmentManagerTest { 
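The new testMultipleStopAndStart above (and testStopAndStart below) pin down a lifecycle contract: coordinator-side managers must tolerate alternating start()/stop() calls, since leadership can be lost and regained at any time. This is the same invariant the LookupCoordinatorManager hunk earlier in this patch protects with lifecycleLock.exitStopAndReset(). As a sketch, with manager standing in for any of these lifecycle-managed classes:

// The invariant the new stop/start tests assert: stop() must leave the object restartable.
for (int i = 0; i < 2; i++) {
  manager.start(); // e.g. on gaining coordinator leadership
  manager.stop();  // e.g. on losing it
}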
@Rule public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule(); @@ -230,4 +230,14 @@ public void testRemoveDataSegment() throws IOException Assert.assertNull(manager.getInventoryValue(newDataSource)); Assert.assertTrue(manager.removeSegment(newDataSource, newSegment.getIdentifier())); } + + @Test + public void testStopAndStart() + { + // Simulate successively losing and regaining the coordinator leadership + manager.start(); + manager.stop(); + manager.start(); + manager.stop(); + } } diff --git a/server/src/test/java/io/druid/query/lookup/LookupReferencesManagerTest.java b/server/src/test/java/io/druid/query/lookup/LookupReferencesManagerTest.java index 62860d4d4d4f..5f9539384dc5 100644 --- a/server/src/test/java/io/druid/query/lookup/LookupReferencesManagerTest.java +++ b/server/src/test/java/io/druid/query/lookup/LookupReferencesManagerTest.java @@ -21,9 +21,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableMap; -import com.metamx.emitter.EmittingLogger; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.FullResponseHolder; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.FullResponseHolder; import io.druid.discovery.DruidLeaderClient; import io.druid.jackson.DefaultObjectMapper; import io.druid.server.metrics.NoopServiceEmitter; @@ -102,7 +102,7 @@ public void testStartStop() throws InterruptedException, IOException lookupMap.put("testMockForStartStop", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -163,7 +163,7 @@ public void testAddGetRemove() throws Exception lookupMap.put("testMockForAddGetRemove", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -201,7 +201,7 @@ public void testCloseIsCalledAfterStopping() throws Exception lookupMap.put("testMockForCloseIsCalledAfterStopping", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -232,7 +232,7 @@ public void testCloseIsCalledAfterRemove() throws Exception lookupMap.put("testMockForCloseIsCalledAfterRemove", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - 
expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -260,7 +260,7 @@ public void testGetNotThere() throws Exception lookupMap.put("testMockForGetNotThere", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -290,7 +290,7 @@ public void testUpdateWithHigherVersion() throws Exception lookupMap.put("testMockForUpdateWithHigherVersion", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -324,7 +324,7 @@ public void testUpdateWithLowerVersion() throws Exception lookupMap.put("testMockForUpdateWithLowerVersion", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -352,7 +352,7 @@ public void testRemoveNonExisting() throws Exception lookupMap.put("testMockForRemoveNonExisting", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -403,7 +403,7 @@ public void testGetAllLookupsState() throws Exception Map lookupMap = new HashMap<>(); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -445,7 +445,7 @@ public void testRealModeWithMainThread() throws Exception lookupMap.put("testMockForRealModeWithMainThread", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -521,7 +521,7 
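Every config.getLookupTier() expectation in this test class gains .anyTimes(). In EasyMock the implicit cardinality of expect(...).andReturn(...) is exactly one call, so once the manager started re-reading the tier (for example on each coordinator sync), these mocks began failing with an unexpected-method-call error. A self-contained illustration of the difference, using a stand-in interface rather than the test's real config mock:

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    public class AnyTimesDemo
    {
      interface TierConfig
      {
        String getLookupTier();
      }

      public static void main(String[] args)
      {
        // Implicit cardinality: the expectation is good for exactly one call;
        // a second call throws AssertionError("Unexpected method call ...").
        TierConfig once = createMock(TierConfig.class);
        expect(once.getLookupTier()).andReturn("tier");
        replay(once);
        once.getLookupTier();
        verify(once);

        // anyTimes(): zero or more calls all satisfy the same expectation.
        TierConfig relaxed = createMock(TierConfig.class);
        expect(relaxed.getLookupTier()).andReturn("tier").anyTimes();
        replay(relaxed);
        relaxed.getLookupTier();
        relaxed.getLookupTier(); // fine
        verify(relaxed);
      }
    }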
@@ public void testCoordinatorLookupSync() throws Exception lookupMap.put("testLookup3", container3); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request); @@ -547,7 +547,7 @@ public void testLoadLookupOnCoordinatorFailure() throws Exception lookupMap.put("testMockForLoadLookupOnCoordinatorFailure", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request) @@ -565,13 +565,13 @@ public void testLoadLookupOnCoordinatorFailure() throws Exception lookupReferencesManager.handlePendingNotices(); lookupReferencesManager.stop(); lookupReferencesManager = new LookupReferencesManager( - new LookupConfig(lookupReferencesManager.lookupSnapshotTaker.getPersistFile().getParent()), + new LookupConfig(lookupReferencesManager.lookupSnapshotTaker.getPersistFile(LOOKUP_TIER).getParent()), mapper, druidLeaderClient, config, true ); reset(config); reset(druidLeaderClient); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/config/lookupTier?detailed=true")) .andReturn(request) @@ -593,7 +593,7 @@ public void testDisableLookupSync() throws Exception lookupMap.put("testMockForDisableLookupSync", container); String strResult = mapper.writeValueAsString(lookupMap); Request request = new Request(HttpMethod.GET, new URL("http://localhost:1234/xx")); - expect(config.getLookupTier()).andReturn(LOOKUP_TIER); + expect(config.getLookupTier()).andReturn(LOOKUP_TIER).anyTimes(); replay(config); expect(druidLeaderClient.makeRequest(HttpMethod.GET, "/druid/coordinator/v1/lookups/lookupTier?detailed=true")) .andReturn(request); diff --git a/server/src/test/java/io/druid/query/lookup/LookupSnapshotTakerTest.java b/server/src/test/java/io/druid/query/lookup/LookupSnapshotTakerTest.java index 3fc085623ffa..fed17880abba 100644 --- a/server/src/test/java/io/druid/query/lookup/LookupSnapshotTakerTest.java +++ b/server/src/test/java/io/druid/query/lookup/LookupSnapshotTakerTest.java @@ -26,11 +26,11 @@ import io.druid.java.util.common.ISE; import io.druid.java.util.common.StringUtils; import io.druid.segment.TestHelper; -import org.apache.commons.io.FileUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.junit.rules.TemporaryFolder; import java.io.File; @@ -41,8 +41,15 @@ public class LookupSnapshotTakerTest { + private static final String TIER1 = "tier1"; + private static final String TIER2 = "tier2"; + @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Rule + public ExpectedException expectedException = ExpectedException.none(); + private final ObjectMapper mapper = TestHelper.makeJsonMapper(); @@ -60,67 
+67,40 @@ public void setUp() throws IOException @Test public void testTakeSnapshotAndPullExisting() throws IOException { - - LookupBean lookupBean = new LookupBean( - "name", + LookupBean lookupBean1 = new LookupBean( + "name1", null, new LookupExtractorFactoryContainer( "v1", - new MapLookupExtractorFactory( - ImmutableMap.of( - "key", - "value" - ), true - ) + new MapLookupExtractorFactory(ImmutableMap.of("key", "value"), true) ) ); - List lookupBeanList = Lists.newArrayList(lookupBean); - lookupSnapshotTaker.takeSnapshot(lookupBeanList); - List actualList = lookupSnapshotTaker.pullExistingSnapshot(); - Assert.assertEquals(lookupBeanList, actualList); - } - - //test backward compatibility with snapshots stored using 0.9.x code - @Test - public void testBackwardCompatibility() throws IOException - { - File directory = temporaryFolder.newFolder(); - File snapshotFile = new File(directory, LookupSnapshotTaker.PERSIST_FILE_NAME); - Assert.assertFalse(snapshotFile.exists()); - FileUtils.write( - snapshotFile, - "[{\"factory\":{\"type\":\"map\",\"map\":{\"key\":\"value\"},\"isOneToOne\":true},\"name\":\"name\"}]" - ); - Assert.assertTrue(snapshotFile.exists()); - List actualList = new LookupSnapshotTaker(mapper, directory.getAbsolutePath()).pullExistingSnapshot(); - - LookupBean lookupBean = new LookupBean( - "name", + LookupBean lookupBean2 = new LookupBean( + "name2", null, new LookupExtractorFactoryContainer( - null, - new MapLookupExtractorFactory( - ImmutableMap.of( - "key", - "value" - ), true - ) + "v1", + new MapLookupExtractorFactory(ImmutableMap.of("key", "value"), true) ) ); - List lookupBeanList = Lists.newArrayList(lookupBean); - - Assert.assertEquals(lookupBeanList, actualList); + List lookupBeanList1 = Lists.newArrayList(lookupBean1); + lookupSnapshotTaker.takeSnapshot(TIER1, lookupBeanList1); + List lookupBeanList2 = Lists.newArrayList(lookupBean2); + lookupSnapshotTaker.takeSnapshot(TIER2, lookupBeanList2); + Assert.assertEquals(lookupBeanList1, lookupSnapshotTaker.pullExistingSnapshot(TIER1)); + Assert.assertEquals(lookupBeanList2, lookupSnapshotTaker.pullExistingSnapshot(TIER2)); } - @Test(expected = ISE.class) + @Test public void testIOExceptionDuringLookupPersist() throws IOException { File directory = temporaryFolder.newFolder(); - File snapshotFile = new File(directory, LookupSnapshotTaker.PERSIST_FILE_NAME); + LookupSnapshotTaker lookupSnapshotTaker = new LookupSnapshotTaker(mapper, directory.getAbsolutePath()); + File snapshotFile = lookupSnapshotTaker.getPersistFile(TIER1); Assert.assertFalse(snapshotFile.exists()); Assert.assertTrue(snapshotFile.createNewFile()); Assert.assertTrue(snapshotFile.setReadOnly()); - LookupSnapshotTaker lookupSnapshotTaker = new LookupSnapshotTaker(mapper, directory.getAbsolutePath()); + Assert.assertTrue(snapshotFile.getParentFile().setReadOnly()); LookupBean lookupBean = new LookupBean( "name", null, @@ -135,26 +115,28 @@ public void testIOExceptionDuringLookupPersist() throws IOException ) ); List lookupBeanList = Lists.newArrayList(lookupBean); - lookupSnapshotTaker.takeSnapshot(lookupBeanList); - } + expectedException.expect(ISE.class); + expectedException.expectMessage("Exception during serialization of lookups"); + lookupSnapshotTaker.takeSnapshot(TIER1, lookupBeanList); + } @Test public void tesLookupPullingFromEmptyFile() throws IOException { - File snapshotFile = lookupSnapshotTaker.getPersistFile(); + File snapshotFile = lookupSnapshotTaker.getPersistFile(TIER1); Assert.assertTrue(snapshotFile.createNewFile()); - 
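An aside on the exception-testing change in testIOExceptionDuringLookupPersist: @Test(expected = ISE.class) passes if any statement in the method throws an ISE, even arrange/setup code, whereas the ExpectedException rule only matches a throw that happens after the expect(...) calls and can also assert on the message. The pattern, distilled (the ISE type and message come from the hunk above; the method name is illustrative):

    @Rule
    public ExpectedException expectedException = ExpectedException.none();

    @Test
    public void persistFailureIsReportedWithContext()
    {
      // An ISE thrown by setup code up here would now fail the test,
      // unlike with @Test(expected = ISE.class), which it would satisfy.
      expectedException.expect(ISE.class);
      expectedException.expectMessage("Exception during serialization of lookups");
      lookupSnapshotTaker.takeSnapshot(TIER1, lookupBeanList); // must be what throws
    }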
Assert.assertEquals(Collections.EMPTY_LIST, lookupSnapshotTaker.pullExistingSnapshot()); + Assert.assertEquals(Collections.EMPTY_LIST, lookupSnapshotTaker.pullExistingSnapshot(TIER1)); } @Test(expected = ISE.class) public void tesLookupPullingFromCorruptFile() throws IOException { - File snapshotFile = lookupSnapshotTaker.getPersistFile(); + File snapshotFile = lookupSnapshotTaker.getPersistFile(TIER1); Assert.assertTrue(snapshotFile.createNewFile()); byte[] bytes = StringUtils.toUtf8("test corrupt file"); Files.write(bytes, snapshotFile); - lookupSnapshotTaker.pullExistingSnapshot(); + lookupSnapshotTaker.pullExistingSnapshot(TIER1); } @Test @@ -162,7 +144,7 @@ public void testLookupPullingFromNonExistingFile() throws IOException { File directory = temporaryFolder.newFolder(); LookupSnapshotTaker lookupSnapshotTaker = new LookupSnapshotTaker(mapper, directory.getAbsolutePath()); - List actualList = lookupSnapshotTaker.pullExistingSnapshot(); + List actualList = lookupSnapshotTaker.pullExistingSnapshot(TIER1); Assert.assertEquals(Collections.EMPTY_LIST, actualList); } } diff --git a/server/src/test/java/io/druid/query/lookup/RegisteredLookupExtractionFnTest.java b/server/src/test/java/io/druid/query/lookup/RegisteredLookupExtractionFnTest.java index f1332e21ec5f..0e990d89e37e 100644 --- a/server/src/test/java/io/druid/query/lookup/RegisteredLookupExtractionFnTest.java +++ b/server/src/test/java/io/druid/query/lookup/RegisteredLookupExtractionFnTest.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableMap; import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.jackson.JacksonUtils; +import io.druid.query.extraction.ExtractionFn; import io.druid.query.extraction.MapLookupExtractor; import org.easymock.EasyMock; import org.junit.Assert; @@ -57,16 +58,39 @@ public void testSimpleDelegation() LOOKUP_NAME, true, null, - true, + false, false ); EasyMock.verify(manager); + + Assert.assertEquals(false, fn.isInjective()); + Assert.assertEquals(ExtractionFn.ExtractionType.MANY_TO_ONE, fn.getExtractionType()); + for (String orig : Arrays.asList("", "foo", "bat")) { Assert.assertEquals(LOOKUP_EXTRACTOR.apply(orig), fn.apply(orig)); } Assert.assertEquals("not in the map", fn.apply("not in the map")); } + @Test + public void testInheritInjective() + { + final LookupReferencesManager manager = EasyMock.createStrictMock(LookupReferencesManager.class); + managerReturnsMap(manager); + EasyMock.replay(manager); + final RegisteredLookupExtractionFn fn = new RegisteredLookupExtractionFn( + manager, + LOOKUP_NAME, + true, + null, + null, + false + ); + EasyMock.verify(manager); + + Assert.assertNull(fn.isInjective()); + Assert.assertEquals(ExtractionFn.ExtractionType.ONE_TO_ONE, fn.getExtractionType()); + } @Test public void testMissingDelegation() diff --git a/server/src/test/java/io/druid/segment/loading/LocalDataSegmentFinderTest.java b/server/src/test/java/io/druid/segment/loading/LocalDataSegmentFinderTest.java index 78b48a0ab6aa..b399bb8c5985 100644 --- a/server/src/test/java/io/druid/segment/loading/LocalDataSegmentFinderTest.java +++ b/server/src/test/java/io/druid/segment/loading/LocalDataSegmentFinderTest.java @@ -36,6 +36,7 @@ import org.junit.rules.TemporaryFolder; import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; import java.util.Set; @@ -204,12 +205,6 @@ public void testFindSegments() throws SegmentLoadingException, IOException Assert.assertEquals(serializedSegment4_1, FileUtils.readFileToString(descriptor4_1)); } - private String 
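The two RegisteredLookupExtractionFn tests above fix a three-valued injectivity: an explicit true/false is taken as-is, and null defers to the underlying lookup (here a MapLookupExtractorFactory built with isOneToOne = true, hence ONE_TO_ONE). A sketch of that resolution — field and accessor names follow the test's assertions, while the real logic lives in RegisteredLookupExtractionFn:

    @Nullable
    private final Boolean injective; // null when the JSON omits the field

    public ExtractionFn.ExtractionType getExtractionType()
    {
      // Explicit flag wins; otherwise inherit injectivity from the delegate lookup.
      final boolean effective = injective != null ? injective : delegate().isOneToOne();
      return effective
             ? ExtractionFn.ExtractionType.ONE_TO_ONE
             : ExtractionFn.ExtractionType.MANY_TO_ONE;
    }

delegate() stands in for however the fn reaches its resolved lookup; the point is only the null-means-inherit rule that testInheritInjective asserts (isInjective() stays null while getExtractionType() reports ONE_TO_ONE).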
getDescriptorPath(DataSegment segment) - { - final File indexzip = new File(String.valueOf(segment.getLoadSpec().get("path"))); - return indexzip.getParent() + "/" + DESCRIPTOR_JSON; - } - @Test(expected = SegmentLoadingException.class) public void testFindSegmentsFail() throws SegmentLoadingException { @@ -219,4 +214,44 @@ public void testFindSegmentsFail() throws SegmentLoadingException final LocalDataSegmentFinder localDataSegmentFinder = new LocalDataSegmentFinder(mapper); localDataSegmentFinder.findSegments(dataSourceDir.getAbsolutePath(), false); } + + @Test + public void testPreferNewestSegment() throws Exception + { + dataSourceDir = temporaryFolder.newFolder(); + descriptor1 = new File(dataSourceDir.getAbsolutePath() + "/interval10/v10/0/older", DESCRIPTOR_JSON); + descriptor2 = new File(dataSourceDir.getAbsolutePath() + "/interval10/v10/0/newer", DESCRIPTOR_JSON); + + descriptor1.getParentFile().mkdirs(); + descriptor2.getParentFile().mkdirs(); + + mapper.writeValue(descriptor1, SEGMENT_1); + mapper.writeValue(descriptor2, SEGMENT_1); + + indexZip1 = new File(descriptor1.getParentFile(), INDEX_ZIP); + indexZip2 = new File(descriptor2.getParentFile(), INDEX_ZIP); + + FileOutputStream fos1 = new FileOutputStream(indexZip1); + fos1.getFD().sync(); + fos1.close(); + + Thread.sleep(1000); + + FileOutputStream fos2 = new FileOutputStream(indexZip2); + fos2.getFD().sync(); + fos2.close(); + + final Set segments = new LocalDataSegmentFinder(mapper).findSegments( + dataSourceDir.getAbsolutePath(), false + ); + + Assert.assertEquals(1, segments.size()); + Assert.assertEquals(indexZip2.getAbsolutePath(), segments.iterator().next().getLoadSpec().get("path")); + } + + private String getDescriptorPath(DataSegment segment) + { + final File indexzip = new File(String.valueOf(segment.getLoadSpec().get("path"))); + return indexzip.getParent() + "/" + DESCRIPTOR_JSON; + } } diff --git a/server/src/test/java/io/druid/segment/loading/LocalDataSegmentKillerTest.java b/server/src/test/java/io/druid/segment/loading/LocalDataSegmentKillerTest.java index b5eaad7fc9c1..4c75b4d5f681 100644 --- a/server/src/test/java/io/druid/segment/loading/LocalDataSegmentKillerTest.java +++ b/server/src/test/java/io/druid/segment/loading/LocalDataSegmentKillerTest.java @@ -31,6 +31,7 @@ import java.io.File; import java.io.IOException; +import java.util.UUID; public class LocalDataSegmentKillerTest { @@ -93,6 +94,28 @@ public void testKill() throws Exception Assert.assertFalse(dataSourceDir.exists()); } + @Test + public void testKillUniquePath() throws Exception + { + final LocalDataSegmentKiller killer = new LocalDataSegmentKiller(new LocalDataSegmentPusherConfig()); + final String uuid = UUID.randomUUID().toString().substring(0, 5); + final File dataSourceDir = temporaryFolder.newFolder("dataSource"); + final File intervalDir = new File(dataSourceDir, "interval"); + final File versionDir = new File(intervalDir, "1"); + final File partitionDir = new File(versionDir, "0"); + final File uuidDir = new File(partitionDir, uuid); + + makePartitionDirWithIndex(uuidDir); + + killer.kill(getSegmentWithPath(new File(uuidDir, "index.zip").toString())); + + Assert.assertFalse(uuidDir.exists()); + Assert.assertFalse(partitionDir.exists()); + Assert.assertFalse(versionDir.exists()); + Assert.assertFalse(intervalDir.exists()); + Assert.assertFalse(dataSourceDir.exists()); + } + private void makePartitionDirWithIndex(File path) throws IOException { Assert.assertTrue(path.mkdirs()); diff --git 
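testKillUniquePath above checks more than deletion of the segment files: every ancestor directory that became empty — uuid, partition, version, interval, dataSource — must be pruned too. A sketch of that walk-up, assuming kill() starts from the index.zip path in the segment's loadSpec (the real code is in LocalDataSegmentKiller; error handling elided):

    import java.io.File;

    static void killAndPrune(File indexZip, File storageRoot)
    {
      // Remove the leaf segment directory wholesale (index.zip, descriptor.json, ...).
      final File leaf = indexZip.getParentFile();
      for (File f : leaf.listFiles()) {
        f.delete();
      }
      leaf.delete();

      // Walk upward, pruning directories that are now empty, but never past
      // the storage root, so unrelated segments stay untouched.
      for (File dir = leaf.getParentFile();
           dir != null && !dir.equals(storageRoot) && dir.listFiles().length == 0;
           dir = dir.getParentFile()) {
        dir.delete();
      }
    }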
a/server/src/test/java/io/druid/segment/loading/LocalDataSegmentPusherTest.java b/server/src/test/java/io/druid/segment/loading/LocalDataSegmentPusherTest.java index 2309735424d5..6ff0ab06ec63 100644 --- a/server/src/test/java/io/druid/segment/loading/LocalDataSegmentPusherTest.java +++ b/server/src/test/java/io/druid/segment/loading/LocalDataSegmentPusherTest.java @@ -23,11 +23,13 @@ import com.google.common.collect.ImmutableList; import com.google.common.io.Files; import com.google.common.primitives.Ints; +import io.druid.java.util.common.CompressionUtils; import io.druid.java.util.common.Intervals; import io.druid.java.util.common.StringUtils; import io.druid.segment.TestHelper; import io.druid.timeline.DataSegment; import io.druid.timeline.partition.NoneShardSpec; +import org.apache.commons.io.FileUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -54,7 +56,18 @@ public class LocalDataSegmentPusherTest Intervals.utc(0, 1), "v1", null, + ImmutableList.of("dim1"), null, + NoneShardSpec.instance(), + null, + -1 + ); + DataSegment dataSegment2 = new DataSegment( + "ds", + Intervals.utc(0, 1), + "v1", + null, + ImmutableList.of("dim2"), null, NoneShardSpec.instance(), null, @@ -79,8 +92,8 @@ public void testPush() throws IOException */ final DataSegment dataSegment2 = dataSegment.withVersion("v2"); - DataSegment returnSegment1 = localDataSegmentPusher.push(dataSegmentFiles, dataSegment); - DataSegment returnSegment2 = localDataSegmentPusher.push(dataSegmentFiles, dataSegment2); + DataSegment returnSegment1 = localDataSegmentPusher.push(dataSegmentFiles, dataSegment, false); + DataSegment returnSegment2 = localDataSegmentPusher.push(dataSegmentFiles, dataSegment2, false); Assert.assertNotNull(returnSegment1); Assert.assertEquals(dataSegment, returnSegment1); @@ -89,14 +102,14 @@ public void testPush() throws IOException Assert.assertEquals(dataSegment2, returnSegment2); Assert.assertNotEquals( - localDataSegmentPusher.getStorageDir(dataSegment), - localDataSegmentPusher.getStorageDir(dataSegment2) + localDataSegmentPusher.getStorageDir(dataSegment, false), + localDataSegmentPusher.getStorageDir(dataSegment2, false) ); for (DataSegment returnSegment : ImmutableList.of(returnSegment1, returnSegment2)) { File outDir = new File( config.getStorageDirectory(), - localDataSegmentPusher.getStorageDir(returnSegment) + localDataSegmentPusher.getStorageDir(returnSegment, false) ); File versionFile = new File(outDir, "index.zip"); File descriptorJson = new File(outDir, "descriptor.json"); @@ -106,14 +119,35 @@ public void testPush() throws IOException } @Test - public void testFirstPushWinsForConcurrentPushes() throws IOException + public void testPushUseUniquePath() throws IOException + { + DataSegment segment = localDataSegmentPusher.push(dataSegmentFiles, dataSegment, true); + + String path = segment.getLoadSpec().get("path").toString(); + String matcher = ".*/ds/1970-01-01T00:00:00\\.000Z_1970-01-01T00:00:00\\.001Z/v1/0/[A-Za-z0-9-]{36}/index\\.zip"; + Assert.assertTrue(path, path.matches(matcher)); + Assert.assertTrue(new File(path).exists()); + } + + @Test + public void testLastPushWinsForConcurrentPushes() throws IOException { File replicatedDataSegmentFiles = temporaryFolder.newFolder(); Files.asByteSink(new File(replicatedDataSegmentFiles, "version.bin")).write(Ints.toByteArray(0x8)); - DataSegment returnSegment1 = localDataSegmentPusher.push(dataSegmentFiles, dataSegment); - DataSegment returnSegment2 = localDataSegmentPusher.push(replicatedDataSegmentFiles, 
dataSegment); + DataSegment returnSegment1 = localDataSegmentPusher.push(dataSegmentFiles, dataSegment, false); + DataSegment returnSegment2 = localDataSegmentPusher.push(replicatedDataSegmentFiles, dataSegment2, false); + + Assert.assertEquals(dataSegment.getDimensions(), returnSegment1.getDimensions()); + Assert.assertEquals(dataSegment2.getDimensions(), returnSegment2.getDimensions()); + + File unzipDir = new File(config.storageDirectory, "unzip"); + FileUtils.forceMkdir(unzipDir); + CompressionUtils.unzip( + new File(config.storageDirectory, "/ds/1970-01-01T00:00:00.000Z_1970-01-01T00:00:00.001Z/v1/0/index.zip"), + unzipDir + ); - Assert.assertEquals(returnSegment1, returnSegment2); + Assert.assertEquals(0x8, Ints.fromByteArray(Files.toByteArray(new File(unzipDir, "version.bin")))); } @Test @@ -124,7 +158,7 @@ public void testPushCannotCreateDirectory() throws IOException config.storageDirectory = new File(config.storageDirectory, "xxx"); Assert.assertTrue(config.storageDirectory.mkdir()); config.storageDirectory.setWritable(false); - localDataSegmentPusher.push(dataSegmentFiles, dataSegment); + localDataSegmentPusher.push(dataSegmentFiles, dataSegment, false); } @Test diff --git a/server/src/test/java/io/druid/segment/loading/SegmentLoaderLocalCacheManagerTest.java b/server/src/test/java/io/druid/segment/loading/SegmentLoaderLocalCacheManagerTest.java index ffacdb5f6a96..9b5e20d5c9d0 100644 --- a/server/src/test/java/io/druid/segment/loading/SegmentLoaderLocalCacheManagerTest.java +++ b/server/src/test/java/io/druid/segment/loading/SegmentLoaderLocalCacheManagerTest.java @@ -25,7 +25,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.Intervals; import io.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory; diff --git a/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorTest.java b/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorTest.java index 8a5bbda46c19..17b32218fad9 100644 --- a/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorTest.java +++ b/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorTest.java @@ -118,7 +118,8 @@ public void testSimpleIngestion() throws Exception // push all final SegmentsAndMetadata segmentsAndMetadata = appenderator.push( appenderator.getSegments(), - committerSupplier.get() + committerSupplier.get(), + false ).get(); Assert.assertEquals(ImmutableMap.of("x", "3"), (Map) segmentsAndMetadata.getCommitMetadata()); Assert.assertEquals( @@ -190,10 +191,9 @@ public void run() Assert.assertEquals(1, ((AppenderatorImpl) appenderator).getRowsInMemory()); appenderator.add(IDENTIFIERS.get(0), IR("2000", "bob", 1), committerSupplier); Assert.assertEquals(2, ((AppenderatorImpl) appenderator).getRowsInMemory()); - appenderator.persist(ImmutableList.of(IDENTIFIERS.get(1)), committerSupplier.get()); - Assert.assertEquals(1, ((AppenderatorImpl) appenderator).getRowsInMemory()); - appenderator.close(); + appenderator.persistAll(committerSupplier.get()); Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory()); + appenderator.close(); } } @@ -237,12 +237,12 @@ public void run() Assert.assertEquals(4, ((AppenderatorImpl) appenderator).getRowsInMemory()); appenderator.add(IDENTIFIERS.get(0), 
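Two sides of the same coin in the pusher tests above: with useUniquePath = false, both pushes of the same (dataSource, interval, version, shardSpec) tuple resolve to the same directory, so the second index.zip overwrites the first — hence the rename to testLastPushWinsForConcurrentPushes and the 0x8 version.bin assertion — while useUniquePath = true keeps them apart with a random UUID directory. A sketch of the path layout the tests assert; the helper below is illustrative, the real construction lives in the pusher:

    import java.util.UUID;
    import io.druid.timeline.DataSegment;

    static String storageDir(DataSegment segment, boolean useUniquePath)
    {
      // <dataSource>/<start>_<end>/<version>/<partitionNum>[/<uuid>]
      final String base = String.join(
          "/",
          segment.getDataSource(),
          segment.getInterval().getStart() + "_" + segment.getInterval().getEnd(),
          segment.getVersion(),
          String.valueOf(segment.getShardSpec().getPartitionNum())
      );
      // The 36-character UUID matches the [A-Za-z0-9-]{36} regex in
      // testPushUseUniquePath and makes concurrent pushes collision-free.
      return useUniquePath ? base + "/" + UUID.randomUUID() : base;
    }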
IR("2000", "bob", 1), committerSupplier, false); Assert.assertEquals(5, ((AppenderatorImpl) appenderator).getRowsInMemory()); - appenderator.persist(ImmutableList.of(IDENTIFIERS.get(1)), committerSupplier.get()); - Assert.assertEquals(3, ((AppenderatorImpl) appenderator).getRowsInMemory()); - appenderator.close(); + appenderator.persistAll(committerSupplier.get()); Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory()); + appenderator.close(); } } + @Test public void testRestoreFromDisk() throws Exception { diff --git a/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorTester.java b/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorTester.java index 555e05d20673..ac0af8334487 100644 --- a/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorTester.java +++ b/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorTester.java @@ -21,9 +21,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableMap; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.core.NoopEmitter; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.core.NoopEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.CacheConfig; import io.druid.client.cache.MapCache; import io.druid.data.input.impl.DimensionsSpec; @@ -193,7 +193,7 @@ public String getPathForHadoop() } @Override - public DataSegment push(File file, DataSegment segment) throws IOException + public DataSegment push(File file, DataSegment segment, boolean useUniquePath) throws IOException { if (enablePushFailure && mustFail) { mustFail = false; diff --git a/server/src/test/java/io/druid/segment/realtime/appenderator/BatchAppenderatorDriverTest.java b/server/src/test/java/io/druid/segment/realtime/appenderator/BatchAppenderatorDriverTest.java new file mode 100644 index 000000000000..2fd0f087d38a --- /dev/null +++ b/server/src/test/java/io/druid/segment/realtime/appenderator/BatchAppenderatorDriverTest.java @@ -0,0 +1,199 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.segment.realtime.appenderator; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import io.druid.data.input.InputRow; +import io.druid.data.input.MapBasedInputRow; +import io.druid.java.util.common.DateTimes; +import io.druid.java.util.common.Intervals; +import io.druid.java.util.common.granularity.Granularities; +import io.druid.segment.loading.DataSegmentKiller; +import io.druid.segment.realtime.appenderator.BaseAppenderatorDriver.SegmentsForSequence; +import io.druid.segment.realtime.appenderator.StreamAppenderatorDriverTest.TestSegmentAllocator; +import io.druid.segment.realtime.appenderator.SegmentWithState.SegmentState; +import io.druid.timeline.partition.NumberedShardSpec; +import org.easymock.EasyMock; +import org.easymock.EasyMockSupport; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +public class BatchAppenderatorDriverTest extends EasyMockSupport +{ + private static final String DATA_SOURCE = "foo"; + private static final String VERSION = "abc123"; + private static final int MAX_ROWS_IN_MEMORY = 100; + private static final long TIMEOUT = 1000; + + private static final List ROWS = Arrays.asList( + new MapBasedInputRow( + DateTimes.of("2000"), + ImmutableList.of("dim1"), + ImmutableMap.of("dim1", "foo", "met1", "1") + ), + new MapBasedInputRow( + DateTimes.of("2000T01"), + ImmutableList.of("dim1"), + ImmutableMap.of("dim1", "foo", "met1", 2.0) + ), + new MapBasedInputRow( + DateTimes.of("2000T01"), + ImmutableList.of("dim2"), + ImmutableMap.of("dim2", "bar", "met1", 2.0) + ) + ); + + private SegmentAllocator allocator; + private AppenderatorTester appenderatorTester; + private BatchAppenderatorDriver driver; + private DataSegmentKiller dataSegmentKiller; + + @Before + public void setup() + { + appenderatorTester = new AppenderatorTester(MAX_ROWS_IN_MEMORY); + allocator = new TestSegmentAllocator(DATA_SOURCE, Granularities.HOUR); + dataSegmentKiller = createStrictMock(DataSegmentKiller.class); + driver = new BatchAppenderatorDriver( + appenderatorTester.getAppenderator(), + allocator, + new TestUsedSegmentChecker(appenderatorTester), + dataSegmentKiller + ); + + EasyMock.replay(dataSegmentKiller); + } + + @After + public void tearDown() throws Exception + { + EasyMock.verify(dataSegmentKiller); + + driver.clear(); + driver.close(); + } + + @Test + public void testSimple() throws Exception + { + Assert.assertNull(driver.startJob()); + + for (InputRow row : ROWS) { + Assert.assertTrue(driver.add(row, "dummy").isOk()); + } + + checkSegmentStates(2, SegmentState.APPENDING); + + driver.pushAllAndClear(TIMEOUT); + + checkSegmentStates(2, SegmentState.PUSHED_AND_DROPPED); + + final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get( + TIMEOUT, + TimeUnit.MILLISECONDS + ); + + Assert.assertEquals( + ImmutableSet.of( + new SegmentIdentifier(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)), + new SegmentIdentifier(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0)) + ), + published.getSegments() + .stream() + .map(SegmentIdentifier::fromDataSegment) + .collect(Collectors.toSet()) + ); + + Assert.assertNull(published.getCommitMetadata()); + } + + @Test + public void testIncrementalPush() throws Exception + { + 
Assert.assertNull(driver.startJob()); + + int i = 0; + for (InputRow row : ROWS) { + Assert.assertTrue(driver.add(row, "dummy").isOk()); + + checkSegmentStates(1, SegmentState.APPENDING); + checkSegmentStates(i, SegmentState.PUSHED_AND_DROPPED); + + driver.pushAllAndClear(TIMEOUT); + checkSegmentStates(0, SegmentState.APPENDING); + checkSegmentStates(++i, SegmentState.PUSHED_AND_DROPPED); + } + + final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get( + TIMEOUT, + TimeUnit.MILLISECONDS + ); + + Assert.assertEquals( + ImmutableSet.of( + new SegmentIdentifier(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)), + new SegmentIdentifier(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0)), + new SegmentIdentifier(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(1, 0)) + ), + published.getSegments() + .stream() + .map(SegmentIdentifier::fromDataSegment) + .collect(Collectors.toSet()) + ); + + Assert.assertNull(published.getCommitMetadata()); + } + + @Test + public void testRestart() + { + Assert.assertNull(driver.startJob()); + driver.close(); + appenderatorTester.getAppenderator().close(); + + Assert.assertNull(driver.startJob()); + } + + private void checkSegmentStates(int expectedNumSegmentsInState, SegmentState expectedState) + { + final SegmentsForSequence segmentsForSequence = driver.getSegments().get("dummy"); + Assert.assertNotNull(segmentsForSequence); + final List segmentWithStates = segmentsForSequence + .allSegmentStateStream() + .filter(segmentWithState -> segmentWithState.getState() == expectedState) + .collect(Collectors.toList()); + + Assert.assertEquals(expectedNumSegmentsInState, segmentWithStates.size()); + } + + static TransactionalSegmentPublisher makeOkPublisher() + { + return (segments, commitMetadata) -> true; + } +} diff --git a/server/src/test/java/io/druid/segment/realtime/appenderator/SegmentWithStateTest.java b/server/src/test/java/io/druid/segment/realtime/appenderator/SegmentWithStateTest.java new file mode 100644 index 000000000000..569eebdc8a11 --- /dev/null +++ b/server/src/test/java/io/druid/segment/realtime/appenderator/SegmentWithStateTest.java @@ -0,0 +1,54 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package io.druid.segment.realtime.appenderator; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.druid.jackson.DefaultObjectMapper; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; + +public class SegmentWithStateTest +{ + @Test + public void testSerde() throws IOException + { + final ObjectMapper objectMapper = new DefaultObjectMapper(); + final byte[] bytes = objectMapper.writeValueAsBytes(SegmentWithState.SegmentState.APPEND_FINISHED); + Assert.assertEquals( + SegmentWithState.SegmentState.APPEND_FINISHED, + objectMapper.readValue(bytes, SegmentWithState.SegmentState.class) + ); + } + + @Test + public void testSerdeForBackwardCompatibility() throws IOException + { + final ObjectMapper objectMapper = new DefaultObjectMapper(); + Assert.assertEquals( + SegmentWithState.SegmentState.APPENDING, + objectMapper.readValue("\"ACTIVE\"", SegmentWithState.SegmentState.class) + ); + Assert.assertEquals( + SegmentWithState.SegmentState.APPEND_FINISHED, + objectMapper.readValue("\"INACTIVE\"", SegmentWithState.SegmentState.class) + ); + } +} diff --git a/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorDriverFailTest.java b/server/src/test/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriverFailTest.java similarity index 74% rename from server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorDriverFailTest.java rename to server/src/test/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriverFailTest.java index 1cc19f4c1458..9535bb3868da 100644 --- a/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorDriverFailTest.java +++ b/server/src/test/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriverFailTest.java @@ -34,16 +34,20 @@ import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.ISE; +import io.druid.java.util.common.Intervals; import io.druid.java.util.common.granularity.Granularities; import io.druid.query.Query; import io.druid.query.QueryRunner; import io.druid.query.SegmentDescriptor; -import io.druid.segment.incremental.IndexSizeExceededException; +import io.druid.segment.loading.DataSegmentKiller; import io.druid.segment.realtime.FireDepartmentMetrics; -import io.druid.segment.realtime.appenderator.AppenderatorDriverTest.TestCommitterSupplier; -import io.druid.segment.realtime.appenderator.AppenderatorDriverTest.TestSegmentAllocator; -import io.druid.segment.realtime.appenderator.AppenderatorDriverTest.TestSegmentHandoffNotifierFactory; +import io.druid.segment.realtime.appenderator.StreamAppenderatorDriverTest.TestCommitterSupplier; +import io.druid.segment.realtime.appenderator.StreamAppenderatorDriverTest.TestSegmentAllocator; +import io.druid.segment.realtime.appenderator.StreamAppenderatorDriverTest.TestSegmentHandoffNotifierFactory; import io.druid.timeline.DataSegment; +import io.druid.timeline.partition.NumberedShardSpec; +import org.easymock.EasyMock; +import org.easymock.EasyMockSupport; import org.hamcrest.CoreMatchers; import org.joda.time.Interval; import org.junit.After; @@ -65,7 +69,7 @@ import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; -public class AppenderatorDriverFailTest +public class StreamAppenderatorDriverFailTest extends EasyMockSupport { private static final String DATA_SOURCE = "foo"; private static final ObjectMapper OBJECT_MAPPER = new DefaultObjectMapper(); @@ -91,7 +95,8 @@ public class 
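SegmentWithStateTest nails down the rename ACTIVE -> APPENDING and INACTIVE -> APPEND_FINISHED while keeping old persisted snapshots readable. One way to satisfy both of its assertions with Jackson — a sketch; the shipped enum may implement this differently:

    import com.fasterxml.jackson.annotation.JsonCreator;

    public enum SegmentState
    {
      APPENDING,        // written as "ACTIVE" by pre-rename code
      APPEND_FINISHED,  // written as "INACTIVE" by pre-rename code
      PUSHED_AND_DROPPED;

      @JsonCreator
      public static SegmentState fromString(String name)
      {
        switch (name) {
          case "ACTIVE":
            return APPENDING;
          case "INACTIVE":
            return APPEND_FINISHED;
          default:
            return valueOf(name); // new names round-trip unchanged (testSerde)
        }
      }
    }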
AppenderatorDriverFailTest SegmentAllocator allocator; TestSegmentHandoffNotifierFactory segmentHandoffNotifierFactory; - AppenderatorDriver driver; + StreamAppenderatorDriver driver; + DataSegmentKiller dataSegmentKiller; @Rule public ExpectedException expectedException = ExpectedException.none(); @@ -101,6 +106,7 @@ public void setUp() { allocator = new TestSegmentAllocator(DATA_SOURCE, Granularities.HOUR); segmentHandoffNotifierFactory = new TestSegmentHandoffNotifierFactory(); + dataSegmentKiller = createStrictMock(DataSegmentKiller.class); } @After @@ -121,11 +127,12 @@ public void testFailDuringPersist() throws IOException, InterruptedException, Ti + "[[foo_2000-01-01T00:00:00.000Z_2000-01-01T01:00:00.000Z_abc123, " + "foo_2000-01-01T01:00:00.000Z_2000-01-01T02:00:00.000Z_abc123]]"); - driver = new AppenderatorDriver( + driver = new StreamAppenderatorDriver( createPersistFailAppenderator(), allocator, segmentHandoffNotifierFactory, new NoopUsedSegmentChecker(), + dataSegmentKiller, OBJECT_MAPPER, new FireDepartmentMetrics() ); @@ -139,11 +146,11 @@ public void testFailDuringPersist() throws IOException, InterruptedException, Ti for (int i = 0; i < ROWS.size(); i++) { committerSupplier.setMetadata(i + 1); - Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk()); } driver.publish( - AppenderatorDriverTest.makeOkPublisher(), + StreamAppenderatorDriverTest.makeOkPublisher(), committerSupplier.get(), ImmutableList.of("dummy") ).get(PUBLISH_TIMEOUT, TimeUnit.MILLISECONDS); @@ -158,11 +165,12 @@ public void testFailDuringPush() throws IOException, InterruptedException, Timeo + "[[foo_2000-01-01T00:00:00.000Z_2000-01-01T01:00:00.000Z_abc123, " + "foo_2000-01-01T01:00:00.000Z_2000-01-01T02:00:00.000Z_abc123]]"); - driver = new AppenderatorDriver( + driver = new StreamAppenderatorDriver( createPushFailAppenderator(), allocator, segmentHandoffNotifierFactory, new NoopUsedSegmentChecker(), + dataSegmentKiller, OBJECT_MAPPER, new FireDepartmentMetrics() ); @@ -176,11 +184,11 @@ public void testFailDuringPush() throws IOException, InterruptedException, Timeo for (int i = 0; i < ROWS.size(); i++) { committerSupplier.setMetadata(i + 1); - Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk()); } driver.publish( - AppenderatorDriverTest.makeOkPublisher(), + StreamAppenderatorDriverTest.makeOkPublisher(), committerSupplier.get(), ImmutableList.of("dummy") ).get(PUBLISH_TIMEOUT, TimeUnit.MILLISECONDS); @@ -195,11 +203,12 @@ public void testFailDuringDrop() throws IOException, InterruptedException, Timeo "Fail test while dropping segment[foo_2000-01-01T00:00:00.000Z_2000-01-01T01:00:00.000Z_abc123]" ); - driver = new AppenderatorDriver( + driver = new StreamAppenderatorDriver( createDropFailAppenderator(), allocator, segmentHandoffNotifierFactory, new NoopUsedSegmentChecker(), + dataSegmentKiller, OBJECT_MAPPER, new FireDepartmentMetrics() ); @@ -213,11 +222,11 @@ public void testFailDuringDrop() throws IOException, InterruptedException, Timeo for (int i = 0; i < ROWS.size(); i++) { committerSupplier.setMetadata(i + 1); - Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk()); } final SegmentsAndMetadata published = driver.publish( - 
AppenderatorDriverTest.makeOkPublisher(), + StreamAppenderatorDriverTest.makeOkPublisher(), committerSupplier.get(), ImmutableList.of("dummy") ).get(PUBLISH_TIMEOUT, TimeUnit.MILLISECONDS); @@ -225,6 +234,94 @@ public void testFailDuringDrop() throws IOException, InterruptedException, Timeo driver.registerHandoff(published).get(); } + @Test + public void testFailDuringPublish() throws Exception + { + expectedException.expect(ExecutionException.class); + expectedException.expectCause(CoreMatchers.instanceOf(ISE.class)); + expectedException.expectMessage( + "Failed to publish segments[[DataSegment{size=0, shardSpec=NumberedShardSpec{partitionNum=0, partitions=0}, metrics=[], dimensions=[], version='abc123', loadSpec={}, interval=2000-01-01T00:00:00.000Z/2000-01-01T01:00:00.000Z, dataSource='foo', binaryVersion='0'}, DataSegment{size=0, shardSpec=NumberedShardSpec{partitionNum=0, partitions=0}, metrics=[], dimensions=[], version='abc123', loadSpec={}, interval=2000-01-01T01:00:00.000Z/2000-01-01T02:00:00.000Z, dataSource='foo', binaryVersion='0'}]]"); + + testFailDuringPublishInternal(false); + } + + @Test + public void testFailWithExceptionDuringPublish() throws Exception + { + expectedException.expect(ExecutionException.class); + expectedException.expectCause(CoreMatchers.instanceOf(RuntimeException.class)); + expectedException.expectMessage("test"); + + testFailDuringPublishInternal(true); + } + + private void testFailDuringPublishInternal(boolean failWithException) throws Exception + { + driver = new StreamAppenderatorDriver( + new FailableAppenderator(), + allocator, + segmentHandoffNotifierFactory, + new NoopUsedSegmentChecker(), + dataSegmentKiller, + OBJECT_MAPPER, + new FireDepartmentMetrics() + ); + + driver.startJob(); + + final TestCommitterSupplier committerSupplier = new TestCommitterSupplier<>(); + segmentHandoffNotifierFactory.setHandoffDelay(100); + + Assert.assertNull(driver.startJob()); + + for (int i = 0; i < ROWS.size(); i++) { + committerSupplier.setMetadata(i + 1); + Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk()); + } + + dataSegmentKiller.killQuietly(new DataSegment( + "foo", + Intervals.of("2000-01-01T00:00:00.000Z/2000-01-01T01:00:00.000Z"), + "abc123", + ImmutableMap.of(), + ImmutableList.of(), + ImmutableList.of(), + new NumberedShardSpec(0, 0), + 0, + 0 + )); + EasyMock.expectLastCall().once(); + + dataSegmentKiller.killQuietly(new DataSegment( + "foo", + Intervals.of("2000-01-01T01:00:00.000Z/2000-01-01T02:00:00.000Z"), + "abc123", + ImmutableMap.of(), + ImmutableList.of(), + ImmutableList.of(), + new NumberedShardSpec(0, 0), + 0, + 0 + )); + EasyMock.expectLastCall().once(); + + EasyMock.replay(dataSegmentKiller); + + try { + driver.publish( + StreamAppenderatorDriverTest.makeFailingPublisher(failWithException), + committerSupplier.get(), + ImmutableList.of("dummy") + ).get(PUBLISH_TIMEOUT, TimeUnit.MILLISECONDS); + } + catch (Exception e) { + throw e; + } + finally { + EasyMock.verify(dataSegmentKiller); + } + } + private static class NoopUsedSegmentChecker implements UsedSegmentChecker { @Override @@ -305,8 +402,11 @@ public Object startJob() @Override public AppenderatorAddResult add( - SegmentIdentifier identifier, InputRow row, Supplier committerSupplier, boolean allowIncrementalPersists - ) throws IndexSizeExceededException, SegmentNotWritableException + SegmentIdentifier identifier, + InputRow row, + Supplier committerSupplier, + boolean allowIncrementalPersists + ) { rows.computeIfAbsent(identifier, k -> new 
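testFailDuringPublish and testFailWithExceptionDuringPublish together define the cleanup contract: whether the publisher returns false or throws, every segment that was already pushed must be handed to DataSegmentKiller.killQuietly — exactly what the strict mock's two expectLastCall().once() expectations verify. The driver-side pattern, roughly (a sketch, not the literal StreamAppenderatorDriver code; the publisher's throws clause is an assumption):

    import java.io.IOException;
    import com.google.common.collect.ImmutableSet;
    import io.druid.java.util.common.ISE;
    import io.druid.segment.loading.DataSegmentKiller;

    static void publishOrCleanUp(
        SegmentsAndMetadata pushed,
        TransactionalSegmentPublisher publisher,
        DataSegmentKiller killer
    ) throws IOException
    {
      final boolean ok;
      try {
        ok = publisher.publishSegments(
            ImmutableSet.copyOf(pushed.getSegments()),
            pushed.getCommitMetadata()
        );
      }
      catch (RuntimeException e) {
        // Publisher threw: best-effort delete of what was pushed, then rethrow.
        pushed.getSegments().forEach(killer::killQuietly);
        throw e;
      }
      if (!ok) {
        // Publisher declined (e.g. transaction conflict): same cleanup.
        pushed.getSegments().forEach(killer::killQuietly);
        throw new ISE("Failed to publish segments[%s]", pushed.getSegments());
      }
    }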
ArrayList<>()).add(row); numRows++; @@ -354,21 +454,19 @@ public ListenableFuture drop(SegmentIdentifier identifier) } @Override - public ListenableFuture persist( - Collection identifiers, Committer committer - ) + public ListenableFuture persistAll(Committer committer) { if (persistEnabled) { // do nothing return Futures.immediateFuture(committer.getMetadata()); } else { - return Futures.immediateFailedFuture(new ISE("Fail test while persisting segments[%s]", identifiers)); + return Futures.immediateFailedFuture(new ISE("Fail test while persisting segments[%s]", rows.keySet())); } } @Override public ListenableFuture push( - Collection identifiers, Committer committer + Collection identifiers, Committer committer, boolean useUniquePath ) { if (pushEnabled) { @@ -388,7 +486,7 @@ public ListenableFuture push( ) .collect(Collectors.toList()); return Futures.transform( - persist(identifiers, committer), + persistAll(committer), (Function) commitMetadata -> new SegmentsAndMetadata(segments, commitMetadata) ); } else { diff --git a/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorDriverTest.java b/server/src/test/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriverTest.java similarity index 87% rename from server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorDriverTest.java rename to server/src/test/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriverTest.java index d9681351c77e..aff1e020c530 100644 --- a/server/src/test/java/io/druid/segment/realtime/appenderator/AppenderatorDriverTest.java +++ b/server/src/test/java/io/druid/segment/realtime/appenderator/StreamAppenderatorDriverTest.java @@ -20,15 +20,12 @@ package io.druid.segment.realtime.appenderator; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Function; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.common.collect.Maps; -import com.google.common.collect.Ordering; -import com.google.common.collect.Sets; import com.google.common.util.concurrent.ListenableFuture; import io.druid.data.input.Committer; import io.druid.data.input.InputRow; @@ -40,14 +37,14 @@ import io.druid.java.util.common.granularity.Granularities; import io.druid.java.util.common.granularity.Granularity; import io.druid.query.SegmentDescriptor; +import io.druid.segment.loading.DataSegmentKiller; import io.druid.segment.realtime.FireDepartmentMetrics; import io.druid.segment.realtime.plumber.SegmentHandoffNotifier; import io.druid.segment.realtime.plumber.SegmentHandoffNotifierFactory; import io.druid.timeline.DataSegment; -import io.druid.timeline.TimelineObjectHolder; -import io.druid.timeline.VersionedIntervalTimeline; import io.druid.timeline.partition.NumberedShardSpec; -import io.druid.timeline.partition.PartitionChunk; +import org.easymock.EasyMock; +import org.easymock.EasyMockSupport; import org.joda.time.DateTime; import org.junit.After; import org.junit.Assert; @@ -66,7 +63,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -public class AppenderatorDriverTest +public class StreamAppenderatorDriverTest extends EasyMockSupport { private static final String DATA_SOURCE = "foo"; private static final String VERSION = "abc123"; @@ -94,30 +91,37 @@ public class AppenderatorDriverTest ) ); - SegmentAllocator 
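For orientation amid the renames, this is the Appenderator API delta that all of these test edits track (signatures as they appear in the hunks above, with type parameters restored from context — check Appenderator itself for the authoritative versions):

    // Before: persist(Collection<SegmentIdentifier> identifiers, Committer committer).
    // After: one call persists everything currently in memory.
    ListenableFuture<Object> persistAll(Committer committer);

    // push() grows a useUniquePath flag, threaded down to
    // DataSegmentPusher.push(File, DataSegment, boolean useUniquePath).
    ListenableFuture<SegmentsAndMetadata> push(
        Collection<SegmentIdentifier> identifiers,
        Committer committer,
        boolean useUniquePath
    );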
allocator; - AppenderatorTester appenderatorTester; - TestSegmentHandoffNotifierFactory segmentHandoffNotifierFactory; - AppenderatorDriver driver; + private SegmentAllocator allocator; + private AppenderatorTester appenderatorTester; + private TestSegmentHandoffNotifierFactory segmentHandoffNotifierFactory; + private StreamAppenderatorDriver driver; + private DataSegmentKiller dataSegmentKiller; @Before - public void setUp() + public void setUp() throws Exception { appenderatorTester = new AppenderatorTester(MAX_ROWS_IN_MEMORY); allocator = new TestSegmentAllocator(DATA_SOURCE, Granularities.HOUR); segmentHandoffNotifierFactory = new TestSegmentHandoffNotifierFactory(); - driver = new AppenderatorDriver( + dataSegmentKiller = createStrictMock(DataSegmentKiller.class); + driver = new StreamAppenderatorDriver( appenderatorTester.getAppenderator(), allocator, segmentHandoffNotifierFactory, - new TestUsedSegmentChecker(), + new TestUsedSegmentChecker(appenderatorTester), + dataSegmentKiller, OBJECT_MAPPER, new FireDepartmentMetrics() ); + + EasyMock.replay(dataSegmentKiller); } @After public void tearDown() throws Exception { + EasyMock.verify(dataSegmentKiller); + driver.clear(); driver.close(); } @@ -131,7 +135,7 @@ public void testSimple() throws Exception for (int i = 0; i < ROWS.size(); i++) { committerSupplier.setMetadata(i + 1); - Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk()); } final SegmentsAndMetadata published = driver.publish( @@ -177,7 +181,7 @@ public void testMaxRowsPerSegment() throws Exception 2.0 ) ); - final AppenderatorDriverAddResult addResult = driver.add(row, "dummy", committerSupplier); + final AppenderatorDriverAddResult addResult = driver.add(row, "dummy", committerSupplier, false, true); Assert.assertTrue(addResult.isOk()); if (addResult.getNumRowsInSegment() > MAX_ROWS_PER_SEGMENT) { driver.moveSegmentOut("dummy", ImmutableList.of(addResult.getSegmentIdentifier())); @@ -210,7 +214,7 @@ public void testHandoffTimeout() throws Exception for (int i = 0; i < ROWS.size(); i++) { committerSupplier.setMetadata(i + 1); - Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk()); } final SegmentsAndMetadata published = driver.publish( @@ -236,7 +240,7 @@ public void testPublishPerRow() throws IOException, InterruptedException, Timeou // Add the first row and publish immediately { committerSupplier.setMetadata(1); - Assert.assertTrue(driver.add(ROWS.get(0), "dummy", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(0), "dummy", committerSupplier, false, true).isOk()); final SegmentsAndMetadata segmentsAndMetadata = driver.publishAndRegisterHandoff( makeOkPublisher(), @@ -257,7 +261,7 @@ public void testPublishPerRow() throws IOException, InterruptedException, Timeou // Add the second and third rows and publish immediately for (int i = 1; i < ROWS.size(); i++) { committerSupplier.setMetadata(i + 1); - Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk()); final SegmentsAndMetadata segmentsAndMetadata = driver.publishAndRegisterHandoff( makeOkPublisher(), @@ -302,11 +306,11 @@ public void testIncrementalHandoff() throws Exception Assert.assertNull(driver.startJob()); committerSupplier.setMetadata(1); - 
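Likewise, every driver.add(row, "dummy", committerSupplier) call below gains two trailing booleans. Per my reading of the new StreamAppenderatorDriver.add signature (the parameter names are my annotation, not quoted from the diff):

    driver.add(
        ROWS.get(i),
        "dummy",            // sequenceName
        committerSupplier,
        false,              // skipSegmentLineageCheck
        true                // allowIncrementalPersists
    );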
Assert.assertTrue(driver.add(ROWS.get(0), "sequence_0", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(0), "sequence_0", committerSupplier, false, true).isOk()); for (int i = 1; i < ROWS.size(); i++) { committerSupplier.setMetadata(i + 1); - Assert.assertTrue(driver.add(ROWS.get(i), "sequence_1", committerSupplier).isOk()); + Assert.assertTrue(driver.add(ROWS.get(i), "sequence_1", committerSupplier, false, true).isOk()); } final ListenableFuture futureForSequence0 = driver.publishAndRegisterHandoff( @@ -350,19 +354,7 @@ public void testIncrementalHandoff() throws Exception private Set asIdentifiers(Iterable segments) { - return ImmutableSet.copyOf( - Iterables.transform( - segments, - new Function() - { - @Override - public SegmentIdentifier apply(DataSegment input) - { - return SegmentIdentifier.fromDataSegment(input); - } - } - ) - ); + return ImmutableSet.copyOf(Iterables.transform(segments, SegmentIdentifier::fromDataSegment)); } static TransactionalSegmentPublisher makeOkPublisher() @@ -377,6 +369,16 @@ public boolean publishSegments(Set segments, Object commitMetadata) }; } + static TransactionalSegmentPublisher makeFailingPublisher(boolean failWithException) + { + return (segments, commitMetadata) -> { + if (failWithException) { + throw new RuntimeException("test"); + } + return false; + }; + } + static class TestCommitterSupplier implements Supplier { private final AtomicReference metadata = new AtomicReference<>(); @@ -501,33 +503,4 @@ public void close() }; } } - - private class TestUsedSegmentChecker implements UsedSegmentChecker - { - @Override - public Set findUsedSegments(Set identifiers) throws IOException - { - final VersionedIntervalTimeline timeline = new VersionedIntervalTimeline<>(Ordering.natural()); - for (DataSegment dataSegment : appenderatorTester.getPushedSegments()) { - timeline.add( - dataSegment.getInterval(), - dataSegment.getVersion(), - dataSegment.getShardSpec().createChunk(dataSegment) - ); - } - - final Set retVal = Sets.newHashSet(); - for (SegmentIdentifier identifier : identifiers) { - for (TimelineObjectHolder holder : timeline.lookup(identifier.getInterval())) { - for (PartitionChunk chunk : holder.getObject()) { - if (identifiers.contains(SegmentIdentifier.fromDataSegment(chunk.getObject()))) { - retVal.add(chunk.getObject()); - } - } - } - } - - return retVal; - } - } } diff --git a/server/src/test/java/io/druid/segment/realtime/appenderator/TestUsedSegmentChecker.java b/server/src/test/java/io/druid/segment/realtime/appenderator/TestUsedSegmentChecker.java new file mode 100644 index 000000000000..e2e7cd620801 --- /dev/null +++ b/server/src/test/java/io/druid/segment/realtime/appenderator/TestUsedSegmentChecker.java @@ -0,0 +1,66 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.segment.realtime.appenderator; + +import com.google.common.collect.Ordering; +import com.google.common.collect.Sets; +import io.druid.timeline.DataSegment; +import io.druid.timeline.TimelineObjectHolder; +import io.druid.timeline.VersionedIntervalTimeline; +import io.druid.timeline.partition.PartitionChunk; + +import java.io.IOException; +import java.util.Set; + +public class TestUsedSegmentChecker implements UsedSegmentChecker +{ + private final AppenderatorTester appenderatorTester; + + public TestUsedSegmentChecker(AppenderatorTester appenderatorTester) + { + this.appenderatorTester = appenderatorTester; + } + + @Override + public Set findUsedSegments(Set identifiers) throws IOException + { + final VersionedIntervalTimeline timeline = new VersionedIntervalTimeline<>(Ordering.natural()); + for (DataSegment dataSegment : appenderatorTester.getPushedSegments()) { + timeline.add( + dataSegment.getInterval(), + dataSegment.getVersion(), + dataSegment.getShardSpec().createChunk(dataSegment) + ); + } + + final Set retVal = Sets.newHashSet(); + for (SegmentIdentifier identifier : identifiers) { + for (TimelineObjectHolder holder : timeline.lookup(identifier.getInterval())) { + for (PartitionChunk chunk : holder.getObject()) { + if (identifiers.contains(SegmentIdentifier.fromDataSegment(chunk.getObject()))) { + retVal.add(chunk.getObject()); + } + } + } + } + + return retVal; + } +} diff --git a/server/src/test/java/io/druid/segment/realtime/plumber/RealtimePlumberSchoolTest.java b/server/src/test/java/io/druid/segment/realtime/plumber/RealtimePlumberSchoolTest.java index cfddca4aa90e..20375d45754a 100644 --- a/server/src/test/java/io/druid/segment/realtime/plumber/RealtimePlumberSchoolTest.java +++ b/server/src/test/java/io/druid/segment/realtime/plumber/RealtimePlumberSchoolTest.java @@ -26,7 +26,7 @@ import com.google.common.collect.Maps; import com.google.common.io.Files; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.cache.MapCache; import io.druid.data.input.Committer; import io.druid.data.input.InputRow; diff --git a/server/src/test/java/io/druid/server/AsyncQueryForwardingServletTest.java b/server/src/test/java/io/druid/server/AsyncQueryForwardingServletTest.java index c1c0d3b2a4ed..19fb7509e8b1 100644 --- a/server/src/test/java/io/druid/server/AsyncQueryForwardingServletTest.java +++ b/server/src/test/java/io/druid/server/AsyncQueryForwardingServletTest.java @@ -39,22 +39,25 @@ import io.druid.guice.annotations.Smile; import io.druid.guice.http.DruidHttpClientConfig; import io.druid.initialization.Initialization; +import io.druid.java.util.common.granularity.Granularities; import io.druid.java.util.common.lifecycle.Lifecycle; import io.druid.query.DefaultGenericQueryMetricsFactory; +import io.druid.query.Druids; import io.druid.query.MapQueryToolChestWarehouse; import io.druid.query.Query; -import io.druid.query.QueryToolChest; +import io.druid.query.timeseries.TimeseriesQuery; +import io.druid.segment.TestHelper; import io.druid.server.initialization.BaseJettyTest; import io.druid.server.initialization.jetty.JettyServerInitUtils; import io.druid.server.initialization.jetty.JettyServerInitializer; -import io.druid.server.log.RequestLogger; import io.druid.server.metrics.NoopServiceEmitter; import 
io.druid.server.router.QueryHostFinder; import io.druid.server.router.RendezvousHashAvaticaConnectionBalancer; import io.druid.server.security.AllowAllAuthorizer; -import io.druid.server.security.NoopEscalator; +import io.druid.server.security.AuthenticatorMapper; import io.druid.server.security.Authorizer; import io.druid.server.security.AuthorizerMapper; +import org.easymock.EasyMock; import org.eclipse.jetty.client.HttpClient; import org.eclipse.jetty.server.Handler; import org.eclipse.jetty.server.Server; @@ -67,16 +70,20 @@ import org.junit.Before; import org.junit.Test; +import javax.servlet.ReadListener; import javax.servlet.ServletException; +import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URI; import java.net.URL; import java.util.Collection; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; public class AsyncQueryForwardingServletTest extends BaseJettyTest { @@ -117,7 +124,8 @@ public void configure(Binder binder) ); binder.bind(JettyServerInitializer.class).to(ProxyJettyServerInit.class).in(LazySingleton.class); binder.bind(AuthorizerMapper.class).toInstance( - new AuthorizerMapper(null) { + new AuthorizerMapper(null) + { @Override public Authorizer getAuthorizer(String name) @@ -174,6 +182,98 @@ public void testDeleteBroadcast() throws Exception latch.await(); } + @Test + public void testQueryProxy() throws Exception + { + final ObjectMapper jsonMapper = TestHelper.makeJsonMapper(); + final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder() + .dataSource("foo") + .intervals("2000/P1D") + .granularity(Granularities.ALL) + .context(ImmutableMap.of("queryId", "dummy")) + .build(); + + final QueryHostFinder hostFinder = EasyMock.createMock(QueryHostFinder.class); + EasyMock.expect(hostFinder.pickServer(query)).andReturn(new TestServer("http", "1.2.3.4", 9999)).once(); + EasyMock.replay(hostFinder); + + final HttpServletRequest requestMock = EasyMock.createMock(HttpServletRequest.class); + final ByteArrayInputStream inputStream = new ByteArrayInputStream(jsonMapper.writeValueAsBytes(query)); + final ServletInputStream servletInputStream = new ServletInputStream() + { + private boolean finished; + + @Override + public boolean isFinished() + { + return finished; + } + + @Override + public boolean isReady() + { + return true; + } + + @Override + public void setReadListener(final ReadListener readListener) + { + // do nothing + } + + @Override + public int read() + { + final int b = inputStream.read(); + if (b < 0) { + finished = true; + } + return b; + } + }; + EasyMock.expect(requestMock.getContentType()).andReturn("application/json").times(2); + requestMock.setAttribute("io.druid.proxy.objectMapper", jsonMapper); + EasyMock.expectLastCall(); + EasyMock.expect(requestMock.getRequestURI()).andReturn("/druid/v2/"); + EasyMock.expect(requestMock.getMethod()).andReturn("POST"); + EasyMock.expect(requestMock.getInputStream()).andReturn(servletInputStream); + requestMock.setAttribute("io.druid.proxy.query", query); + requestMock.setAttribute("io.druid.proxy.to.host", "1.2.3.4:9999"); + requestMock.setAttribute("io.druid.proxy.to.host.scheme", "http"); + EasyMock.expectLastCall(); + EasyMock.replay(requestMock); + + final AtomicLong didService = new AtomicLong(); + final AsyncQueryForwardingServlet 
servlet = new AsyncQueryForwardingServlet( + new MapQueryToolChestWarehouse(ImmutableMap.of()), + jsonMapper, + TestHelper.makeSmileMapper(), + hostFinder, + null, + null, + new NoopServiceEmitter(), + requestLogLine -> { /* noop */ }, + new DefaultGenericQueryMetricsFactory(jsonMapper), + new AuthenticatorMapper(ImmutableMap.of()) + ) + { + @Override + protected void doService( + final HttpServletRequest request, + final HttpServletResponse response + ) + { + didService.incrementAndGet(); + } + }; + + servlet.service(requestMock, null); + + // This test is mostly about verifying that the servlet calls the right methods the right number of times. + EasyMock.verify(hostFinder, requestMock); + Assert.assertEquals(1, didService.get()); + } + private static Server makeTestDeleteServer(int port, final CountDownLatch latch) { Server server = new Server(port); @@ -212,13 +312,13 @@ public void initialize(Server server, Injector injector) final QueryHostFinder hostFinder = new QueryHostFinder(null, new RendezvousHashAvaticaConnectionBalancer()) { @Override - public io.druid.client.selector.Server getServer(Query query) + public io.druid.client.selector.Server pickServer(Query query) { return new TestServer("http", "localhost", node.getPlaintextPort()); } @Override - public io.druid.client.selector.Server getDefaultServer() + public io.druid.client.selector.Server pickDefaultServer() { return new TestServer("http", "localhost", node.getPlaintextPort()); } @@ -237,23 +337,16 @@ public Collection<io.druid.client.selector.Server> getAllServers() ObjectMapper jsonMapper = injector.getInstance(ObjectMapper.class); ServletHolder holder = new ServletHolder( new AsyncQueryForwardingServlet( - new MapQueryToolChestWarehouse(ImmutableMap.<Class<? extends Query>, QueryToolChest>of()), + new MapQueryToolChestWarehouse(ImmutableMap.of()), jsonMapper, injector.getInstance(Key.get(ObjectMapper.class, Smile.class)), hostFinder, injector.getProvider(HttpClient.class), injector.getInstance(DruidHttpClientConfig.class), new NoopServiceEmitter(), - new RequestLogger() - { - @Override - public void log(RequestLogLine requestLogLine) throws IOException - { - // noop - } - }, + requestLogLine -> { /* noop */ }, new DefaultGenericQueryMetricsFactory(jsonMapper), - new NoopEscalator() + new AuthenticatorMapper(ImmutableMap.of()) ) { @Override @@ -308,6 +401,19 @@ public void testRewriteURI() throws Exception new URI("http://localhost/"), AsyncQueryForwardingServlet.makeURI("http", "localhost", "/", null) ); + + // Test that an encoded interval with timezone info survives the rewrite without being decoded or re-encoded. + // The encoded query string below decodes to intervals=1900-01-01T00:00:00.000+01:00/3000-01-01T00:00:00.000+01:00 + Assert.assertEquals( + new URI( + "http://localhost:1234/some/path?intervals=1900-01-01T00%3A00%3A00.000%2B01%3A00%2F3000-01-01T00%3A00%3A00.000%2B01%3A00"), + AsyncQueryForwardingServlet.makeURI( + "http", + "localhost:1234", + "/some/path", + "intervals=1900-01-01T00%3A00%3A00.000%2B01%3A00%2F3000-01-01T00%3A00%3A00.000%2B01%3A00" + ) + ); } private static class TestServer implements io.druid.client.selector.Server diff --git a/server/src/test/java/io/druid/server/QueryResourceTest.java b/server/src/test/java/io/druid/server/QueryResourceTest.java index c07002b05e98..2391a718a886 100644 --- a/server/src/test/java/io/druid/server/QueryResourceTest.java +++ b/server/src/test/java/io/druid/server/QueryResourceTest.java @@ -25,8 +25,8 @@ import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.java.util.common.concurrent.Execs; import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.guava.Sequence; @@ -80,7 +80,7 @@ public class QueryResourceTest { private static final QueryToolChestWarehouse warehouse = new MapQueryToolChestWarehouse(ImmutableMap.<Class<? extends Query>, QueryToolChest>of()); private static final ObjectMapper jsonMapper = new DefaultObjectMapper(); - private static final AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null); + private static final AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null, null); private final HttpServletRequest testServletRequest = EasyMock.createMock(HttpServletRequest.class); public static final QuerySegmentWalker testSegmentWalker = new QuerySegmentWalker() @@ -247,13 +247,13 @@ public Access authorize(AuthenticationResult authenticationResult, Resource reso new DefaultGenericQueryMetricsFactory(jsonMapper), new NoopServiceEmitter(), testRequestLogger, - new AuthConfig(null, null), + new AuthConfig(), authMapper ), jsonMapper, jsonMapper, queryManager, - new AuthConfig(null, null), + new AuthConfig(), authMapper, new DefaultGenericQueryMetricsFactory(jsonMapper) ); @@ -354,13 +354,13 @@ public Access authorize(AuthenticationResult authenticationResult, Resource reso new DefaultGenericQueryMetricsFactory(jsonMapper), new NoopServiceEmitter(), testRequestLogger, - new AuthConfig(null, null), + new AuthConfig(), authMapper ), jsonMapper, jsonMapper, queryManager, - new AuthConfig(null, null), + new AuthConfig(), authMapper, new DefaultGenericQueryMetricsFactory(jsonMapper) ); @@ -475,13 +475,13 @@ public Access authorize(AuthenticationResult authenticationResult, Resource reso new DefaultGenericQueryMetricsFactory(jsonMapper), new NoopServiceEmitter(), testRequestLogger, - new AuthConfig(null, null), + new AuthConfig(), authMapper ), jsonMapper, jsonMapper, queryManager, - new AuthConfig(null, null), + new AuthConfig(), authMapper, new DefaultGenericQueryMetricsFactory(jsonMapper) ); diff --git a/server/src/test/java/io/druid/server/coordination/ServerManagerTest.java b/server/src/test/java/io/druid/server/coordination/ServerManagerTest.java index e53e756f094c..6670a3278ec2 100644 --- a/server/src/test/java/io/druid/server/coordination/ServerManagerTest.java +++ b/server/src/test/java/io/druid/server/coordination/ServerManagerTest.java @@ -27,7 +27,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.client.cache.CacheConfig; import io.druid.client.cache.LocalCacheProvider; import io.druid.jackson.DefaultObjectMapper; diff --git a/server/src/test/java/io/druid/server/coordination/ZkCoordinatorTest.java b/server/src/test/java/io/druid/server/coordination/ZkCoordinatorTest.java index 1ef643f81a96..d9e19034ad39 100644 --- a/server/src/test/java/io/druid/server/coordination/ZkCoordinatorTest.java +++ b/server/src/test/java/io/druid/server/coordination/ZkCoordinatorTest.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableMap; -import com.metamx.emitter.EmittingLogger; +import
io.druid.java.util.emitter.EmittingLogger; import io.druid.curator.CuratorTestBase; import io.druid.java.util.common.Intervals; import io.druid.segment.IndexIO; diff --git a/server/src/test/java/io/druid/server/coordinator/DruidClusterTest.java b/server/src/test/java/io/druid/server/coordinator/DruidClusterTest.java index 90eb5d49e305..933c61e060b0 100644 --- a/server/src/test/java/io/druid/server/coordinator/DruidClusterTest.java +++ b/server/src/test/java/io/druid/server/coordinator/DruidClusterTest.java @@ -39,6 +39,7 @@ import java.util.Map; import java.util.NavigableSet; import java.util.Set; +import java.util.TreeMap; import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -74,14 +75,14 @@ public class DruidClusterTest "src1", new ImmutableDruidDataSource( "src1", - ImmutableMap.of(), - ImmutableMap.of() + Collections.emptyMap(), + new TreeMap<>() ), "src2", new ImmutableDruidDataSource( "src2", - ImmutableMap.of(), - ImmutableMap.of() + Collections.emptyMap(), + new TreeMap<>() ) ); diff --git a/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorBalancerProfiler.java b/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorBalancerProfiler.java index b49445905f47..9f7e1ec85181 100644 --- a/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorBalancerProfiler.java +++ b/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorBalancerProfiler.java @@ -24,8 +24,8 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.DruidServer; import io.druid.client.ImmutableDruidServer; import io.druid.java.util.common.DateTimes; diff --git a/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorRuleRunnerTest.java b/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorRuleRunnerTest.java index 7f2515415099..9fd6d0bdbf86 100644 --- a/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorRuleRunnerTest.java +++ b/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorRuleRunnerTest.java @@ -22,11 +22,12 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import com.google.common.collect.Sets; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceEventBuilder; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEventBuilder; import io.druid.client.DruidServer; import io.druid.java.util.common.DateTimes; import io.druid.java.util.common.Intervals; @@ -942,6 +943,8 @@ public void testDropServerActuallyServesSegment() throws Exception LoadQueuePeon anotherMockPeon = EasyMock.createMock(LoadQueuePeon.class); EasyMock.expect(anotherMockPeon.getLoadQueueSize()).andReturn(10L).atLeastOnce(); + EasyMock.expect(anotherMockPeon.getSegmentsToLoad()).andReturn(Sets.newHashSet()).anyTimes(); + EasyMock.replay(anotherMockPeon); DruidCluster druidCluster = new DruidCluster( diff --git 
a/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorSegmentMergerTest.java b/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorSegmentMergerTest.java index 686d7399d57e..71cfb62bb4e9 100644 --- a/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorSegmentMergerTest.java +++ b/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorSegmentMergerTest.java @@ -22,7 +22,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.indexing.IndexingServiceClient; import io.druid.common.config.JacksonConfigManager; import io.druid.java.util.common.Intervals; diff --git a/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorTest.java b/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorTest.java index d2f871c4e779..bbe5796d96ef 100644 --- a/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorTest.java +++ b/server/src/test/java/io/druid/server/coordinator/DruidCoordinatorTest.java @@ -37,16 +37,18 @@ import io.druid.java.util.common.Intervals; import io.druid.java.util.common.concurrent.Execs; import io.druid.java.util.common.concurrent.ScheduledExecutorFactory; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.metadata.MetadataRuleManager; import io.druid.metadata.MetadataSegmentManager; import io.druid.server.DruidNode; import io.druid.server.coordination.DruidServerMetadata; import io.druid.server.coordination.ServerType; import io.druid.server.coordinator.rules.ForeverLoadRule; +import io.druid.server.coordinator.rules.IntervalLoadRule; import io.druid.server.coordinator.rules.Rule; import io.druid.server.initialization.ZkPathsConfig; import io.druid.server.lookup.cache.LookupCoordinatorManager; -import io.druid.server.metrics.NoopServiceEmitter; import io.druid.timeline.DataSegment; import it.unimi.dsi.fastutil.objects.Object2LongMap; import org.apache.curator.framework.CuratorFramework; @@ -96,6 +98,7 @@ public class DruidCoordinatorTest extends CuratorTestBase private ObjectMapper objectMapper; private JacksonConfigManager configManager; private DruidNode druidNode; + private LatchableServiceEmitter serviceEmitter = new LatchableServiceEmitter(); private static final String LOADPATH = "/druid/loadqueue/localhost:1234"; private static final long COORDINATOR_START_DELAY = 1; private static final long COORDINATOR_PERIOD = 100; @@ -178,7 +181,7 @@ public String getBase() serverInventoryView, metadataRuleManager, curator, - new NoopServiceEmitter(), + serviceEmitter, scheduledExecutorFactory, null, null, @@ -378,27 +381,17 @@ public void childEvent( assignSegmentLatch.await(); + final CountDownLatch coordinatorRunLatch = new CountDownLatch(2); + serviceEmitter.latch = coordinatorRunLatch; + coordinatorRunLatch.await(); + Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus()); curator.delete().guaranteed().forPath(ZKPaths.makePath(LOADPATH, dataSegment.getIdentifier())); - // Wait for coordinator thread to run so that replication status is updated - while (coordinator.getSegmentAvailability().getLong(dataSource) != 0) { - Thread.sleep(50); - } + Object2LongMap<String> segmentAvailability = coordinator.getSegmentAvailability(); Assert.assertEquals(1, segmentAvailability.size()); Assert.assertEquals(0L, segmentAvailability.get(dataSource)); - while (coordinator.hasLoadPending(dataSource)) { - Thread.sleep(50); - } - - // wait historical data to be updated - long startMillis = System.currentTimeMillis(); - long coordinatorRunPeriodMillis = druidCoordinatorConfig.getCoordinatorPeriod().getMillis(); - while (System.currentTimeMillis() - startMillis < coordinatorRunPeriodMillis) { - Thread.sleep(100); - } - Map<String, Object2LongMap<String>> replicationStatus = coordinator.getReplicationStatus(); Assert.assertNotNull(replicationStatus); Assert.assertEquals(1, replicationStatus.entrySet().size()); @@ -421,6 +414,127 @@ public void childEvent( EasyMock.verify(metadataRuleManager); } + @Test(timeout = 60_000L) + public void testCoordinatorTieredRun() throws Exception + { + final String dataSource = "dataSource", hotTierName = "hot", coldTierName = "cold"; + final Rule hotTier = new IntervalLoadRule(Intervals.of("2018-01-01/P1M"), ImmutableMap.of(hotTierName, 1)); + final Rule coldTier = new ForeverLoadRule(ImmutableMap.of(coldTierName, 1)); + final String loadPathCold = "/druid/loadqueue/cold:1234"; + final DruidServer hotServer = new DruidServer("hot", "hot", null, 5L, ServerType.HISTORICAL, hotTierName, 0); + final DruidServer coldServer = new DruidServer("cold", "cold", null, 5L, ServerType.HISTORICAL, coldTierName, 0); + + final Map<String, DataSegment> dataSegments = ImmutableMap.of( + "2018-01-02T00:00:00.000Z_2018-01-03T00:00:00.000Z", + new DataSegment(dataSource, Intervals.of("2018-01-02/P1D"), "v1", null, null, null, null, 0x9, 0), + "2018-01-03T00:00:00.000Z_2018-01-04T00:00:00.000Z", + new DataSegment(dataSource, Intervals.of("2018-01-03/P1D"), "v1", null, null, null, null, 0x9, 0), + "2017-01-01T00:00:00.000Z_2017-01-02T00:00:00.000Z", + new DataSegment(dataSource, Intervals.of("2017-01-01/P1D"), "v1", null, null, null, null, 0x9, 0) + ); + + final LoadQueuePeon loadQueuePeonCold = new CuratorLoadQueuePeon( + curator, + loadPathCold, + objectMapper, + Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_cold_scheduled-%d"), + Execs.singleThreaded("coordinator_test_load_queue_peon_cold-%d"), + druidCoordinatorConfig + ); + final PathChildrenCache pathChildrenCacheCold = new PathChildrenCache( + curator, loadPathCold, true, true, Execs.singleThreaded("coordinator_test_path_children_cache_cold-%d") + ); + loadManagementPeons.putAll(ImmutableMap.of("hot", loadQueuePeon, "cold", loadQueuePeonCold)); + + loadQueuePeonCold.start(); + pathChildrenCache.start(); + pathChildrenCacheCold.start(); + + DruidDataSource[] druidDataSources = {new DruidDataSource(dataSource, Collections.emptyMap())}; + dataSegments.values().forEach(druidDataSources[0]::addSegment); + + EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())) + .andReturn(ImmutableList.of(hotTier, coldTier)).atLeastOnce(); + EasyMock.expect(databaseSegmentManager.isStarted()).andReturn(true).anyTimes(); + EasyMock.expect(databaseSegmentManager.getInventory()).andReturn( + ImmutableList.of(druidDataSources[0].toImmutableDruidDataSource()) + ).atLeastOnce(); + EasyMock.expect(serverInventoryView.getInventory()) + .andReturn(ImmutableList.of(hotServer, coldServer)) + .atLeastOnce(); + EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes(); + + EasyMock.replay(metadataRuleManager, databaseSegmentManager, serverInventoryView); + + coordinator.start(); + leaderAnnouncerLatch.await(); // Wait for this coordinator to become leader + + final CountDownLatch assignSegmentLatchHot = new CountDownLatch(2); + pathChildrenCache.getListenable().addListener( + (client, event) -> { + if (event.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) { + DataSegment segment = dataSegments + .entrySet() + .stream() + .filter(x -> event.getData().getPath().contains(x.getKey())) + .map(Map.Entry::getValue) + .findFirst() + .orElse(null); + + if (segment != null) { + hotServer.addDataSegment(segment); + curator.delete().guaranteed().forPath(event.getData().getPath()); + } + + assignSegmentLatchHot.countDown(); + } + } + ); + + final CountDownLatch assignSegmentLatchCold = new CountDownLatch(1); + pathChildrenCacheCold.getListenable().addListener( + (client, event) -> { + if (event.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) { + DataSegment segment = dataSegments + .entrySet() + .stream() + .filter(x -> event.getData().getPath().contains(x.getKey())) + .map(Map.Entry::getValue) + .findFirst() + .orElse(null); + + if (segment != null) { + coldServer.addDataSegment(segment); + curator.delete().guaranteed().forPath(event.getData().getPath()); + } + + assignSegmentLatchCold.countDown(); + } + } + ); + + assignSegmentLatchHot.await(); + assignSegmentLatchCold.await(); + + final CountDownLatch coordinatorRunLatch = new CountDownLatch(2); + serviceEmitter.latch = coordinatorRunLatch; + coordinatorRunLatch.await(); + + Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus()); + + Map<String, Object2LongMap<String>> replicationStatus = coordinator.getReplicationStatus(); + Assert.assertEquals(2, replicationStatus.entrySet().size()); + Assert.assertEquals(0L, replicationStatus.get(hotTierName).getLong(dataSource)); + Assert.assertEquals(0L, replicationStatus.get(coldTierName).getLong(dataSource)); + + coordinator.stop(); + leaderUnannouncerLatch.await(); + + EasyMock.verify(serverInventoryView); + EasyMock.verify(databaseSegmentManager); + EasyMock.verify(metadataRuleManager); + } + @Test public void testOrderedAvailableDataSegments() { @@ -500,4 +614,22 @@ public void unregisterListener() listener.stopBeingLeader(); } } + + private static class LatchableServiceEmitter extends ServiceEmitter + { + private CountDownLatch latch; + + private LatchableServiceEmitter() + { + super("", "", null); + } + + @Override + public void emit(Event event) + { + if (latch != null && "segment/count".equals(event.toMap().get("metric"))) { + latch.countDown(); + } + } + } } diff --git a/server/src/test/java/io/druid/server/coordinator/HttpLoadQueuePeonTest.java b/server/src/test/java/io/druid/server/coordinator/HttpLoadQueuePeonTest.java index 885058f141a7..72fb9a36a5d5 100644 --- a/server/src/test/java/io/druid/server/coordinator/HttpLoadQueuePeonTest.java +++ b/server/src/test/java/io/druid/server/coordinator/HttpLoadQueuePeonTest.java @@ -24,9 +24,9 @@ import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.HttpResponseHandler; import io.druid.discovery.DiscoveryDruidNode; import io.druid.discovery.DruidNodeDiscovery; import io.druid.java.util.common.Intervals; diff --git a/server/src/test/java/io/druid/server/coordinator/ServerHolderTest.java b/server/src/test/java/io/druid/server/coordinator/ServerHolderTest.java index
c25f37bc49fa..2affbd11a841 100644 --- a/server/src/test/java/io/druid/server/coordinator/ServerHolderTest.java +++ b/server/src/test/java/io/druid/server/coordinator/ServerHolderTest.java @@ -31,8 +31,10 @@ import org.junit.Assert; import org.junit.Test; +import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.TreeMap; public class ServerHolderTest { @@ -65,14 +67,14 @@ public class ServerHolderTest "src1", new ImmutableDruidDataSource( "src1", - ImmutableMap.of(), - ImmutableMap.of() + Collections.emptyMap(), + new TreeMap<>() ), "src2", new ImmutableDruidDataSource( "src2", - ImmutableMap.of(), - ImmutableMap.of() + Collections.emptyMap(), + new TreeMap<>() ) ); diff --git a/server/src/test/java/io/druid/server/coordinator/rules/LoadRuleTest.java b/server/src/test/java/io/druid/server/coordinator/rules/LoadRuleTest.java index 76174172d9fe..c7a5e4f930b2 100644 --- a/server/src/test/java/io/druid/server/coordinator/rules/LoadRuleTest.java +++ b/server/src/test/java/io/druid/server/coordinator/rules/LoadRuleTest.java @@ -20,18 +20,20 @@ package io.druid.server.coordinator.rules; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; //CHECKSTYLE.OFF: Regexp -import com.metamx.common.logger.Logger; +import io.druid.java.util.common.logger.Logger; //CHECKSTYLE.ON: Regexp -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.core.LoggingEmitter; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.core.LoggingEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.client.DruidServer; import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.DateTimes; @@ -60,7 +62,9 @@ import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeSet; import java.util.concurrent.Executors; import java.util.stream.Collectors; @@ -189,6 +193,127 @@ public void testLoad() throws Exception EasyMock.verify(throttler, mockPeon, mockBalancerStrategy); } + @Test + public void testLoadPrimaryAssignDoesNotOverAssign() + { + EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes(); + + final LoadQueuePeon mockPeon = createEmptyPeon(); + mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject()); + EasyMock.expectLastCall().atLeastOnce(); + + LoadRule rule = createLoadRule(ImmutableMap.of( + "hot", 1 + )); + + final DataSegment segment = createDataSegment("foo"); + + EasyMock.expect(mockBalancerStrategy.findNewSegmentHomeReplicator(EasyMock.anyObject(), EasyMock.anyObject())) + .andDelegateTo(balancerStrategy) + .anyTimes(); + + EasyMock.replay(throttler, mockPeon, mockBalancerStrategy); + + DruidCluster druidCluster = new DruidCluster( + null, + ImmutableMap.of( + "hot", + Stream.of( + new ServerHolder( + new DruidServer( + "serverHot", + "hostHot", + null, + 1000, + ServerType.HISTORICAL, + "hot", + 1 + ).toImmutableDruidServer(), + mockPeon + ), new ServerHolder( + new DruidServer( + "serverHot2", + "hostHot2", + null, + 1000, + 
ServerType.HISTORICAL, + "hot", + 1 + ).toImmutableDruidServer(), + mockPeon + ) + ).collect(Collectors.toCollection(() -> new TreeSet<>(Collections.reverseOrder()))) + ) + ); + + CoordinatorStats stats = rule.run( + null, + DruidCoordinatorRuntimeParams.newBuilder() + .withDruidCluster(druidCluster) + .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster)) + .withReplicationManager(throttler) + .withBalancerStrategy(mockBalancerStrategy) + .withBalancerReferenceTimestamp(DateTimes.of("2013-01-01")) + .withAvailableSegments(Arrays.asList(segment)).build(), + segment + ); + + + Assert.assertEquals(1L, stats.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot")); + + // ensure multiple runs don't assign primary segment again if at replication count + final LoadQueuePeon loadingPeon = createLoadingPeon(ImmutableList.of(segment)); + EasyMock.replay(loadingPeon); + + DruidCluster afterLoad = new DruidCluster( + null, + ImmutableMap.of( + "hot", + Stream.of( + new ServerHolder( + new DruidServer( + "serverHot", + "hostHot", + null, + 1000, + ServerType.HISTORICAL, + "hot", + 1 + ).toImmutableDruidServer(), + loadingPeon + ), new ServerHolder( + new DruidServer( + "serverHot2", + "hostHot2", + null, + 1000, + ServerType.HISTORICAL, + "hot", + 1 + ).toImmutableDruidServer(), + mockPeon + ) + ).collect(Collectors.toCollection(() -> new TreeSet<>(Collections.reverseOrder()))) + ) + ); + CoordinatorStats statsAfterLoadPrimary = rule.run( + null, + DruidCoordinatorRuntimeParams.newBuilder() + .withDruidCluster(afterLoad) + .withSegmentReplicantLookup(SegmentReplicantLookup.make(afterLoad)) + .withReplicationManager(throttler) + .withBalancerStrategy(mockBalancerStrategy) + .withBalancerReferenceTimestamp(DateTimes.of("2013-01-01")) + .withAvailableSegments(Arrays.asList(segment)).build(), + segment + ); + + + Assert.assertEquals(0, statsAfterLoadPrimary.getTieredStat(LoadRule.ASSIGNED_COUNT, "hot")); + + EasyMock.verify(throttler, mockPeon, mockBalancerStrategy); + } + @Test public void testLoadPriority() throws Exception { @@ -618,4 +743,18 @@ private static LoadQueuePeon createEmptyPeon() return mockPeon; } + + private static LoadQueuePeon createLoadingPeon(List<DataSegment> segments) + { + final Set<DataSegment> segs = ImmutableSet.copyOf(segments); + final long loadingSize = segs.stream().mapToLong(DataSegment::getSize).sum(); + + final LoadQueuePeon mockPeon = EasyMock.createMock(LoadQueuePeon.class); + EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(segs).anyTimes(); + EasyMock.expect(mockPeon.getSegmentsMarkedToDrop()).andReturn(Sets.newHashSet()).anyTimes(); + EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(loadingSize).anyTimes(); + EasyMock.expect(mockPeon.getNumberOfSegmentsInQueue()).andReturn(segs.size()).anyTimes(); + + return mockPeon; + } } diff --git a/server/src/test/java/io/druid/server/emitter/EmitterModuleTest.java b/server/src/test/java/io/druid/server/emitter/EmitterModuleTest.java index 52a6876a5b62..50a2956bffed 100644 --- a/server/src/test/java/io/druid/server/emitter/EmitterModuleTest.java +++ b/server/src/test/java/io/druid/server/emitter/EmitterModuleTest.java @@ -24,13 +24,14 @@ import com.google.inject.Guice; import com.google.inject.Injector; import com.google.inject.Module; -import com.metamx.emitter.core.Emitter; -import com.metamx.emitter.core.ParametrizedUriEmitter; +import io.druid.java.util.emitter.core.Emitter; +import io.druid.java.util.emitter.core.ParametrizedUriEmitter; import io.druid.guice.DruidGuiceExtensions; import io.druid.guice.JsonConfigurator; import
io.druid.guice.LazySingleton; import io.druid.guice.LifecycleModule; import io.druid.guice.ServerModule; +import io.druid.jackson.JacksonModule; import org.junit.Assert; import org.junit.Test; @@ -67,6 +68,7 @@ private Injector makeInjectorWithProperties(final Properties props) new DruidGuiceExtensions(), new LifecycleModule(), new ServerModule(), + new JacksonModule(), new Module() { @Override diff --git a/server/src/test/java/io/druid/server/http/DatasourcesResourceTest.java b/server/src/test/java/io/druid/server/http/DatasourcesResourceTest.java index 96e0b583f2e0..d4b1462cff76 100644 --- a/server/src/test/java/io/druid/server/http/DatasourcesResourceTest.java +++ b/server/src/test/java/io/druid/server/http/DatasourcesResourceTest.java @@ -130,7 +130,7 @@ public void testGetFullQueryableDataSources() throws Exception ).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).atLeastOnce(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().times(1); @@ -144,7 +144,7 @@ public void testGetFullQueryableDataSources() throws Exception ).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).once(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().times(1); @@ -178,7 +178,7 @@ public void testGetFullQueryableDataSources() throws Exception @Test public void testSecuredGetFullQueryableDataSources() throws Exception { - AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null); + AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null, null); // first request EasyMock.expect(server.getDataSources()).andReturn( ImmutableList.of(listDataSources.get(0), listDataSources.get(1)) @@ -236,7 +236,7 @@ public Access authorize(AuthenticationResult authenticationResult1, Resource res inventoryView, null, null, - new AuthConfig(null, null), + new AuthConfig(), authMapper ); Response response = datasourcesResource.getQueryableDataSources("full", null, request); @@ -279,7 +279,7 @@ public void testGetSimpleQueryableDataSources() throws Exception ).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).atLeastOnce(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().times(1); @@ -309,7 +309,7 @@ public void testGetSimpleQueryableDataSources() throws Exception @Test public void testFullGetTheDataSource() throws Exception { - DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap()); + DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap<>()); EasyMock.expect(server.getDataSource("datasource1")).andReturn( dataSource1 ).atLeastOnce(); diff --git a/server/src/test/java/io/druid/server/http/IntervalsResourceTest.java 
b/server/src/test/java/io/druid/server/http/IntervalsResourceTest.java index 067bf869d1b6..530a2a10232a 100644 --- a/server/src/test/java/io/druid/server/http/IntervalsResourceTest.java +++ b/server/src/test/java/io/druid/server/http/IntervalsResourceTest.java @@ -110,7 +110,7 @@ public void testGetIntervals() ).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).once(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().times(1); @@ -147,7 +147,7 @@ public void testSimpleGetSpecificIntervals() ).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).once(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().times(1); @@ -178,7 +178,7 @@ public void testFullGetSpecificIntervals() ).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).once(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().times(1); @@ -211,7 +211,7 @@ public void testGetSpecificIntervals() ).atLeastOnce(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).once(); EasyMock.expect(request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn( - new AuthenticationResult("druid", "druid", null) + new AuthenticationResult("druid", "druid", null, null) ).once(); request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true); EasyMock.expectLastCall().times(1); diff --git a/server/src/test/java/io/druid/server/http/security/PreResponseAuthorizationCheckFilterTest.java b/server/src/test/java/io/druid/server/http/security/PreResponseAuthorizationCheckFilterTest.java index 7503b4041397..e4f7f1a6d7a0 100644 --- a/server/src/test/java/io/druid/server/http/security/PreResponseAuthorizationCheckFilterTest.java +++ b/server/src/test/java/io/druid/server/http/security/PreResponseAuthorizationCheckFilterTest.java @@ -20,8 +20,8 @@ package io.druid.server.http.security; import com.google.common.collect.Lists; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.jackson.DefaultObjectMapper; import io.druid.java.util.common.ISE; import io.druid.server.security.AllowAllAuthenticator; @@ -50,7 +50,7 @@ public class PreResponseAuthorizationCheckFilterTest @Test public void testValidRequest() throws Exception { - AuthenticationResult authenticationResult = new AuthenticationResult("so-very-valid", "so-very-valid", null); + AuthenticationResult authenticationResult = new AuthenticationResult("so-very-valid", "so-very-valid", null, null); HttpServletRequest req = EasyMock.createStrictMock(HttpServletRequest.class); 
HttpServletResponse resp = EasyMock.createStrictMock(HttpServletResponse.class); @@ -103,7 +103,7 @@ public void testMissingAuthorizationCheck() throws Exception expectedException.expect(ISE.class); expectedException.expectMessage("Request did not have an authorization check performed."); - AuthenticationResult authenticationResult = new AuthenticationResult("so-very-valid", "so-very-valid", null); + AuthenticationResult authenticationResult = new AuthenticationResult("so-very-valid", "so-very-valid", null, null); HttpServletRequest req = EasyMock.createStrictMock(HttpServletRequest.class); HttpServletResponse resp = EasyMock.createStrictMock(HttpServletResponse.class); @@ -138,7 +138,7 @@ public void testMissingAuthorizationCheck() throws Exception public void testMissingAuthorizationCheckWithError() throws Exception { EmittingLogger.registerEmitter(EasyMock.createNiceMock(ServiceEmitter.class)); - AuthenticationResult authenticationResult = new AuthenticationResult("so-very-valid", "so-very-valid", null); + AuthenticationResult authenticationResult = new AuthenticationResult("so-very-valid", "so-very-valid", null, null); HttpServletRequest req = EasyMock.createStrictMock(HttpServletRequest.class); HttpServletResponse resp = EasyMock.createStrictMock(HttpServletResponse.class); diff --git a/server/src/test/java/io/druid/server/http/security/ResourceFilterTestHelper.java b/server/src/test/java/io/druid/server/http/security/ResourceFilterTestHelper.java index ed39de6452ef..240785c19c2e 100644 --- a/server/src/test/java/io/druid/server/http/security/ResourceFilterTestHelper.java +++ b/server/src/test/java/io/druid/server/http/security/ResourceFilterTestHelper.java @@ -111,7 +111,7 @@ public MultivaluedMap getMatrixParameters() ).anyTimes(); EasyMock.expect(request.getMethod()).andReturn(requestMethod).anyTimes(); EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).anyTimes(); - AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null); + AuthenticationResult authenticationResult = new AuthenticationResult("druid", "druid", null, null); EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)) .andReturn(authenticationResult) .atLeastOnce(); @@ -182,7 +182,7 @@ public void configure(Binder binder) for (Key key : mockableKeys) { binder.bind((Key) key).toInstance(EasyMock.createNiceMock(key.getTypeLiteral().getRawType())); } - binder.bind(AuthConfig.class).toInstance(new AuthConfig(null, null)); + binder.bind(AuthConfig.class).toInstance(new AuthConfig()); } } ); diff --git a/server/src/test/java/io/druid/server/http/security/SecuritySanityCheckFilterTest.java b/server/src/test/java/io/druid/server/http/security/SecuritySanityCheckFilterTest.java index 891afc912343..30f88bad9ac4 100644 --- a/server/src/test/java/io/druid/server/http/security/SecuritySanityCheckFilterTest.java +++ b/server/src/test/java/io/druid/server/http/security/SecuritySanityCheckFilterTest.java @@ -58,7 +58,9 @@ public void testInvalidRequest() throws Exception FilterChain filterChain = EasyMock.createStrictMock(FilterChain.class); ServletOutputStream outputStream = EasyMock.createNiceMock(ServletOutputStream.class); - AuthenticationResult authenticationResult = new AuthenticationResult("does-not-belong", "does-not-belong", null); + AuthenticationResult authenticationResult = new AuthenticationResult("does-not-belong", "does-not-belong", + null, + null); 
EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(true).once(); EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn(authenticationResult).once(); diff --git a/server/src/test/java/io/druid/server/initialization/BaseJettyTest.java b/server/src/test/java/io/druid/server/initialization/BaseJettyTest.java index 643e8494ad75..484204ab57da 100644 --- a/server/src/test/java/io/druid/server/initialization/BaseJettyTest.java +++ b/server/src/test/java/io/druid/server/initialization/BaseJettyTest.java @@ -23,9 +23,9 @@ import com.google.inject.Injector; import com.google.inject.Key; import com.google.inject.servlet.GuiceFilter; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.HttpClientConfig; -import com.metamx.http.client.HttpClientInit; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClientConfig; +import io.druid.java.util.http.client.HttpClientInit; import io.druid.guice.annotations.Self; import io.druid.guice.http.LifecycleUtils; import io.druid.java.util.common.lifecycle.Lifecycle; diff --git a/server/src/test/java/io/druid/server/initialization/JettyQosTest.java b/server/src/test/java/io/druid/server/initialization/JettyQosTest.java index 44c9cb721f40..de852e651f20 100644 --- a/server/src/test/java/io/druid/server/initialization/JettyQosTest.java +++ b/server/src/test/java/io/druid/server/initialization/JettyQosTest.java @@ -28,10 +28,10 @@ import com.google.inject.Injector; import com.google.inject.Key; import com.google.inject.Module; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.java.util.common.concurrent.Execs; import io.druid.guice.GuiceInjectors; import io.druid.guice.Jerseys; diff --git a/server/src/test/java/io/druid/server/initialization/JettyTest.java b/server/src/test/java/io/druid/server/initialization/JettyTest.java index 207c7d853614..ef0610fb58dc 100644 --- a/server/src/test/java/io/druid/server/initialization/JettyTest.java +++ b/server/src/test/java/io/druid/server/initialization/JettyTest.java @@ -27,10 +27,10 @@ import com.google.inject.Key; import com.google.inject.Module; import com.google.inject.multibindings.Multibinder; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.InputStreamResponseHandler; -import com.metamx.http.client.response.StatusResponseHandler; -import com.metamx.http.client.response.StatusResponseHolder; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.InputStreamResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHandler; +import io.druid.java.util.http.client.response.StatusResponseHolder; import io.druid.guice.GuiceInjectors; import io.druid.guice.Jerseys; import io.druid.guice.JsonConfigProvider; diff --git a/server/src/test/java/io/druid/server/lookup/cache/LookupCoordinatorManagerTest.java b/server/src/test/java/io/druid/server/lookup/cache/LookupCoordinatorManagerTest.java index b128b0bc9b5d..3e39801352e7 100644 --- 
a/server/src/test/java/io/druid/server/lookup/cache/LookupCoordinatorManagerTest.java +++ b/server/src/test/java/io/druid/server/lookup/cache/LookupCoordinatorManagerTest.java @@ -26,14 +26,14 @@ import com.google.common.collect.ImmutableSet; import com.google.common.net.HostAndPort; import com.google.common.util.concurrent.SettableFuture; -import com.metamx.emitter.EmittingLogger; -import com.metamx.emitter.core.Event; -import com.metamx.emitter.core.LoggingEmitter; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.http.client.HttpClient; -import com.metamx.http.client.Request; -import com.metamx.http.client.response.HttpResponseHandler; -import com.metamx.http.client.response.SequenceInputStreamResponseHandler; +import io.druid.java.util.emitter.EmittingLogger; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.core.LoggingEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.http.client.HttpClient; +import io.druid.java.util.http.client.Request; +import io.druid.java.util.http.client.response.HttpResponseHandler; +import io.druid.java.util.http.client.response.SequenceInputStreamResponseHandler; import io.druid.audit.AuditInfo; import io.druid.common.config.JacksonConfigManager; import io.druid.discovery.DruidNodeDiscoveryProvider; @@ -61,7 +61,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -1255,15 +1254,15 @@ public void testStartStop() throws Exception lookupCoordinatorManagerConfig ); - Assert.assertFalse(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertFalse(manager.isStarted()); manager.start(); - Assert.assertTrue(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertTrue(manager.awaitStarted(1)); Assert.assertTrue(manager.backgroundManagerIsRunning()); Assert.assertFalse(manager.waitForBackgroundTermination(10)); manager.stop(); - Assert.assertFalse(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertFalse(manager.awaitStarted(1)); Assert.assertTrue(manager.waitForBackgroundTermination(10)); Assert.assertFalse(manager.backgroundManagerIsRunning()); @@ -1292,35 +1291,35 @@ public void testMultipleStartStop() throws Exception lookupCoordinatorManagerConfig ); - Assert.assertFalse(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertFalse(manager.awaitStarted(1)); manager.start(); - Assert.assertTrue(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertTrue(manager.awaitStarted(1)); Assert.assertTrue(manager.backgroundManagerIsRunning()); Assert.assertFalse(manager.waitForBackgroundTermination(10)); manager.stop(); - Assert.assertFalse(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertFalse(manager.awaitStarted(1)); Assert.assertTrue(manager.waitForBackgroundTermination(10)); Assert.assertFalse(manager.backgroundManagerIsRunning()); manager.start(); - Assert.assertTrue(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertTrue(manager.awaitStarted(1)); Assert.assertTrue(manager.backgroundManagerIsRunning()); Assert.assertFalse(manager.waitForBackgroundTermination(10)); manager.stop(); - Assert.assertFalse(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + 
Assert.assertFalse(manager.awaitStarted(1)); Assert.assertTrue(manager.waitForBackgroundTermination(10)); Assert.assertFalse(manager.backgroundManagerIsRunning()); manager.start(); - Assert.assertTrue(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertTrue(manager.awaitStarted(1)); Assert.assertTrue(manager.backgroundManagerIsRunning()); Assert.assertFalse(manager.waitForBackgroundTermination(10)); manager.stop(); - Assert.assertFalse(manager.lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)); + Assert.assertFalse(manager.awaitStarted(1)); Assert.assertTrue(manager.waitForBackgroundTermination(10)); Assert.assertFalse(manager.backgroundManagerIsRunning()); diff --git a/server/src/test/java/io/druid/server/metrics/HistoricalMetricsMonitorTest.java b/server/src/test/java/io/druid/server/metrics/HistoricalMetricsMonitorTest.java index 652600a7d9ba..b87139a27826 100644 --- a/server/src/test/java/io/druid/server/metrics/HistoricalMetricsMonitorTest.java +++ b/server/src/test/java/io/druid/server/metrics/HistoricalMetricsMonitorTest.java @@ -23,9 +23,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceEventBuilder; -import com.metamx.emitter.service.ServiceMetricEvent; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceEventBuilder; +import io.druid.java.util.emitter.service.ServiceMetricEvent; import io.druid.client.DruidServerConfig; import io.druid.java.util.common.Intervals; import io.druid.server.SegmentManager; diff --git a/server/src/test/java/io/druid/server/metrics/NoopServiceEmitter.java b/server/src/test/java/io/druid/server/metrics/NoopServiceEmitter.java index 99399bf21875..e16e7723b7ed 100644 --- a/server/src/test/java/io/druid/server/metrics/NoopServiceEmitter.java +++ b/server/src/test/java/io/druid/server/metrics/NoopServiceEmitter.java @@ -19,8 +19,8 @@ package io.druid.server.metrics; -import com.metamx.emitter.core.Event; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.core.Event; +import io.druid.java.util.emitter.service.ServiceEmitter; public class NoopServiceEmitter extends ServiceEmitter { diff --git a/services/pom.xml b/services/pom.xml index 9d164c847048..4e2dee82e49c 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -25,7 +25,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT @@ -116,7 +116,7 @@ - + diff --git a/services/src/main/java/io/druid/cli/CliCoordinator.java b/services/src/main/java/io/druid/cli/CliCoordinator.java index af706fdeb4a3..4156b65e460e 100644 --- a/services/src/main/java/io/druid/cli/CliCoordinator.java +++ b/services/src/main/java/io/druid/cli/CliCoordinator.java @@ -28,7 +28,7 @@ import com.google.inject.Module; import com.google.inject.Provides; import com.google.inject.name.Names; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.airlift.airline.Command; import io.druid.audit.AuditManager; import io.druid.client.CoordinatorServerView; diff --git a/services/src/main/java/io/druid/cli/CliOverlord.java b/services/src/main/java/io/druid/cli/CliOverlord.java index ae075abd4b84..d04bc7b175da 100644 --- a/services/src/main/java/io/druid/cli/CliOverlord.java +++ b/services/src/main/java/io/druid/cli/CliOverlord.java @@ -21,7 +21,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; 
import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; import com.google.inject.Binder; import com.google.inject.Injector; import com.google.inject.Key; @@ -90,6 +89,7 @@ import io.druid.server.http.RedirectInfo; import io.druid.server.initialization.jetty.JettyServerInitUtils; import io.druid.server.initialization.jetty.JettyServerInitializer; +import io.druid.server.security.AuthConfig; import io.druid.server.security.AuthenticationUtils; import io.druid.server.security.Authenticator; import io.druid.server.security.AuthenticatorMapper; @@ -116,7 +116,7 @@ public class CliOverlord extends ServerRunnable { private static Logger log = new Logger(CliOverlord.class); - private static List UNSECURED_PATHS = Lists.newArrayList( + protected static List UNSECURED_PATHS = ImmutableList.of( "/", "/console.html", "/old-console/*", @@ -319,6 +319,7 @@ public void initialize(Server server, Injector injector) final ObjectMapper jsonMapper = injector.getInstance(Key.get(ObjectMapper.class, Json.class)); final AuthenticatorMapper authenticatorMapper = injector.getInstance(AuthenticatorMapper.class); + final AuthConfig authConfig = injector.getInstance(AuthConfig.class); List authenticators = null; AuthenticationUtils.addSecuritySanityCheckFilter(root, jsonMapper); @@ -329,6 +330,8 @@ public void initialize(Server server, Injector injector) authenticators = authenticatorMapper.getAuthenticatorChain(); AuthenticationUtils.addAuthenticationFilterChain(root, authenticators); + AuthenticationUtils.addAllowOptionsFilter(root, authConfig.isAllowUnauthenticatedHttpOptions()); + JettyServerInitUtils.addExtensionFilters(root, injector); diff --git a/services/src/main/java/io/druid/cli/CliRealtimeExample.java b/services/src/main/java/io/druid/cli/CliRealtimeExample.java index b4ebfc3eeee7..7ec1c32ac4db 100644 --- a/services/src/main/java/io/druid/cli/CliRealtimeExample.java +++ b/services/src/main/java/io/druid/cli/CliRealtimeExample.java @@ -155,7 +155,7 @@ public String getPathForHadoop(String dataSource) } @Override - public DataSegment push(File file, DataSegment segment) throws IOException + public DataSegment push(File file, DataSegment segment, boolean useUniquePath) { return segment; } diff --git a/services/src/main/java/io/druid/cli/CliRouter.java b/services/src/main/java/io/druid/cli/CliRouter.java index 6f35939bbc89..b480c13125f0 100644 --- a/services/src/main/java/io/druid/cli/CliRouter.java +++ b/services/src/main/java/io/druid/cli/CliRouter.java @@ -26,7 +26,7 @@ import com.google.inject.Provides; import com.google.inject.TypeLiteral; import com.google.inject.name.Names; -import com.metamx.http.client.HttpClient; +import io.druid.java.util.http.client.HttpClient; import io.airlift.airline.Command; import io.druid.curator.discovery.DiscoveryModule; import io.druid.curator.discovery.ServerDiscoveryFactory; diff --git a/services/src/main/java/io/druid/cli/CoordinatorJettyServerInitializer.java b/services/src/main/java/io/druid/cli/CoordinatorJettyServerInitializer.java index f47644f13478..572a06296f48 100644 --- a/services/src/main/java/io/druid/cli/CoordinatorJettyServerInitializer.java +++ b/services/src/main/java/io/druid/cli/CoordinatorJettyServerInitializer.java @@ -118,11 +118,16 @@ public void initialize(Server server, Injector injector) // perform no-op authorization for these resources AuthenticationUtils.addNoopAuthorizationFilters(root, UNSECURED_PATHS); + if (beOverlord) { + AuthenticationUtils.addNoopAuthorizationFilters(root, CliOverlord.UNSECURED_PATHS); 
+ } + authenticators = authenticatorMapper.getAuthenticatorChain(); AuthenticationUtils.addAuthenticationFilterChain(root, authenticators); - JettyServerInitUtils.addExtensionFilters(root, injector); + AuthenticationUtils.addAllowOptionsFilter(root, authConfig.isAllowUnauthenticatedHttpOptions()); + JettyServerInitUtils.addExtensionFilters(root, injector); // Check that requests were authorized before sending responses AuthenticationUtils.addPreResponseAuthorizationCheckFilter( diff --git a/services/src/main/java/io/druid/cli/MiddleManagerJettyServerInitializer.java b/services/src/main/java/io/druid/cli/MiddleManagerJettyServerInitializer.java index 2eaa4d154fe1..bc4d3a54ed92 100644 --- a/services/src/main/java/io/druid/cli/MiddleManagerJettyServerInitializer.java +++ b/services/src/main/java/io/druid/cli/MiddleManagerJettyServerInitializer.java @@ -71,6 +71,7 @@ public void initialize(Server server, Injector injector) authenticators = authenticatorMapper.getAuthenticatorChain(); AuthenticationUtils.addAuthenticationFilterChain(root, authenticators); + AuthenticationUtils.addAllowOptionsFilter(root, authConfig.isAllowUnauthenticatedHttpOptions()); JettyServerInitUtils.addExtensionFilters(root, injector); diff --git a/services/src/main/java/io/druid/cli/QueryJettyServerInitializer.java b/services/src/main/java/io/druid/cli/QueryJettyServerInitializer.java index 0bc34f17fee9..8fa30b1c36cb 100644 --- a/services/src/main/java/io/druid/cli/QueryJettyServerInitializer.java +++ b/services/src/main/java/io/druid/cli/QueryJettyServerInitializer.java @@ -33,6 +33,7 @@ import io.druid.server.initialization.jetty.JettyServerInitUtils; import io.druid.server.initialization.jetty.JettyServerInitializer; import io.druid.server.initialization.jetty.LimitRequestsFilter; +import io.druid.server.security.AuthConfig; import io.druid.server.security.AuthenticationUtils; import io.druid.server.security.Authenticator; import io.druid.server.security.AuthenticatorMapper; @@ -89,6 +90,7 @@ public void initialize(Server server, Injector injector) final ObjectMapper jsonMapper = injector.getInstance(Key.get(ObjectMapper.class, Json.class)); final AuthenticatorMapper authenticatorMapper = injector.getInstance(AuthenticatorMapper.class); + final AuthConfig authConfig = injector.getInstance(AuthConfig.class); List authenticators = null; AuthenticationUtils.addSecuritySanityCheckFilter(root, jsonMapper); @@ -99,6 +101,8 @@ public void initialize(Server server, Injector injector) authenticators = authenticatorMapper.getAuthenticatorChain(); AuthenticationUtils.addAuthenticationFilterChain(root, authenticators); + AuthenticationUtils.addAllowOptionsFilter(root, authConfig.isAllowUnauthenticatedHttpOptions()); + JettyServerInitUtils.addExtensionFilters(root, injector); // Check that requests were authorized before sending responses diff --git a/services/src/main/java/io/druid/cli/RouterJettyServerInitializer.java b/services/src/main/java/io/druid/cli/RouterJettyServerInitializer.java index fc00ca644cb6..930d1191e828 100644 --- a/services/src/main/java/io/druid/cli/RouterJettyServerInitializer.java +++ b/services/src/main/java/io/druid/cli/RouterJettyServerInitializer.java @@ -36,6 +36,7 @@ import io.druid.server.security.AuthenticationUtils; import io.druid.server.security.Authenticator; import io.druid.server.security.AuthenticatorMapper; +import io.druid.sql.avatica.DruidAvaticaHandler; import org.eclipse.jetty.server.Handler; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.handler.HandlerList; @@ 
-52,7 +53,11 @@ public class RouterJettyServerInitializer implements JettyServerInitializer private static Logger log = new Logger(RouterJettyServerInitializer.class); private static List UNSECURED_PATHS = Lists.newArrayList( - "/status/health" + "/status/health", + // JDBC authentication uses the JDBC connection context instead of HTTP headers, skip the normal auth checks. + // The router will keep the connection context in the forwarded message, and the broker is responsible for + // performing the auth checks. + DruidAvaticaHandler.AVATICA_PATH ); private final AsyncQueryForwardingServlet asyncQueryForwardingServlet; @@ -100,6 +105,8 @@ public void initialize(Server server, Injector injector) authenticators = authenticatorMapper.getAuthenticatorChain(); AuthenticationUtils.addAuthenticationFilterChain(root, authenticators); + AuthenticationUtils.addAllowOptionsFilter(root, authConfig.isAllowUnauthenticatedHttpOptions()); + JettyServerInitUtils.addExtensionFilters(root, injector); // Check that requests were authorized before sending responses diff --git a/sql/pom.xml b/sql/pom.xml index 265390e8e7a6..d393d68e5d63 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 druid-sql @@ -29,7 +28,7 @@ io.druid druid - 0.12.0-SNAPSHOT + 0.12.2-SNAPSHOT @@ -41,6 +40,18 @@ org.apache.calcite calcite-core + + + + com.yahoo.datasketches + sketches-core + + org.apache.calcite diff --git a/sql/src/main/java/io/druid/sql/avatica/AvaticaMonitor.java b/sql/src/main/java/io/druid/sql/avatica/AvaticaMonitor.java index 88097653c9f2..c14e1d812254 100644 --- a/sql/src/main/java/io/druid/sql/avatica/AvaticaMonitor.java +++ b/sql/src/main/java/io/druid/sql/avatica/AvaticaMonitor.java @@ -19,9 +19,9 @@ package io.druid.sql.avatica; -import com.metamx.emitter.service.ServiceEmitter; -import com.metamx.emitter.service.ServiceMetricEvent; -import com.metamx.metrics.AbstractMonitor; +import io.druid.java.util.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.service.ServiceMetricEvent; +import io.druid.java.util.metrics.AbstractMonitor; import io.druid.java.util.common.logger.Logger; import org.apache.calcite.avatica.metrics.Counter; import org.apache.calcite.avatica.metrics.Gauge; diff --git a/sql/src/main/java/io/druid/sql/avatica/DruidAvaticaHandler.java b/sql/src/main/java/io/druid/sql/avatica/DruidAvaticaHandler.java index 471efce6a904..5e37d2a6a105 100644 --- a/sql/src/main/java/io/druid/sql/avatica/DruidAvaticaHandler.java +++ b/sql/src/main/java/io/druid/sql/avatica/DruidAvaticaHandler.java @@ -35,7 +35,7 @@ public class DruidAvaticaHandler extends AvaticaJsonHandler { - static final String AVATICA_PATH = "/druid/v2/sql/avatica/"; + public static final String AVATICA_PATH = "/druid/v2/sql/avatica/"; @Inject public DruidAvaticaHandler( diff --git a/sql/src/main/java/io/druid/sql/calcite/expression/DruidExpression.java b/sql/src/main/java/io/druid/sql/calcite/expression/DruidExpression.java index 86475a390492..b779fdd03a0c 100644 --- a/sql/src/main/java/io/druid/sql/calcite/expression/DruidExpression.java +++ b/sql/src/main/java/io/druid/sql/calcite/expression/DruidExpression.java @@ -35,7 +35,7 @@ import java.util.function.Function; /** - * Represents three kinds of expression-like concepts that native Druid queries support: + * Represents two kinds of expression-like concepts that native Druid queries support: * * (1) SimpleExtractions, which are direct column access, possibly with an extractionFn * (2) native Druid expressions. 
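A recurring change in the server initializers above is the new AuthenticationUtils.addAllowOptionsFilter(root, authConfig.isAllowUnauthenticatedHttpOptions()) call, which lets operators opt in to serving HTTP OPTIONS requests (e.g. CORS preflight) without credentials. As a rough sketch of the idea only — this is not Druid's actual filter; the class name, constructor, and marker attribute below are invented for illustration — such a filter might look like:

// Hypothetical sketch of the kind of filter addAllowOptionsFilter could
// install. Not the real Druid implementation: the class name and the
// request-attribute marker are assumptions made for illustration.
import java.io.IOException;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;

public class AllowOptionsFilterSketch implements Filter
{
  // Hypothetical attribute name; the real code would need to set whatever
  // authentication-result attribute the pre-response authorization check
  // expects, so an unauthenticated OPTIONS request still passes that check.
  static final String AUTH_MARKER = "sketch.allowedUnauthenticatedOptions";

  private final boolean allowUnauthenticatedHttpOptions;

  public AllowOptionsFilterSketch(boolean allowUnauthenticatedHttpOptions)
  {
    this.allowUnauthenticatedHttpOptions = allowUnauthenticatedHttpOptions;
  }

  @Override
  public void init(FilterConfig filterConfig) throws ServletException
  {
    // No initialization needed for this sketch.
  }

  @Override
  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
      throws IOException, ServletException
  {
    final HttpServletRequest http = (HttpServletRequest) request;
    if (allowUnauthenticatedHttpOptions && "OPTIONS".equals(http.getMethod())) {
      // Mark the request so later filters in the authenticator chain let it
      // through without credentials.
      request.setAttribute(AUTH_MARKER, Boolean.TRUE);
    }
    chain.doFilter(request, response);
  }

  @Override
  public void destroy()
  {
    // Nothing to clean up.
  }
}

The flag defaults to off, so clusters whose authenticators expect to inspect every HTTP verb are unaffected unless druid.auth.allowUnauthenticatedHttpOptions is explicitly enabled.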
diff --git a/sql/src/main/java/io/druid/sql/calcite/expression/builtin/ExtractOperatorConversion.java b/sql/src/main/java/io/druid/sql/calcite/expression/builtin/ExtractOperatorConversion.java index 8b6de78be82b..e2cd31005e5a 100644 --- a/sql/src/main/java/io/druid/sql/calcite/expression/builtin/ExtractOperatorConversion.java +++ b/sql/src/main/java/io/druid/sql/calcite/expression/builtin/ExtractOperatorConversion.java @@ -39,6 +39,7 @@ public class ExtractOperatorConversion implements SqlOperatorConversion { private static final Map EXTRACT_UNIT_MAP = ImmutableMap.builder() + .put(TimeUnitRange.EPOCH, TimestampExtractExprMacro.Unit.EPOCH) .put(TimeUnitRange.SECOND, TimestampExtractExprMacro.Unit.SECOND) .put(TimeUnitRange.MINUTE, TimestampExtractExprMacro.Unit.MINUTE) .put(TimeUnitRange.HOUR, TimestampExtractExprMacro.Unit.HOUR) @@ -75,11 +76,6 @@ public DruidExpression toDruidExpression( return null; } - if (call.getOperator().getName().equals("EXTRACT_DATE")) { - // Arg will be in number of days since the epoch. Can't translate. - return null; - } - final TimestampExtractExprMacro.Unit druidUnit = EXTRACT_UNIT_MAP.get(calciteUnit); if (druidUnit == null) { // Don't know how to extract this time unit. diff --git a/sql/src/main/java/io/druid/sql/calcite/schema/DruidSchema.java b/sql/src/main/java/io/druid/sql/calcite/schema/DruidSchema.java index af52973cfcfe..b946afea7774 100644 --- a/sql/src/main/java/io/druid/sql/calcite/schema/DruidSchema.java +++ b/sql/src/main/java/io/druid/sql/calcite/schema/DruidSchema.java @@ -28,7 +28,6 @@ import com.google.common.collect.Sets; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; -import com.metamx.emitter.EmittingLogger; import io.druid.client.ServerView; import io.druid.client.TimelineServerView; import io.druid.guice.ManageLifecycle; @@ -40,6 +39,7 @@ import io.druid.java.util.common.guava.Yielders; import io.druid.java.util.common.lifecycle.LifecycleStart; import io.druid.java.util.common.lifecycle.LifecycleStop; +import io.druid.java.util.emitter.EmittingLogger; import io.druid.query.TableDataSource; import io.druid.query.metadata.metadata.AllColumnIncluderator; import io.druid.query.metadata.metadata.ColumnAnalysis; @@ -121,6 +121,7 @@ public class DruidSchema extends AbstractSchema private boolean refreshImmediately = false; private long lastRefresh = 0L; + private long lastFailure = 0L; private boolean isServerViewInitialized = false; @Inject @@ -197,7 +198,13 @@ public void run() final long nextRefresh = nextRefreshNoFuzz + (long) ((nextRefreshNoFuzz - lastRefresh) * 0.10); while (true) { + // Do not refresh if it's too soon after a failure (to avoid rapid cycles of failure). + final boolean wasRecentFailure = DateTimes.utc(lastFailure) + .plus(config.getMetadataRefreshPeriod()) + .isAfterNow(); + if (isServerViewInitialized && + !wasRecentFailure && (!segmentsNeedingRefresh.isEmpty() || !dataSourcesNeedingRebuild.isEmpty()) && (refreshImmediately || nextRefresh < System.currentTimeMillis())) { break; @@ -211,6 +218,7 @@ public void run() // Mutable segments need a refresh every period, since new columns could be added dynamically. segmentsNeedingRefresh.addAll(mutableSegments); + lastFailure = 0L; lastRefresh = System.currentTimeMillis(); refreshImmediately = false; } @@ -258,6 +266,7 @@ public void run() // Add our segments and dataSources back to their refresh and rebuild lists. 
segmentsNeedingRefresh.addAll(segmentsToRefresh); dataSourcesNeedingRebuild.addAll(dataSourcesToRebuild); + lastFailure = System.currentTimeMillis(); lock.notifyAll(); } } diff --git a/sql/src/test/java/io/druid/sql/calcite/CalciteQueryTest.java b/sql/src/test/java/io/druid/sql/calcite/CalciteQueryTest.java index 97502ea8c8cd..a1e3f5f9bace 100644 --- a/sql/src/test/java/io/druid/sql/calcite/CalciteQueryTest.java +++ b/sql/src/test/java/io/druid/sql/calcite/CalciteQueryTest.java @@ -555,7 +555,7 @@ public void testExplainSelectStar() throws Exception ImmutableList.of(), ImmutableList.of( new Object[]{ - "DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"filter\":null,\"columns\":[\"__time\",\"cnt\",\"dim1\",\"dim2\",\"m1\",\"m2\",\"unique_dim1\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false}], signature=[{__time:LONG, cnt:LONG, dim1:STRING, dim2:STRING, m1:FLOAT, m2:DOUBLE, unique_dim1:COMPLEX}])\n" + "DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"filter\":null,\"columns\":[\"__time\",\"cnt\",\"dim1\",\"dim2\",\"m1\",\"m2\",\"unique_dim1\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{__time:LONG, cnt:LONG, dim1:STRING, dim2:STRING, m1:FLOAT, m2:DOUBLE, unique_dim1:COMPLEX}])\n" } ) ); @@ -801,8 +801,8 @@ public void testExplainSelfJoinWithFallback() throws Exception { final String explanation = "BindableJoin(condition=[=($0, $2)], joinType=[inner])\n" - + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"filter\":{\"type\":\"not\",\"field\":{\"type\":\"selector\",\"dimension\":\"dim1\",\"value\":\"\",\"extractionFn\":null}},\"columns\":[\"dim1\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false}], signature=[{dim1:STRING}])\n" - + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"filter\":null,\"columns\":[\"dim1\",\"dim2\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false}], signature=[{dim1:STRING, dim2:STRING}])\n"; + + " 
DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"filter\":{\"type\":\"not\",\"field\":{\"type\":\"selector\",\"dimension\":\"dim1\",\"value\":\"\",\"extractionFn\":null}},\"columns\":[\"dim1\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{dim1:STRING}])\n" + + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"filter\":null,\"columns\":[\"dim1\",\"dim2\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{dim1:STRING, dim2:STRING}])\n"; testQuery( PLANNER_CONFIG_FALLBACK, @@ -2514,7 +2514,6 @@ public void testCaseFilteredAggregationWithGroupBy() throws Exception } @Test - @Ignore // https://issues.apache.org/jira/browse/CALCITE-1910 public void testFilteredAggregationWithNotIn() throws Exception { testQuery( @@ -2527,12 +2526,26 @@ public void testFilteredAggregationWithNotIn() throws Exception .dataSource(CalciteTests.DATASOURCE1) .intervals(QSS(Filtration.eternity())) .granularity(Granularities.ALL) - .aggregators(AGGS()) + .aggregators( + AGGS( + new FilteredAggregatorFactory( + new CountAggregatorFactory("a0"), + NOT(SELECTOR("dim1", "1", null)) + ), + new FilteredAggregatorFactory( + new CountAggregatorFactory("a1"), + AND( + NOT(SELECTOR("dim2", null, null)), + NOT(SELECTOR("dim1", "1", null)) + ) + ) + ) + ) .context(TIMESERIES_CONTEXT_DEFAULT) .build() ), ImmutableList.of( - new Object[]{1L, 5L} + new Object[]{5L, 2L} ) ); } @@ -2817,6 +2830,27 @@ public void testCountStarWithNotOfDegenerateFilter() throws Exception ); } + @Test + public void testCountStarWithBoundFilterSimplifyOnMetric() throws Exception + { + testQuery( + "SELECT COUNT(*) FROM druid.foo WHERE 2.5 < m1 AND m1 < 3.5", + ImmutableList.of( + Druids.newTimeseriesQueryBuilder() + .dataSource(CalciteTests.DATASOURCE1) + .intervals(QSS(Filtration.eternity())) + .granularity(Granularities.ALL) + .filters(BOUND("m1", "2.5", "3.5", true, true, null, StringComparators.NUMERIC)) + .aggregators(AGGS(new CountAggregatorFactory("a0"))) + .context(TIMESERIES_CONTEXT_DEFAULT) + .build() + ), + ImmutableList.of( + new Object[]{1L} + ) + ); + } + @Test public void testCountStarWithBoundFilterSimplifyOr() throws Exception { @@ -3201,6 +3235,39 @@ public void testCountStarWithTimeFilterOnLongColumnUsingExtractEpoch() throws Ex ); } + @Test + public void testCountStarWithTimeFilterOnLongColumnUsingExtractEpochFromDate() throws Exception + { + testQuery( + "SELECT COUNT(*) FROM druid.foo WHERE " + + "cnt >= EXTRACT(EPOCH FROM DATE '1970-01-01') * 1000 " + + "AND cnt < EXTRACT(EPOCH FROM DATE '1970-01-02') * 1000", + ImmutableList.of( + Druids.newTimeseriesQueryBuilder() + .dataSource(CalciteTests.DATASOURCE1) + 
.intervals(QSS(Filtration.eternity())) + .granularity(Granularities.ALL) + .filters( + BOUND( + "cnt", + String.valueOf(DateTimes.of("1970-01-01").getMillis()), + String.valueOf(DateTimes.of("1970-01-02").getMillis()), + false, + true, + null, + StringComparators.NUMERIC + ) + ) + .aggregators(AGGS(new CountAggregatorFactory("a0"))) + .context(TIMESERIES_CONTEXT_DEFAULT) + .build() + ), + ImmutableList.of( + new Object[]{6L} + ) + ); + } + @Test public void testCountStarWithTimeFilterOnLongColumnUsingTimestampToMillis() throws Exception { @@ -4918,7 +4985,6 @@ public void testFilterOnTimeFloorComparisonMisaligned() throws Exception } @Test - @Ignore // https://issues.apache.org/jira/browse/CALCITE-1601 public void testFilterOnTimeExtract() throws Exception { testQuery( @@ -4928,9 +4994,15 @@ public void testFilterOnTimeExtract() throws Exception ImmutableList.of( Druids.newTimeseriesQueryBuilder() .dataSource(CalciteTests.DATASOURCE1) - .intervals(QSS(Intervals.of("2000/P1M"))) + .intervals(QSS(Filtration.eternity())) .granularity(Granularities.ALL) .aggregators(AGGS(new CountAggregatorFactory("a0"))) + .filters( + AND( + EXPRESSION_FILTER("(timestamp_extract(\"__time\",'YEAR','UTC') == 2000)"), + EXPRESSION_FILTER("(timestamp_extract(\"__time\",'MONTH','UTC') == 1)") + ) + ) .context(TIMESERIES_CONTEXT_DEFAULT) .build() ), @@ -4941,24 +5013,33 @@ public void testFilterOnTimeExtract() throws Exception } @Test - @Ignore // https://issues.apache.org/jira/browse/CALCITE-1601 - public void testFilterOnTimeExtractWithMultipleMonths() throws Exception + public void testFilterOnTimeExtractWithMultipleDays() throws Exception { testQuery( "SELECT COUNT(*) FROM druid.foo\n" + "WHERE EXTRACT(YEAR FROM __time) = 2000\n" - + "AND EXTRACT(MONTH FROM __time) IN (2, 3, 5)", + + "AND EXTRACT(DAY FROM __time) IN (2, 3, 5)", ImmutableList.of( Druids.newTimeseriesQueryBuilder() .dataSource(CalciteTests.DATASOURCE1) - .intervals(QSS(Intervals.of("2000-02-01/P2M"), Intervals.of("2000-05-01/P1M"))) + .intervals(QSS(Filtration.eternity())) .granularity(Granularities.ALL) .aggregators(AGGS(new CountAggregatorFactory("a0"))) + .filters( + AND( + EXPRESSION_FILTER("(timestamp_extract(\"__time\",'YEAR','UTC') == 2000)"), + OR( + EXPRESSION_FILTER("(timestamp_extract(\"__time\",'DAY','UTC') == 2)"), + EXPRESSION_FILTER("(timestamp_extract(\"__time\",'DAY','UTC') == 3)"), + EXPRESSION_FILTER("(timestamp_extract(\"__time\",'DAY','UTC') == 5)") + ) + ) + ) .context(TIMESERIES_CONTEXT_DEFAULT) .build() ), ImmutableList.of( - new Object[]{3L} + new Object[]{2L} ) ); } @@ -6094,7 +6175,7 @@ public void testUsingSubqueryAsPartOfOrFilter() throws Exception + " BindableFilter(condition=[OR(=($0, 'xxx'), CAST(AND(IS NOT NULL($4), <>($2, 0))):BOOLEAN)])\n" + " BindableJoin(condition=[=($1, $3)], joinType=[left])\n" + " BindableJoin(condition=[true], joinType=[inner])\n" - + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"filter\":null,\"columns\":[\"dim1\",\"dim2\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false}], signature=[{dim1:STRING, dim2:STRING}])\n" + + " 
DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"filter\":null,\"columns\":[\"dim1\",\"dim2\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{dim1:STRING, dim2:STRING}])\n" + " DruidQueryRel(query=[{\"queryType\":\"timeseries\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"descending\":false,\"virtualColumns\":[],\"filter\":{\"type\":\"like\",\"dimension\":\"dim1\",\"pattern\":\"%bc\",\"escape\":null,\"extractionFn\":null},\"granularity\":{\"type\":\"all\"},\"aggregations\":[{\"type\":\"count\",\"name\":\"a0\"}],\"postAggregations\":[],\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"skipEmptyBuckets\":true,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"}}], signature=[{a0:LONG}])\n" + " DruidQueryRel(query=[{\"queryType\":\"groupBy\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[{\"type\":\"expression\",\"name\":\"d1:v\",\"expression\":\"1\",\"outputType\":\"LONG\"}],\"filter\":{\"type\":\"like\",\"dimension\":\"dim1\",\"pattern\":\"%bc\",\"escape\":null,\"extractionFn\":null},\"granularity\":{\"type\":\"all\"},\"dimensions\":[{\"type\":\"default\",\"dimension\":\"dim1\",\"outputName\":\"d0\",\"outputType\":\"STRING\"},{\"type\":\"default\",\"dimension\":\"d1:v\",\"outputName\":\"d1\",\"outputType\":\"LONG\"}],\"aggregations\":[],\"postAggregations\":[],\"having\":null,\"limitSpec\":{\"type\":\"NoopLimitSpec\"},\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\"},\"descending\":false}], signature=[{d0:STRING, d1:LONG}])\n"; diff --git a/sql/src/test/java/io/druid/sql/calcite/util/CalciteTests.java b/sql/src/test/java/io/druid/sql/calcite/util/CalciteTests.java index 5db831df0327..c22ab27c076e 100644 --- a/sql/src/test/java/io/druid/sql/calcite/util/CalciteTests.java +++ b/sql/src/test/java/io/druid/sql/calcite/util/CalciteTests.java @@ -32,8 +32,8 @@ import com.google.inject.Injector; import com.google.inject.Key; import com.google.inject.Module; -import com.metamx.emitter.core.NoopEmitter; -import com.metamx.emitter.service.ServiceEmitter; +import io.druid.java.util.emitter.core.NoopEmitter; +import io.druid.java.util.emitter.service.ServiceEmitter; import io.druid.collections.StupidPool; import io.druid.data.input.InputRow; import io.druid.data.input.impl.DimensionsSpec; @@ -170,7 +170,7 @@ public Access authorize( @Override public AuthenticationResult authenticateJDBCContext(Map context) { - return new AuthenticationResult((String) context.get("user"), AuthConfig.ALLOW_ALL_NAME, null); + return new AuthenticationResult((String) context.get("user"), AuthConfig.ALLOW_ALL_NAME, null, null); } } ); @@ -191,13 +191,13 @@ public AuthenticationResult createEscalatedAuthenticationResult() public static final AuthenticationResult REGULAR_USER_AUTH_RESULT = new 
AuthenticationResult( AuthConfig.ALLOW_ALL_NAME, AuthConfig.ALLOW_ALL_NAME, - null + null, null ); public static final AuthenticationResult SUPER_USER_AUTH_RESULT = new AuthenticationResult( TEST_SUPERUSER_NAME, AuthConfig.ALLOW_ALL_NAME, - null + null, null ); private static final String TIMESTAMP_COLUMN = "t";
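The router change above adds DruidAvaticaHandler.AVATICA_PATH to UNSECURED_PATHS because Avatica JDBC clients carry credentials in the connection context rather than in HTTP headers; the broker validates them via authenticateJDBCContext(...), as the test stub reading context.get("user") shows. A small client-side sketch of what that looks like, with a placeholder router host and credentials (the Avatica driver jar must be on the classpath):

// Sketch: querying Druid SQL through the router's Avatica endpoint. The
// credentials travel in the JDBC connection properties (Avatica's connection
// context), not in HTTP headers, which is why the router can forward these
// requests without running its own auth filters — the broker performs the
// auth checks. Host/port and credentials below are placeholders.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class AvaticaJdbcExample
{
  public static void main(String[] args) throws Exception
  {
    // DruidAvaticaHandler.AVATICA_PATH is "/druid/v2/sql/avatica/".
    final String url =
        "jdbc:avatica:remote:url=http://router.example.com:8888/druid/v2/sql/avatica/";

    final Properties connectionProperties = new Properties();
    connectionProperties.setProperty("user", "someUser");
    connectionProperties.setProperty("password", "somePassword");

    try (Connection connection = DriverManager.getConnection(url, connectionProperties);
         Statement statement = connection.createStatement();
         ResultSet resultSet = statement.executeQuery("SELECT COUNT(*) FROM foo")) {
      while (resultSet.next()) {
        System.out.println(resultSet.getLong(1));
      }
    }
  }
}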