diff --git a/docs/content/configuration/index.md b/docs/content/configuration/index.md
index 15816b3ad7a0..9bd7b97ce49c 100644
--- a/docs/content/configuration/index.md
+++ b/docs/content/configuration/index.md
@@ -395,3 +395,14 @@ the following properties.
JavaScript-based functionality is disabled by default. Please refer to the Druid
JavaScript programming guide for guidelines about using Druid's JavaScript functionality, including instructions on how to enable it.
+
+### Double Column Storage
+
+Druid's storage layer uses a 32-bit float representation to store columns created by the
+doubleSum, doubleMin, and doubleMax aggregators at indexing time. To instead use 64-bit floats
+for these columns, please set the system-wide property `druid.indexing.doubleStorage=double`.
+This will become the default behavior in a future version of Druid.
+
+|Property|Description|Default|
+|--------|-----------|-------|
+|`druid.indexing.doubleStorage`|Set to "double" to use 64-bit double representation for double columns.|float|
diff --git a/examples/conf-quickstart/druid/_common/common.runtime.properties b/examples/conf-quickstart/druid/_common/common.runtime.properties
index ba73feb685fc..fd131b878cce 100644
--- a/examples/conf-quickstart/druid/_common/common.runtime.properties
+++ b/examples/conf-quickstart/druid/_common/common.runtime.properties
@@ -116,3 +116,8 @@ druid.selectors.coordinator.serviceName=druid/coordinator
druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"]
druid.emitter=logging
druid.emitter.logging.logLevel=info
+
+# Storage type of double columns
+# Omitting this will cause double columns to be indexed as float at the storage layer
+
+druid.indexing.doubleStorage=double
diff --git a/examples/conf/druid/_common/common.runtime.properties b/examples/conf/druid/_common/common.runtime.properties
index 641ef03c6151..a018fa01780c 100644
--- a/examples/conf/druid/_common/common.runtime.properties
+++ b/examples/conf/druid/_common/common.runtime.properties
@@ -115,3 +115,8 @@ druid.selectors.coordinator.serviceName=druid/coordinator
druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"]
druid.emitter=logging
druid.emitter.logging.logLevel=info
+
+# Storage type of double columns
+# Omitting this will cause double columns to be indexed as float at the storage layer
+
+druid.indexing.doubleStorage=double
diff --git a/pom.xml b/pom.xml
index f86bcc1858fa..6029a1393305 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1036,6 +1036,8 @@
-Xmx3000m -Duser.language=en -Duser.country=US -Dfile.encoding=UTF-8
-Duser.timezone=UTC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+
+ -Ddruid.indexing.doubleStorage=double
true
@@ -1264,7 +1266,9 @@
-Xmx768m -Duser.language=en -Duser.country=US -Dfile.encoding=UTF-8
- -Duser.timezone=UTC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+ -Duser.timezone=UTC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+
+ -Ddruid.indexing.doubleStorage=double
true
diff --git a/processing/src/main/java/io/druid/query/aggregation/SimpleDoubleAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/SimpleDoubleAggregatorFactory.java
index ea6c1f92bb73..e627c126c642 100644
--- a/processing/src/main/java/io/druid/query/aggregation/SimpleDoubleAggregatorFactory.java
+++ b/processing/src/main/java/io/druid/query/aggregation/SimpleDoubleAggregatorFactory.java
@@ -26,6 +26,7 @@
import io.druid.math.expr.Parser;
import io.druid.segment.BaseDoubleColumnValueSelector;
import io.druid.segment.ColumnSelectorFactory;
+import io.druid.segment.column.Column;
import java.util.Collections;
import java.util.Comparator;
@@ -38,6 +39,7 @@ public abstract class SimpleDoubleAggregatorFactory extends AggregatorFactory
protected final String fieldName;
protected final String expression;
protected final ExprMacroTable macroTable;
+ protected final boolean storeDoubleAsFloat;
public SimpleDoubleAggregatorFactory(
ExprMacroTable macroTable,
@@ -50,6 +52,7 @@ public SimpleDoubleAggregatorFactory(
this.fieldName = fieldName;
this.name = name;
this.expression = expression;
+ this.storeDoubleAsFloat = Column.storeDoubleAsFloat();
Preconditions.checkNotNull(name, "Must have a valid, non-null aggregator name");
Preconditions.checkArgument(
fieldName == null ^ expression == null,
@@ -81,6 +84,9 @@ public Object deserialize(Object object)
@Override
public String getTypeName()
{
+ if (storeDoubleAsFloat) {
+ return "float";
+ }
return "double";
}
@@ -144,4 +150,5 @@ public String getExpression()
{
return expression;
}
+
}
diff --git a/processing/src/main/java/io/druid/query/aggregation/first/DoubleFirstAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/first/DoubleFirstAggregatorFactory.java
index 98681e37c771..561238fb3381 100644
--- a/processing/src/main/java/io/druid/query/aggregation/first/DoubleFirstAggregatorFactory.java
+++ b/processing/src/main/java/io/druid/query/aggregation/first/DoubleFirstAggregatorFactory.java
@@ -59,6 +59,7 @@ public class DoubleFirstAggregatorFactory extends AggregatorFactory
private final String fieldName;
private final String name;
+ private final boolean storeDoubleAsFloat;
@JsonCreator
public DoubleFirstAggregatorFactory(
@@ -71,6 +72,7 @@ public DoubleFirstAggregatorFactory(
this.name = name;
this.fieldName = fieldName;
+ this.storeDoubleAsFloat = Column.storeDoubleAsFloat();
}
@Override
@@ -222,6 +224,9 @@ public byte[] getCacheKey()
@Override
public String getTypeName()
{
+ if (storeDoubleAsFloat) {
+ return "float";
+ }
return "double";
}
diff --git a/processing/src/main/java/io/druid/query/aggregation/last/DoubleLastAggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/last/DoubleLastAggregatorFactory.java
index 23f3ee425c37..a0ed875ecda5 100644
--- a/processing/src/main/java/io/druid/query/aggregation/last/DoubleLastAggregatorFactory.java
+++ b/processing/src/main/java/io/druid/query/aggregation/last/DoubleLastAggregatorFactory.java
@@ -51,6 +51,7 @@ public class DoubleLastAggregatorFactory extends AggregatorFactory
private final String fieldName;
private final String name;
+ private final boolean storeDoubleAsFloat;
@JsonCreator
public DoubleLastAggregatorFactory(
@@ -62,6 +63,7 @@ public DoubleLastAggregatorFactory(
Preconditions.checkNotNull(fieldName, "Must have a valid, non-null fieldName");
this.name = name;
this.fieldName = fieldName;
+ this.storeDoubleAsFloat = Column.storeDoubleAsFloat();
}
@Override
@@ -213,6 +215,10 @@ public byte[] getCacheKey()
@Override
public String getTypeName()
{
+
+ if (storeDoubleAsFloat) {
+ return "float";
+ }
return "double";
}
diff --git a/processing/src/main/java/io/druid/segment/column/Column.java b/processing/src/main/java/io/druid/segment/column/Column.java
index b6b52582ae41..2505170f774b 100644
--- a/processing/src/main/java/io/druid/segment/column/Column.java
+++ b/processing/src/main/java/io/druid/segment/column/Column.java
@@ -19,11 +19,21 @@
package io.druid.segment.column;
+import io.druid.java.util.common.StringUtils;
+
/**
*/
public interface Column
{
String TIME_COLUMN_NAME = "__time";
+ String DOUBLE_STORAGE_TYPE_PROPERTY = "druid.indexing.doubleStorage";
+
+ static boolean storeDoubleAsFloat()
+ {
+ String value = System.getProperty(DOUBLE_STORAGE_TYPE_PROPERTY, "float");
+ return !StringUtils.toLowerCase(value).equals("double");
+ }
+
ColumnCapabilities getCapabilities();
int getLength();
diff --git a/processing/src/test/java/io/druid/query/DoubleStorageTest.java b/processing/src/test/java/io/druid/query/DoubleStorageTest.java
new file mode 100644
index 000000000000..dbdeb81f7890
--- /dev/null
+++ b/processing/src/test/java/io/druid/query/DoubleStorageTest.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to Metamarkets Group Inc. (Metamarkets) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Metamarkets licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package io.druid.query;
+
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import io.druid.data.input.impl.DimensionsSpec;
+import io.druid.data.input.impl.InputRowParser;
+import io.druid.data.input.impl.JSONParseSpec;
+import io.druid.data.input.impl.MapInputRowParser;
+import io.druid.data.input.impl.SpatialDimensionSchema;
+import io.druid.data.input.impl.TimestampSpec;
+import io.druid.java.util.common.DateTimes;
+import io.druid.java.util.common.Intervals;
+import io.druid.java.util.common.guava.Sequences;
+import io.druid.query.aggregation.DoubleSumAggregatorFactory;
+import io.druid.query.metadata.SegmentMetadataQueryConfig;
+import io.druid.query.metadata.SegmentMetadataQueryQueryToolChest;
+import io.druid.query.metadata.SegmentMetadataQueryRunnerFactory;
+import io.druid.query.metadata.metadata.ColumnAnalysis;
+import io.druid.query.metadata.metadata.ListColumnIncluderator;
+import io.druid.query.metadata.metadata.SegmentAnalysis;
+import io.druid.query.metadata.metadata.SegmentMetadataQuery;
+
+import io.druid.query.scan.ScanQuery;
+import io.druid.query.scan.ScanQueryConfig;
+import io.druid.query.scan.ScanQueryEngine;
+import io.druid.query.scan.ScanQueryQueryToolChest;
+import io.druid.query.scan.ScanQueryRunnerFactory;
+import io.druid.query.scan.ScanResultValue;
+import io.druid.query.spec.LegacySegmentSpec;
+import io.druid.segment.IndexIO;
+import io.druid.segment.IndexMergerV9;
+import io.druid.segment.IndexSpec;
+import io.druid.segment.QueryableIndex;
+import io.druid.segment.QueryableIndexSegment;
+import io.druid.segment.TestHelper;
+import io.druid.segment.column.ValueType;
+import io.druid.segment.incremental.IncrementalIndex;
+import io.druid.segment.incremental.IncrementalIndexSchema;
+import io.druid.segment.incremental.IndexSizeExceededException;
+import org.joda.time.Interval;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import static io.druid.segment.column.Column.DOUBLE_STORAGE_TYPE_PROPERTY;
+import static io.druid.query.scan.ScanQueryRunnerTest.verify;
+
+@RunWith(Parameterized.class)
+public class DoubleStorageTest
+{
+
+ private static final SegmentMetadataQueryRunnerFactory METADATA_QR_FACTORY = new SegmentMetadataQueryRunnerFactory(
+ new SegmentMetadataQueryQueryToolChest(new SegmentMetadataQueryConfig()),
+ QueryRunnerTestHelper.NOOP_QUERYWATCHER
+ );
+
+ private static final ScanQueryQueryToolChest scanQueryQueryToolChest = new ScanQueryQueryToolChest(
+ new ScanQueryConfig(),
+ DefaultGenericQueryMetricsFactory.instance()
+ );
+
+ private static final ScanQueryRunnerFactory SCAN_QUERY_RUNNER_FACTORY = new ScanQueryRunnerFactory(
+ scanQueryQueryToolChest,
+ new ScanQueryEngine()
+ );
+
+ private ScanQuery.ScanQueryBuilder newTestQuery()
+ {
+ return ScanQuery.newScanQueryBuilder()
+ .dataSource(new TableDataSource(QueryRunnerTestHelper.dataSource))
+ .columns(Arrays.asList())
+ .intervals(QueryRunnerTestHelper.fullOnInterval)
+ .limit(Integer.MAX_VALUE)
+ .legacy(false);
+ }
+
+
+ private static final IndexMergerV9 INDEX_MERGER_V9 = TestHelper.getTestIndexMergerV9();
+ private static final IndexIO INDEX_IO = TestHelper.getTestIndexIO();
+ private static final Integer MAX_ROWS = 10;
+ private static final String TIME_COLUMN = "__time";
+ private static final String DIM_NAME = "testDimName";
+ private static final String DIM_VALUE = "testDimValue";
+ private static final String DIM_FLOAT_NAME = "testDimFloatName";
+ private static final String SEGMENT_ID = "segmentId";
+ private static final Interval INTERVAL = Intervals.of("2011-01-13T00:00:00.000Z/2011-01-22T00:00:00.001Z");
+
+ private static final InputRowParser