From a4229fe87f540e186aa5b83565c3c0eebce37c01 Mon Sep 17 00:00:00 2001 From: turu Date: Thu, 11 Feb 2016 23:46:24 +0100 Subject: [PATCH 1/8] removed unsafe heuristics from hll compareTo and provided unit test for regression --- .../hyperloglog/HyperLogLogCollector.java | 17 +------- .../hyperloglog/HyperLogLogCollectorTest.java | 39 +++++++++++++++++++ 2 files changed, 40 insertions(+), 16 deletions(-) diff --git a/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java b/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java index 37166e938f9f..4803b2ca765e 100644 --- a/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java +++ b/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java @@ -681,21 +681,6 @@ private static short mergeAndStoreByteRegister( @Override public int compareTo(HyperLogLogCollector other) { - final int lhsOffset = (int) this.getRegisterOffset() & 0xffff; - final int rhsOffset = (int) other.getRegisterOffset() & 0xffff; - - if (lhsOffset == rhsOffset) { - final int lhsNumNonZero = (int) this.getNumNonZeroRegisters() & 0xff; - final int rhsNumNonZero = (int) this.getNumNonZeroRegisters() & 0xff; - int retVal = Double.compare(lhsNumNonZero, rhsNumNonZero); - - if (retVal == 0) { - retVal = Double.compare(this.estimateCardinality(), other.estimateCardinality()); - } - - return retVal; - } else { - return Double.compare(lhsOffset, rhsOffset); - } + return Double.compare(this.estimateCardinality(), other.estimateCardinality()); } } diff --git a/processing/src/test/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollectorTest.java b/processing/src/test/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollectorTest.java index 9f7f92be6c47..25bd2ee82906 100644 --- a/processing/src/test/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollectorTest.java +++ 
b/processing/src/test/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollectorTest.java @@ -776,6 +776,45 @@ public void testCompare2() throws Exception } } + @Test + public void testCompareToShouldBehaveConsistentlyWithEstimatedCardinalitiesEvenInToughCases() throws Exception { + // given + Random rand = new Random(0); + HyperUniquesAggregatorFactory factory = new HyperUniquesAggregatorFactory("foo", "bar"); + Comparator comparator = factory.getComparator(); + + for (int i = 0; i < 1000; ++i) { + // given + HyperLogLogCollector leftCollector = HyperLogLogCollector.makeLatestCollector(); + int j = rand.nextInt(9000) + 5000; + for (int l = 0; l < j; ++l) { + leftCollector.add(fn.hashLong(rand.nextLong()).asBytes()); + } + + HyperLogLogCollector rightCollector = HyperLogLogCollector.makeLatestCollector(); + int k = rand.nextInt(9000) + 5000; + for (int l = 0; l < k; ++l) { + rightCollector.add(fn.hashLong(rand.nextLong()).asBytes()); + } + + // when + final int orderedByCardinality = Double.compare(leftCollector.estimateCardinality(), + rightCollector.estimateCardinality()); + final int orderedByComparator = comparator.compare(leftCollector, rightCollector); + + // then, assert hyperloglog comparator behaves consistently with estimated cardinalities + Assert.assertEquals( + String.format("orderedByComparator=%d, orderedByCardinality=%d,\n" + + "Left={cardinality=%f, hll=%s},\n" + + "Right={cardinality=%f, hll=%s},\n", orderedByComparator, orderedByCardinality, + leftCollector.estimateCardinality(), leftCollector, + rightCollector.estimateCardinality(), rightCollector), + orderedByCardinality, + orderedByComparator + ); + } + } + @Test public void testMaxOverflow() { HyperLogLogCollector collector = HyperLogLogCollector.makeLatestCollector(); From b601ed69dfe4461985dff2b4a3dd647f3610c16d Mon Sep 17 00:00:00 2001 From: Slim Bouguerra Date: Thu, 11 Feb 2016 10:40:42 -0600 Subject: [PATCH 2/8] fix docs about search query limit --- 
docs/content/querying/searchquery.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/content/querying/searchquery.md b/docs/content/querying/searchquery.md index 2bc0dedd50b1..c11243d7483f 100644 --- a/docs/content/querying/searchquery.md +++ b/docs/content/querying/searchquery.md @@ -34,6 +34,7 @@ There are several main parts to a search query: |dataSource|A String or Object defining the data source to query, very similar to a table in a relational database. See [DataSource](../querying/datasource.html) for more information.|yes| |granularity|Defines the granularity of the query. See [Granularities](../querying/granularities.html).|yes| |filter|See [Filters](../querying/filters.html).|no| +|limit| Defines the maximum number of search results to return per historical node (parsed as int). |no (default is 1000)| |intervals|A JSON Object representing ISO-8601 Intervals. This defines the time ranges to run the query over.|yes| |searchDimensions|The dimensions to run the search over. Excluding this means the search is run over all dimensions.|no| |query|See [SearchQuerySpec](../querying/searchqueryspec.html).|yes| From 078e332ef752bbb9d856462cca4c21c620ee6891 Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Thu, 11 Feb 2016 13:51:04 -0800 Subject: [PATCH 3/8] PropertiesModule: Print properties, processors, totalMemory on startup.
--- .../main/java/io/druid/guice/PropertiesModule.java | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/processing/src/main/java/io/druid/guice/PropertiesModule.java b/processing/src/main/java/io/druid/guice/PropertiesModule.java index 51165f644891..dfc1abf11b26 100644 --- a/processing/src/main/java/io/druid/guice/PropertiesModule.java +++ b/processing/src/main/java/io/druid/guice/PropertiesModule.java @@ -21,6 +21,7 @@ import com.google.common.base.Charsets; import com.google.common.base.Throwables; +import com.google.common.collect.Ordering; import com.google.inject.Binder; import com.google.inject.Module; import com.metamx.common.guava.CloseQuietly; @@ -86,6 +87,16 @@ public void configure(Binder binder) } } + log.info( + "Loaded properties into JVM with processors[%,d], memory[%,d].", + Runtime.getRuntime().availableProcessors(), + Runtime.getRuntime().totalMemory() + ); + + for (String propertyName : Ordering.natural().sortedCopy(props.stringPropertyNames())) { + log.info("* %s: %s", propertyName, props.getProperty(propertyName)); + } + binder.bind(Properties.class).toInstance(props); } } From 382cfa660933e2d3260222130f6fb0795b1396cd Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Fri, 12 Feb 2016 14:12:16 -0800 Subject: [PATCH 4/8] Make startup properties logging optional. Off by default, but enabled in the example config files. See also #2452. 
--- docs/content/configuration/index.md | 10 ++++++ .../druid/_common/common.runtime.properties | 12 ++++++- .../druid/_common/common.runtime.properties | 12 ++++++- .../java/io/druid/guice/PropertiesModule.java | 10 ------ .../io/druid/guice/StartupLoggingModule.java | 33 +++++++++++++++++++ .../druid/initialization/Initialization.java | 4 ++- .../server/log/StartupLoggingConfig.java | 33 +++++++++++++++++++ .../main/java/io/druid/cli/GuiceRunnable.java | 18 ++++++++++ 8 files changed, 119 insertions(+), 13 deletions(-) create mode 100644 server/src/main/java/io/druid/guice/StartupLoggingModule.java create mode 100644 server/src/main/java/io/druid/server/log/StartupLoggingConfig.java diff --git a/docs/content/configuration/index.md b/docs/content/configuration/index.md index a3f934a68446..31cf108955b8 100644 --- a/docs/content/configuration/index.md +++ b/docs/content/configuration/index.md @@ -74,6 +74,16 @@ The following path is used for service discovery. It is **not** affected by `dru |--------|-----------|-------| |`druid.discovery.curator.path`|Services announce themselves under this ZooKeeper path.|`/druid/discovery`| +### Startup Logging + +All nodes can log debugging information on startup. + +|Property|Description|Default| +|--------|-----------|-------| +|`druid.startup.logging.logProperties`|Log all properties on startup (from common.runtime.properties, runtime.properties, and the JVM command line).|false| + +Note that some sensitive information may be logged if these settings are enabled. + ### Request Logging All nodes that can serve queries can also log the query requests they see. 
diff --git a/examples/conf-quickstart/druid/_common/common.runtime.properties b/examples/conf-quickstart/druid/_common/common.runtime.properties index 23e94bb89c30..38f0f262e429 100644 --- a/examples/conf-quickstart/druid/_common/common.runtime.properties +++ b/examples/conf-quickstart/druid/_common/common.runtime.properties @@ -30,6 +30,13 @@ druid.extensions.loadList=[] # and uncomment the line below to point to your directory. #druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies +# +# Logging +# + +# Log all runtime properties on startup. Disable to avoid logging properties on startup: +druid.startup.logging.logProperties=true + # # Zookeeper # @@ -41,6 +48,7 @@ druid.zk.paths.base=/druid # Metadata storage # +# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over): druid.metadata.storage.type=derby druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true druid.metadata.storage.connector.host=localhost @@ -62,6 +70,7 @@ druid.metadata.storage.connector.port=1527 # Deep storage # +# For local disk (only viable in a cluster if this is a network mount): druid.storage.type=local druid.storage.storageDirectory=var/druid/segments @@ -80,6 +89,7 @@ druid.storage.storageDirectory=var/druid/segments # Indexing service logs # +# For local disk (only viable in a cluster if this is a network mount): druid.indexer.logs.type=file druid.indexer.logs.directory=var/druid/indexing-logs @@ -105,4 +115,4 @@ druid.selectors.coordinator.serviceName=druid/coordinator druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"] druid.emitter=logging -druid.emitter.logging.logLevel=debug +druid.emitter.logging.logLevel=info diff --git a/examples/conf/druid/_common/common.runtime.properties b/examples/conf/druid/_common/common.runtime.properties index edd2712b873c..19b005257369 100644 --- a/examples/conf/druid/_common/common.runtime.properties +++ 
b/examples/conf/druid/_common/common.runtime.properties @@ -29,6 +29,13 @@ druid.extensions.loadList=["druid-kafka-eight", "druid-s3-extensions", "druid-hi # and uncomment the line below to point to your directory. #druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies +# +# Logging +# + +# Log all runtime properties on startup. Disable to avoid logging properties on startup: +druid.startup.logging.logProperties=true + # # Zookeeper # @@ -40,6 +47,7 @@ druid.zk.paths.base=/druid # Metadata storage # +# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over): druid.metadata.storage.type=derby druid.metadata.storage.connector.connectURI=jdbc:derby://metadata.store.ip:1527/var/druid/metadata.db;create=true druid.metadata.storage.connector.host=metadata.store.ip @@ -61,6 +69,7 @@ druid.metadata.storage.connector.port=1527 # Deep storage # +# For local disk (only viable in a cluster if this is a network mount): druid.storage.type=local druid.storage.storageDirectory=var/druid/segments @@ -79,12 +88,13 @@ druid.storage.storageDirectory=var/druid/segments # Indexing service logs # +# For local disk (only viable in a cluster if this is a network mount): druid.indexer.logs.type=file druid.indexer.logs.directory=var/druid/indexing-logs # For HDFS (make sure to include the HDFS extension and that your Hadoop config files in the cp): #druid.indexer.logs.type=hdfs -#druid.indexer.logs.directory=hdfs://namenode.example.com:9000/druid/indexing-logs +#druid.indexer.logs.directory=/druid/indexing-logs # For S3: #druid.indexer.logs.type=s3 diff --git a/processing/src/main/java/io/druid/guice/PropertiesModule.java b/processing/src/main/java/io/druid/guice/PropertiesModule.java index dfc1abf11b26..408b964906cd 100644 --- a/processing/src/main/java/io/druid/guice/PropertiesModule.java +++ b/processing/src/main/java/io/druid/guice/PropertiesModule.java @@ -87,16 +87,6 @@ public void configure(Binder binder) } } - 
log.info( - "Loaded properties into JVM with processors[%,d], memory[%,d].", - Runtime.getRuntime().availableProcessors(), - Runtime.getRuntime().totalMemory() - ); - - for (String propertyName : Ordering.natural().sortedCopy(props.stringPropertyNames())) { - log.info("* %s: %s", propertyName, props.getProperty(propertyName)); - } - binder.bind(Properties.class).toInstance(props); } } diff --git a/server/src/main/java/io/druid/guice/StartupLoggingModule.java b/server/src/main/java/io/druid/guice/StartupLoggingModule.java new file mode 100644 index 000000000000..876589b55f92 --- /dev/null +++ b/server/src/main/java/io/druid/guice/StartupLoggingModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package io.druid.guice; + +import com.google.inject.Binder; +import com.google.inject.Module; +import io.druid.server.log.StartupLoggingConfig; + +public class StartupLoggingModule implements Module +{ + @Override + public void configure(Binder binder) + { + JsonConfigProvider.bind(binder, "druid.startup.logging", StartupLoggingConfig.class); + } +} diff --git a/server/src/main/java/io/druid/initialization/Initialization.java b/server/src/main/java/io/druid/initialization/Initialization.java index 0a13b6d78711..2448070ec3fc 100644 --- a/server/src/main/java/io/druid/initialization/Initialization.java +++ b/server/src/main/java/io/druid/initialization/Initialization.java @@ -50,6 +50,7 @@ import io.druid.guice.QueryableModule; import io.druid.guice.ServerModule; import io.druid.guice.ServerViewModule; +import io.druid.guice.StartupLoggingModule; import io.druid.guice.StorageNodeModule; import io.druid.guice.annotations.Client; import io.druid.guice.annotations.Json; @@ -302,7 +303,8 @@ public static Injector makeInjectorWithModules(final Injector baseInjector, Iter new CoordinatorDiscoveryModule(), new LocalDataStorageDruidModule(), new FirehoseModule(), - new ParsersModule() + new ParsersModule(), + new StartupLoggingModule() ); ModuleList actualModules = new ModuleList(baseInjector); diff --git a/server/src/main/java/io/druid/server/log/StartupLoggingConfig.java b/server/src/main/java/io/druid/server/log/StartupLoggingConfig.java new file mode 100644 index 000000000000..77cb33b58ec8 --- /dev/null +++ b/server/src/main/java/io/druid/server/log/StartupLoggingConfig.java @@ -0,0 +1,33 @@ +/* + * Licensed to Metamarkets Group Inc. (Metamarkets) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Metamarkets licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package io.druid.server.log; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public class StartupLoggingConfig +{ + @JsonProperty + private boolean logProperties = false; + + public boolean isLogProperties() + { + return logProperties; + } +} diff --git a/services/src/main/java/io/druid/cli/GuiceRunnable.java b/services/src/main/java/io/druid/cli/GuiceRunnable.java index 78658ac0e613..d75184dfe240 100644 --- a/services/src/main/java/io/druid/cli/GuiceRunnable.java +++ b/services/src/main/java/io/druid/cli/GuiceRunnable.java @@ -20,6 +20,7 @@ package io.druid.cli; import com.google.common.base.Throwables; +import com.google.common.collect.Ordering; import com.google.inject.Inject; import com.google.inject.Injector; import com.google.inject.Module; @@ -27,8 +28,10 @@ import com.metamx.common.logger.Logger; import io.druid.initialization.Initialization; import io.druid.initialization.LogLevelAdjuster; +import io.druid.server.log.StartupLoggingConfig; import java.util.List; +import java.util.Properties; /** */ @@ -68,6 +71,21 @@ public Lifecycle initLifecycle(Injector injector) try { LogLevelAdjuster.register(); final Lifecycle lifecycle = injector.getInstance(Lifecycle.class); + final StartupLoggingConfig startupLoggingConfig = injector.getInstance(StartupLoggingConfig.class); + + log.info( + "Starting up with processors[%,d], memory[%,d].", + 
Runtime.getRuntime().availableProcessors(), + Runtime.getRuntime().totalMemory() + ); + + if (startupLoggingConfig.isLogProperties()) { + final Properties props = injector.getInstance(Properties.class); + + for (String propertyName : Ordering.natural().sortedCopy(props.stringPropertyNames())) { + log.info("* %s: %s", propertyName, props.getProperty(propertyName)); + } + } try { lifecycle.start(); From 740dd3a83787a71c2b177679994399d58dd9f057 Mon Sep 17 00:00:00 2001 From: Robin Date: Tue, 16 Feb 2016 12:59:06 -0600 Subject: [PATCH 5/8] query tests: add max and min to segment metadata query results --- .../src/test/resources/queries/twitterstream_queries.json | 6 ++++++ .../resources/queries/wikipedia_editstream_queries.json | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/integration-tests/src/test/resources/queries/twitterstream_queries.json b/integration-tests/src/test/resources/queries/twitterstream_queries.json index 8e9510fbd7a0..4c0deb7d3e71 100644 --- a/integration-tests/src/test/resources/queries/twitterstream_queries.json +++ b/integration-tests/src/test/resources/queries/twitterstream_queries.json @@ -597,6 +597,8 @@ "hasMultipleValues": false, "size": 7773438, "cardinality": 2, + "minValue":"No", + "maxValue":"Yes", "errorMessage": null } }, @@ -613,6 +615,8 @@ "hasMultipleValues": false, "size": 7901000, "cardinality": 2, + "minValue":"No", + "maxValue":"Yes", "errorMessage": null } }, @@ -629,6 +633,8 @@ "hasMultipleValues": false, "size": 7405654, "cardinality": 2, + "minValue":"No", + "maxValue":"Yes", "errorMessage": null } }, diff --git a/integration-tests/src/test/resources/queries/wikipedia_editstream_queries.json b/integration-tests/src/test/resources/queries/wikipedia_editstream_queries.json index 0b4572c11338..aa5aa1e6408c 100644 --- a/integration-tests/src/test/resources/queries/wikipedia_editstream_queries.json +++ b/integration-tests/src/test/resources/queries/wikipedia_editstream_queries.json @@ -1048,6 +1048,8 @@ 
"hasMultipleValues": false, "size": 41922148, "cardinality": 208, + "minValue":"", + "maxValue":"mmx._unknown", "errorMessage": null }, "language": { @@ -1055,6 +1057,8 @@ "hasMultipleValues": false, "size": 8924222, "cardinality": 36, + "minValue":"ar", + "maxValue":"zh", "errorMessage": null } }, From 09e64c94fd70fba500b7601e4a4e55068d038724 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andre=CC=81s=20Gomez?= Date: Mon, 15 Feb 2016 17:07:43 +0100 Subject: [PATCH 6/8] Fixed equal distribution strategy when exist disable middleManager with same currCapacityUsed. --- ...EqualDistributionWorkerSelectStrategy.java | 12 +++- .../FillCapacityWorkerSelectStrategy.java | 7 +- ...lDistributionWorkerSelectStrategyTest.java | 66 +++++++++++++++++++ 3 files changed, 83 insertions(+), 2 deletions(-) diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/setup/EqualDistributionWorkerSelectStrategy.java b/indexing-service/src/main/java/io/druid/indexing/overlord/setup/EqualDistributionWorkerSelectStrategy.java index fd1769ee8f5b..1db1fc2aaef8 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/setup/EqualDistributionWorkerSelectStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/setup/EqualDistributionWorkerSelectStrategy.java @@ -47,7 +47,17 @@ public int compare( ImmutableZkWorker zkWorker, ImmutableZkWorker zkWorker2 ) { - return -Ints.compare(zkWorker2.getCurrCapacityUsed(), zkWorker.getCurrCapacityUsed()); + int retVal = -Ints.compare(zkWorker2.getCurrCapacityUsed(), zkWorker.getCurrCapacityUsed()); + // the version sorting is needed because if the workers have the same currCapacityUsed only one of them is + // returned. Exists the possibility that this worker is disabled and doesn't have valid version so can't + // run new tasks, so in this case the workers are sorted using version to ensure that if exists enable + // workers the comparator return one of them. 
+ + if(retVal == 0) { + retVal = zkWorker2.getWorker().getVersion().compareTo(zkWorker.getWorker().getVersion()); + } + + return retVal; } } ); diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/setup/FillCapacityWorkerSelectStrategy.java b/indexing-service/src/main/java/io/druid/indexing/overlord/setup/FillCapacityWorkerSelectStrategy.java index a0e8e2700490..447fd9857395 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/setup/FillCapacityWorkerSelectStrategy.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/setup/FillCapacityWorkerSelectStrategy.java @@ -50,8 +50,13 @@ public int compare( ) { int retVal = Ints.compare(zkWorker2.getCurrCapacityUsed(), zkWorker.getCurrCapacityUsed()); + // Version sorting is needed because when workers have the same currCapacityUsed, only one of them is + // returned. It is possible that such a worker is disabled and lacks a valid version, so it can't + // run new tasks; in that case the workers are sorted by version to ensure that if enabled + // workers exist, the comparator returns one of them.
+ if (retVal == 0) { - retVal = zkWorker.getWorker().getHost().compareTo(zkWorker2.getWorker().getHost()); + retVal = zkWorker.getWorker().getVersion().compareTo(zkWorker2.getWorker().getVersion()); } return retVal; diff --git a/indexing-service/src/test/java/io/druid/indexing/overlord/setup/EqualDistributionWorkerSelectStrategyTest.java b/indexing-service/src/test/java/io/druid/indexing/overlord/setup/EqualDistributionWorkerSelectStrategyTest.java index 506aceceddc2..b43b5c6c4829 100644 --- a/indexing-service/src/test/java/io/druid/indexing/overlord/setup/EqualDistributionWorkerSelectStrategyTest.java +++ b/indexing-service/src/test/java/io/druid/indexing/overlord/setup/EqualDistributionWorkerSelectStrategyTest.java @@ -63,4 +63,70 @@ public String getDataSource() ImmutableZkWorker worker = optional.get(); Assert.assertEquals("lhost", worker.getWorker().getHost()); } + + @Test + public void testOneDisableWorkerDifferentUsedCapacity() throws Exception + { + String DISABLED_VERSION = ""; + final EqualDistributionWorkerSelectStrategy strategy = new EqualDistributionWorkerSelectStrategy(); + + Optional optional = strategy.findWorkerForTask( + new RemoteTaskRunnerConfig(), + ImmutableMap.of( + "lhost", + new ImmutableZkWorker( + new Worker("disableHost", "disableHost", 10, DISABLED_VERSION), 2, + Sets.newHashSet() + ), + "localhost", + new ImmutableZkWorker( + new Worker("enableHost", "enableHost", 10, "v1"), 5, + Sets.newHashSet() + ) + ), + new NoopTask(null, 1, 0, null, null, null) + { + @Override + public String getDataSource() + { + return "foo"; + } + } + ); + ImmutableZkWorker worker = optional.get(); + Assert.assertEquals("enableHost", worker.getWorker().getHost()); + } + + @Test + public void testOneDisableWorkerSameUsedCapacity() throws Exception + { + String DISABLED_VERSION = ""; + final EqualDistributionWorkerSelectStrategy strategy = new EqualDistributionWorkerSelectStrategy(); + + Optional optional = strategy.findWorkerForTask( + new 
RemoteTaskRunnerConfig(), + ImmutableMap.of( + "lhost", + new ImmutableZkWorker( + new Worker("disableHost", "disableHost", 10, DISABLED_VERSION), 5, + Sets.newHashSet() + ), + "localhost", + new ImmutableZkWorker( + new Worker("enableHost", "enableHost", 10, "v1"), 5, + Sets.newHashSet() + ) + ), + new NoopTask(null, 1, 0, null, null, null) + { + @Override + public String getDataSource() + { + return "foo"; + } + } + ); + ImmutableZkWorker worker = optional.get(); + Assert.assertEquals("enableHost", worker.getWorker().getHost()); + } } From 519b5df571db0c5586bc3d5dd5cc0a51e7914de4 Mon Sep 17 00:00:00 2001 From: Gian Merlino Date: Wed, 17 Feb 2016 08:33:21 -0800 Subject: [PATCH 7/8] Multivalued dimensions can be compressed since 0.8.0. --- docs/content/ingestion/tasks.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/ingestion/tasks.md b/docs/content/ingestion/tasks.md index 4587c984bfba..42dfc62b783a 100644 --- a/docs/content/ingestion/tasks.md +++ b/docs/content/ingestion/tasks.md @@ -124,7 +124,7 @@ The indexSpec is optional and default parameters will be used if not specified. 
|property|description|possible values|default|required?| |--------|-----------|---------------|-------|---------| |bitmap|type of bitmap compression to use for inverted indices.|`"concise"`, `"roaring"`|`"concise"`|no| -|dimensionCompression|compression format for dimension columns (currently only affects single-value dimensions, multi-value dimensions are always uncompressed)|`"uncompressed"`, `"lz4"`, `"lzf"`|`"lz4"`|no| +|dimensionCompression|compression format for dimension columns|`"uncompressed"`, `"lz4"`, `"lzf"`|`"lz4"`|no| |metricCompression|compression format for metric columns, defaults to LZ4|`"lz4"`, `"lzf"`|`"lz4"`|no| Segment Merging Tasks From 6a014fb2f113befd718fd7a4ce6533f32099747c Mon Sep 17 00:00:00 2001 From: Robin Date: Wed, 17 Feb 2016 11:21:14 -0600 Subject: [PATCH 8/8] integration-tests/README: fix link to docker-install --- integration-tests/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/README.md b/integration-tests/README.md index ba12c961e957..bfcc5ae56fb4 100644 --- a/integration-tests/README.md +++ b/integration-tests/README.md @@ -3,7 +3,7 @@ Integration Testing ## Installing Docker -Please refer to instructions at [https://github.com/druid-io/docker-druid/blob/master/docker-install.md]() +Please refer to instructions at [https://github.com/druid-io/docker-druid/blob/master/docker-install.md](https://github.com/druid-io/docker-druid/blob/master/docker-install.md). ## Creating the Docker VM