diff --git a/.gitignore b/.gitignore index df8a58abf2cc..da1117c38d97 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ target *.DS_Store _site dependency-reduced-pom.xml +README.BINARY diff --git a/LABELS.md b/LABELS.md new file mode 100644 index 000000000000..26866c4e7eba --- /dev/null +++ b/LABELS.md @@ -0,0 +1,106 @@ + +### Licensing Labels + +#### Binary-only + + This product bundles fonts from Font Awesome Free version 4.2.0, copyright Font Awesome, + which is available under the SIL OFL 1.1. For details, see licenses/bin/font-awesome.silofl + * https://fontawesome.com/ + + This product bundles JavaBeans Activation Framework version 1.2.0, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.activation.CDDL11 + * https://github.com/javaee/activation + * com.sun.activation:javax.activation + + This product bundles Jersey version 1.19.3, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/jersey.CDDL11 + * https://jersey.github.io/ + * com.sun.jersey:jersey-core + * com.sun.jersey:jersey-server + * com.sun.jersey:jersey-servlet + * com.sun.jersey:contribs + + This product bundles Expression Language 3.0 API version 3.0.0., copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/el-spec + * javax.el:javax.el-api + + This product bundles Java Servlet API version 3.1.0, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/servlet-spec + * javax.servlet:javax.servlet-api + + This product bundles JSR311 API version 1.1.1, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. 
For details, see licenses/bin/jsr311-api.CDDL11 + * https://github.com/javaee/jsr311 + * javax.ws.rs:jsr311-api + + This product bundles Expression Language 3.0 version 3.0.0., copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/el-spec + * org.glassfish:javax.el + + This product bundles Jersey version 1.9, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/jersey.CDDL11 + * https://jersey.github.io/ + * com.sun.jersey:jersey-client + * com.sun.jersey:jersey-core + + This product bundles JavaBeans Activation Framework version 1.1, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/activation + * javax.activation:activation + + This product bundles Java Servlet API version 2.5, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/servlet-spec + * javax.servlet:javax.servlet-api + + This product bundles JAXB version 2.2.2, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/jaxb-v2 + * javax.xml.bind:jaxb-api + + This product bundles stax-api version 1.0-2, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/ + * javax.xml.stream:stax-api + + This product bundles jsp-api version 2.1, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. 
For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/javaee-jsp-api + * javax.servlet.jsp:jsp-api + + This product bundles Jersey version 1.15, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/jersey.CDDL11 + * https://jersey.github.io/ + * com.sun.jersey:jersey-client + + This product bundles OkHttp Aether Connector version 0.0.9, copyright to original author or authors, + which is available under the Eclipse Public License 1.0. For details, see licenses/bin/aether-connector-okhttp.EPL1. + * https://github.com/takari/aether-connector-okhttp + * io.tesla.aether:aether-connector-okhttp + + This product bundles Tesla Aether version 0.0.5, copyright to original author or authors, + which is available under the Eclipse Public License 1.0. For details, see licenses/bin/tesla-aether.EPL1. + * https://github.com/tesla/tesla-aether + * io.tesla.aether:tesla-aether + + This product bundles Eclipse Aether libraries version 0.9.0.M2, copyright Sonatype, Inc., + which is available under the Eclipse Public License 1.0. For details, see licenses/bin/aether-core.EPL1. + * https://github.com/eclipse/aether-core + * org.eclipse.aether:aether-api + * org.eclipse.aether:aether-connector-file + * org.eclipse.aether:aether-impl + * org.eclipse.aether:aether-spi + * org.eclipse.aether:aether-util + + This product bundles Rhino version 1.7R5, copyright Mozilla and individual contributors., + which is available under the Mozilla Public License Version 2.0. For details, see licenses/bin/rhino.MPL2. + * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Rhino + * org.mozilla:rhino + + This product bundles "Java Concurrency In Practice" Book Annotations, copyright Brian Goetz and Tim Peierls, + which is available under the Creative Commons Attribution 2.5 license. For details, see licenses/bin/creative-commons-2.5.LICENSE. 
+ * http://jcip.net/ + * net.jcip:jcip-annotations diff --git a/LICENSE b/LICENSE index d64569567334..db495eac2993 100644 --- a/LICENSE +++ b/LICENSE @@ -200,3 +200,99 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + APACHE DRUID (INCUBATING) SUBCOMPONENTS: + + Apache Druid (incubating) includes a number of subcomponents with + separate copyright notices and license terms. Your use of the source + code for these subcomponents is subject to the terms and + conditions of the following licenses. + + +Apache License version 2.0 +================================ + +SOURCE/JAVA-CORE + This product contains conjunctive normal form conversion code, a variance aggregator algorithm, and Bloom filter + adapted from Apache Hive. + * processing/src/main/java/org/apache/druid/segment/filter/Filters.java + * extensions-core/stats/src/main/java/io/druid/query/aggregation/variance/VarianceAggregatorCollector.java + * extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/filter/BloomKFilter.java + + This product contains variable length long deserialization code adapted from Apache Lucene. + * processing/src/main/java/org/apache/druid/segment/data/VSizeLongSerde.java + + This product contains SQL query planning code adapted from Apache Calcite. + * sql/src/main/java/org/apache/druid/sql/calcite/ + + This product contains Kerberos authentication code adapted from Apache Hadoop. + * extensions-core/druid-kerberos/src/main/java/org/apache/druid/security/kerberos/ + + This product contains a modified version of the java-alphanum library, + copyright Andrew Duffy (https://github.com/amjjd/java-alphanum). + * processing/src/main/java/org/apache/druid/query/ordering/StringComparators.java + + This product contains a modified version of the Metamarkets java-util library, + copyright Metamarkets Group Inc. (https://github.com/metamx/java-util). 
+ * java-util/ + + This product contains a modified version of the Metamarkets bytebuffer-collections library, + copyright Metamarkets Group Inc. (https://github.com/metamx/bytebuffer-collections) + * processing/src/main/java/org/apache/druid/collections/ + + This product contains a modified version of the Metamarkets extendedset library, + copyright Metamarkets Group Inc. (https://github.com/metamx/extendedset) + * extendedset/ + + This product contains a modified version of the CONCISE (COmpressed 'N' Composable Integer SEt) library, + copyright Alessandro Colantonio (https://sourceforge.net/projects/concise/), extending the functionality of + ConciseSet to use IntBuffers. + * extendedset/src/main/java/org/apache/druid/extendedset/intset/ + + This product contains modified portions of the Guava library, + copyright The Guava Authors (https://github.com/google/guava). + Closer class: + * core/src/main/java/org/apache/druid/java/util/common/io/Closer.java + Splitter.splitToList() method: + * core/src/main/java/org/apache/druid/java/util/common/parsers/DelimitedParser.java + DirectExecutorService class: + * core/src/main/java/org/apache/druid/java/util/common/concurrent/DirectExecutorService.java + + This product contains modified versions of the Dockerfile and related configuration files + from SequenceIQ's Hadoop Docker image, copyright SequenceIQ, Inc. (https://github.com/sequenceiq/hadoop-docker/) + * examples/quickstart/tutorial/hadoop/docker/ + + This product contains fixed bins histogram percentile computation code adapted from Netflix Spectator, + copyright Netflix, Inc. (https://github.com/Netflix/spectator) + * extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/FixedBucketsHistogram.java + + +MIT License +================================ + +SOURCE/WEB-CONSOLE + This product bundles jQuery version 1.11.0, copyright jQuery Foundation, Inc., + which is available under an MIT license. For details, see licenses/src/jquery.MIT. 
+ + This product bundles jQuery UI version 1.9.2, copyright jQuery Foundation and other contributors, + which is available under an MIT license. For details, see licenses/src/jquery-ui.MIT. + + This product bundles underscore version 1.2.2, copyright Jeremy Ashkenas, DocumentCloud, + which is available under an MIT license. For details, see licenses/src/underscore.MIT. + + +BSD-3-Clause License +================================ + +SOURCE/WEB-CONSOLE + This product bundles demo_table.css and jquery.dataTables.js from DataTables version 1.8.2, copyright Allan Jardine., + which is available under a BSD-3-Clause License. For details, see licenses/src/datatables.BSD3. + + +Public Domain +================================ + +SOURCE/JAVA-CORE + This product uses a smear function adapted from MurmurHash3, written by Austin Appleby who has placed + MurmurHash3 in the public domain (https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp). + * processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/Groupers.java diff --git a/LICENSE.BINARY b/LICENSE.BINARY new file mode 100644 index 000000000000..a46a66a18026 --- /dev/null +++ b/LICENSE.BINARY @@ -0,0 +1,1258 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/bin/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/bin/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + APACHE DRUID (INCUBATING) SUBCOMPONENTS: + + Apache Druid (incubating) includes a number of subcomponents with + separate copyright notices and license terms. Your use of the source + code for these subcomponents is subject to the terms and + conditions of the following licenses. + + +Apache License version 2.0 +================================ + +SOURCE/JAVA-CORE + This product contains conjunctive normal form conversion code, a variance aggregator algorithm, and Bloom filter + adapted from Apache Hive. + * processing/src/main/java/org/apache/druid/segment/filter/Filters.java + * extensions-core/stats/src/main/java/io/druid/query/aggregation/variance/VarianceAggregatorCollector.java + * extensions-core/druid-bloom-filter/src/main/java/org/apache/druid/query/filter/BloomKFilter.java + + This product contains variable length long deserialization code adapted from Apache Lucene. + * processing/src/main/java/org/apache/druid/segment/data/VSizeLongSerde.java + + This product contains SQL query planning code adapted from Apache Calcite. + * sql/src/main/java/org/apache/druid/sql/calcite/ + + This product contains Kerberos authentication code adapted from Apache Hadoop. 
+ * extensions-core/druid-kerberos/src/main/java/org/apache/druid/security/kerberos/ + + This product contains a modified version of the java-alphanum library, + copyright Andrew Duffy (https://github.com/amjjd/java-alphanum). + * processing/src/main/java/org/apache/druid/query/ordering/StringComparators.java + + This product contains a modified version of the Metamarkets java-util library, + copyright Metamarkets Group Inc. (https://github.com/metamx/java-util). + * java-util/ + + This product contains a modified version of the Metamarkets bytebuffer-collections library, + copyright Metamarkets Group Inc. (https://github.com/metamx/bytebuffer-collections) + * processing/src/main/java/org/apache/druid/collections/ + + This product contains a modified version of the Metamarkets extendedset library, + copyright Metamarkets Group Inc. (https://github.com/metamx/extendedset) + * extendedset/ + + This product contains a modified version of the CONCISE (COmpressed 'N' Composable Integer SEt) library, + copyright Alessandro Colantonio (https://sourceforge.net/projects/concise/), extending the functionality of + ConciseSet to use IntBuffers. + * extendedset/src/main/java/org/apache/druid/extendedset/intset/ + + This product contains modified portions of the Guava library, + copyright The Guava Authors (https://github.com/google/guava). + Closer class: + * core/src/main/java/org/apache/druid/java/util/common/io/Closer.java + Splitter.splitToList() method: + * core/src/main/java/org/apache/druid/java/util/common/parsers/DelimitedParser.java + DirectExecutorService class: + * core/src/main/java/org/apache/druid/java/util/common/concurrent/DirectExecutorService.java + + This product contains modified versions of the Dockerfile and related configuration files + from SequenceIQ's Hadoop Docker image, copyright SequenceIQ, Inc. 
(https://github.com/sequenceiq/hadoop-docker/) + * examples/quickstart/tutorial/hadoop/docker/ + + This product contains fixed bins histogram percentile computation code adapted from Netflix Spectator, + copyright Netflix, Inc. (https://github.com/Netflix/spectator) + * extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/FixedBucketsHistogram.java + + +BINARY/WEB-CONSOLE + This product bundles Microsoft tslib version 1.9.3. + + This product bundles diff-match-patch version 1.0.4. + + This product bundles @blueprintjs/core version 1.0.1. + + +BINARY/JAVA-CORE + This product bundles AWS SDK for Java version 1.11.199. + * com.amazonaws:aws-java-sdk-core + * com.amazonaws:aws-java-sdk-ec2 + * com.amazonaws:aws-java-sdk-kms + * com.amazonaws:aws-java-sdk-s3 + * com.amazonaws:jmespath-java + + This product bundles Esri Geometry API for Java version 2.0.0. + * com.esri.geometry:esri-geometry-api + + This product bundles ClassMate version 1.0.0. + * com.fasterxml:classmate + + This product bundles Jackson version 2.6.7. + * com.fasterxml.jackson.core:jackson-annotations + * com.fasterxml.jackson.core:jackson-core + * com.fasterxml.jackson.core:jackson-databind + * com.fasterxml.jackson.dataformat:jackson-dataformat-cbor + * com.fasterxml.jackson.dataformat:jackson-dataformat-smile + * com.fasterxml.jackson.datatype:jackson-datatype-guava + * com.fasterxml.jackson.datatype:jackson-datatype-joda + * com.fasterxml.jackson.jaxrs:jackson-jaxrs-base + * com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider + * com.fasterxml.jackson.jaxrs:jackson-jaxrs-smile-provider + * com.fasterxml.jackson.module:jackson-module-jaxb-annotations + + This product bundles Caffeine version 2.5.5. + * com.github.ben-manes.caffeine:caffeine + + This product bundles Error Prone Annotations version 2.3.2. + * com.google.errorprone:error_prone_annotations + + This product bundles Guava version 16.0.1. 
+ * com.google.guava:guava + + This product bundles Guice version 4.1.0. + * com.google.inject:guice + * com.google.inject.extensions:guice-multibindings + * com.google.inject.extensions:guice-servlet + + This product bundles JsonPath version 2.3.0. + * com.jayway.jsonpath:json-path + + This product bundles LMAX Disruptor version 3.3.6. + * com.lmax:disruptor + + This product bundles LZF Compressor version 1.0.4. + * com.ning:compress-lzf + + This product bundles OpenCSV version 4.2. + * com.opencsv:opencsv + + This product bundles OkHttp version 1.0.2, + * com.squareup.okhttp:okhttp + + This product bundles Netty Reactive Streams version 2.0.0. + * com.typesafe.netty:netty-reactive-streams + + This product bundles the following Apache Commons libraries: + * commons-beanutils 1.9.3 + * commons-cli 1.2 + * commons-codec 1.7 + * commons-collections 3.2.2 + * commons-io 2.5 + * commons-lang 2.6 + * commons-logging 1.1.1 + * commons-pool 1.6 + * commons-collections4 4.1 + * commons-compress 1.16 + * commons-dbcp2 2.0.1 + * commons-lang3 3.7 + * commons-math3 3.6.1 + * commons-pool2 2.2 + * commons-text 1.3 + + This product bundles Airline version 0.7. + * io.airlift:airline + + This product bundles DropWizard Metrics Core version 4.0.0. + * io.dropwizard.metrics:metrics-core + + This product bundles Netty version 3.10.6.Final. + * io.netty:netty + + This product bundles Netty version 4.1.30.Final. + * io.netty:netty-all + + This product bundles Netty version 4.1.29.Final. + * io.netty:netty-buffer + * io.netty:netty-codec + * io.netty:netty-codec-dns + * io.netty:netty-codec-http + * io.netty:netty-codec-socks + * io.netty:netty-common + * io.netty:netty-handler + * io.netty:netty-handler-proxy + * io.netty:netty-resolver + * io.netty:netty-resolver-dns + * io.netty:netty-transport + * io.netty:netty-transport-native-epoll + * io.netty:netty-transport-native-unix-common + + This product bundles fastutil version 8.1.0. 
+ * it.unimi.dsi:fastutil + + This product bundles Javax Inject version 1. + * javax.inject:javax.inject + + This product bundles Bean Validation API version 1.1.0.Final. + * javax.validation:validation-api + + This product bundles Joda-Time version 2.9.9. + * joda-time:joda-time + + This product bundles Aggregate Designer Algorithm version 6.0. + * net.hydromatic:aggdesigner-algorithm + + This product bundles Java Native Access (JNA) version 4.5.1. + * net.java.dev.jna:jna + + This product bundles ASM Based Accessors Helper Used By JSON Smart version 1.2. + * net.minidev:accessors-smart + + This product bundles JSON Small and Fast Parser version 2.3. + * net.minidev:json-smart + + This product bundles Spymemcached version 2.12.3. + * net.spy:spymemcached + + This product bundles jackson-jq version 0.0.7. + * net.thisptr:jackson-jq + + This product bundles Apache Calcite version 1.17.0. + * org.apache.calcite:calcite-core + * org.apache.calcite:calcite-linq4j + + This product bundles Apache Calcite Avatica version 1.10.0. + * org.apache.calcite.avatica:avatica-core + * org.apache.calcite.avatica:avatica-metrics + * org.apache.calcite.avatica:avatica-server + + This product bundles Apache Curator version 4.1.0. + * org.apache.curator:curator-client + * org.apache.curator:curator-framework + * org.apache.curator:curator-recipes + * org.apache.curator:curator-x-discovery + + This product bundles Apache Derby version 10.11.1.1. + * org.apache.derby:derby + * org.apache.derby:derbyclient + * org.apache.derby:derbynet + + This product bundles Apache HttpClient version 4.5.3. + * org.apache.httpcomponents:httpclient + + This product bundles Apache HttpCore version 4.4.4. + * org.apache.httpcomponents:httpcore + + This product bundles Apache Log4j version 2.5. 
+ * org.apache.logging.log4j:log4j-1.2-api + * org.apache.logging.log4j:log4j-api + * org.apache.logging.log4j:log4j-core + * org.apache.logging.log4j:log4j-jul + * org.apache.logging.log4j:log4j-slf4j-impl + + This product bundles Apache Maven version 3.1.1. + * org.apache.maven:maven-aether-provider + * org.apache.maven:maven-model + * org.apache.maven:maven-model-builder + * org.apache.maven:maven-repository-metadata + * org.apache.maven:maven-settings + * org.apache.maven:maven-settings-builder + + This product bundles Apache Maven Artifact version 3.6.0. + * org.apache.maven:maven-artifact + + This product bundles Apache Maven Wagon API version 2.4. + * org.apache.maven.wagon:wagon-provider-api + + This product bundles Apache Yetus Audience Annotations Component version 0.5.0. + * org.apache.yetus:audience-annotations + + This product bundles Apache Zookeeper version 3.4.11. + * org.apache.zookeeper:zookeeper + + This product bundles AsyncHttpClient asynchttpclient version 2.5.3. + * org.asynchttpclient:async-http-client + * org.asynchttpclient:async-http-client-netty-utils + + This product bundles components from Jackson version 1.9.13. + * org.codehaus.jackson:jackson-core-asl + * org.codehaus.jackson:jackson-mapper-asl + + This product bundles Plexus Interpolation API 1.19. + * org.codehaus.plexus:plexus-interpolation + + This product bundles Plexus Common Utilities 3.0.15. + * org.codehaus.plexus:plexus-utils + + This product bundles Jetty version 9.4.10.v20180503. + * org.eclipse.jetty:jetty-client + * org.eclipse.jetty:jetty-continuation + * org.eclipse.jetty:jetty-http + * org.eclipse.jetty:jetty-io + * org.eclipse.jetty:jetty-proxy + * org.eclipse.jetty:jetty-security + * org.eclipse.jetty:jetty-server + * org.eclipse.jetty:jetty-servlet + * org.eclipse.jetty:jetty-servlets + * org.eclipse.jetty:jetty-util + + This product bundles JVM Attach API version 1.2. 
+ * org.gridkit.lab:jvm-attach-api + + This product bundles Hibernate Validator Engine version 5.1.3.Final. + * org.hibernate:hibernate-validator + + This product bundles SIGAR version 1.6.5.132. + * org.hyperic:sigar + + This product bundles JBoss Logging 3 version 3.1.3.GA. + * org.jboss.logging:jboss-logging + + This product bundles JDBI version 2.63.1. + * org.jdbi:jdbi + + This product bundles LZ4 Java version 1.5.0. + * org.lz4:lz4-java + + This product bundles MapDB version 1.0.8. + * org.mapdb:mapdb + + This product bundles Objenesis version 2.6. + * org.objenesis:objenesis + + This product bundles RoaringBitmap version 0.7.36. + * org.roaringbitmap:RoaringBitmap + * org.roaringbitmap:shims + + This product bundles Config Magic version 0.9. + * org.skife.config:config-magic + + This product bundles Ion Java version 1.0.2. + * software.amazon.ion:ion-java + + +BINARY/HADOOP-CLIENT + This product bundles Apache Hadoop version 2.8.3. + * org.apache.hadoop:hadoop-annotations + * org.apache.hadoop:hadoop-auth + * org.apache.hadoop:hadoop-client + * org.apache.hadoop:hadoop-common + * org.apache.hadoop:hadoop-hdfs-client + * org.apache.hadoop:hadoop-mapreduce-client-app + * org.apache.hadoop:hadoop-mapreduce-client-common + * org.apache.hadoop:hadoop-mapreduce-client-core + * org.apache.hadoop:hadoop-mapreduce-client-jobclient + * org.apache.hadoop:hadoop-mapreduce-client-shuffle + * org.apache.hadoop:hadoop-yarn-api + * org.apache.hadoop:hadoop-yarn-client + * org.apache.hadoop:hadoop-yarn-common + * org.apache.hadoop:hadoop-yarn-server-common + + This product bundles Gson version 2.2.4. + * com.google.code.gson:gson + + This product bundles Guava version 11.0.2. + * com.google.guava:guava + + This product bundles Nimbus JOSE+JWT version 3.9. 
+ * com.nimbusds:nimbus-jose-jwt + + This product bundles OkHttp version 2.4.0. + * com.squareup.okhttp:okhttp + + This product bundles Okio version 1.4.0. + * com.squareup.okio:okio + + This product bundles the following Apache Commons libraries: + * commons-beanutils 1.7.0 + * commons-beanutils-core 1.8.0 + * commons-codec 1.4 + * commons-configuration 1.6 + * commons-digester 1.8 + * commons-io 2.4 + * commons-logging 1.1.3 + * commons-net 3.1 + * commons-compress 1.4.1 + * commons-math3 3.1.1 + + This product bundles Netty version 3.6.2.Final. + * io.netty:netty + + This product bundles Apache Log4j version 1.2.17. + * log4j:log4j + + This product bundles JSON Small and Fast Parser version 1.1.1. + * net.minidev:json-smart + + This product bundles Apache Avro version 1.7.4. + * org.apache.avro:avro + + This product bundles Apache Directory version 1.0.0-M20. + * org.apache.directory.api:api-asn1-api + * org.apache.directory.api:api-util + + This product bundles Apache Directory Server version 2.0.0-M15. + * org.apache.directory.server:apacheds-i18n + * org.apache.directory.server:apacheds-kerberos-codec + + This product bundles Apache HTrace version 4.0.1-incubating. + * org.apache.htrace:htrace-core4 + + This product bundles Apache HttpClient version 4.5.2. + * org.apache.httpcomponents:httpclient + + This product bundles Apache Zookeeper version 3.4.6. + * org.apache.zookeeper:zookeeper + + This product bundles components from Jackson version 1.9.13. + * org.codehaus.jackson:jackson-jaxrs + * org.codehaus.jackson:jackson-xc + + This product bundles Jetty version 6.1.26. + * org.mortbay.jetty:jetty-sslengine + * org.mortbay.jetty:jetty-util + + This product bundles snappy-java version 1.0.4.1. + * org.xerial.snappy:snappy-java + + +BINARY/EXTENSIONS/druid-avro-extensions + This product bundles Kafka Schema Registry Client version 3.0.1. + * io.confluent:kafka-schema-registry-client + + This product bundles Apache Avro version 1.8.2. 
+ * org.apache.avro:avro + * org.apache.avro:avro-mapred + * org.apache.avro:avro-ipc + + This product bundles Schema Repository version 0.1.3. + * org.schemarepo:schema-repo-api + * org.schemarepo:schema-repo-avro + * org.schemarepo:schema-repo-client + * org.schemarepo:schema-repo-common + + This product bundles Gson version 2.3.1. + * com.google.code.gson:gson + + This product bundles Apache Velocity version 1.7. + * org.apache.velocity:velocity + + This product bundles Jetty version 6.1.26. + * org.mortbay.jetty:jetty + + This product bundles Jetty version 2.5-20081211. + * org.mortbay.jetty:servlet-api + + This product bundles Objenesis version 2.6. + * org.objenesis:objenesis + + This product bundles snappy-java version 1.1.1.3. + * org.xerial.snappy:snappy-java + + +BINARY/EXTENSIONS/druid-bloom-filter + + This product bundles Apache Hive version 2.7.0. + * org.apache.hive:hive-storage-api + + +BINARY/EXTENSIONS/druid-datasketches + + This product bundles DataSketches version 0.12.0. + * com.yahoo.datasketches:datasketches-core + * com.yahoo.datasketches:memory + + +BINARY/EXTENSIONS/druid-examples + + This product bundles IRC API version 1.0-0014. + * com.ircclouds.irc:irc-api + + This product bundles MaxMind GeoIP2 API version 0.4.0. + * com.maxmind.geoip2:geoip2 + + This product bundles the following Apache Commons libraries: + * commons-beanutils 1.8.3 + * commons-validator 1.4.0 + + This product bundles Twitter4J version 3.0.3. + * org.twitter4j:twitter4j-async + * org.twitter4j:twitter4j-core + * org.twitter4j:twitter4j-stream + + +BINARY/EXTENSIONS/druid-kafka-eight + + This product bundles Apache Kafka version 0.8.2.1. + * org.apache.kafka:kafka_2.10 + * org.apache.kafka:kafka-clients + + This product bundles ZkClient version 0.3. + * com.101tec:zkclient + + This product bundles Yammer Metrics version 2.2.0. + * com.yammer.metrics:metrics-core + + This product bundles snappy-java version 1.1.1.6. 
+ * org.xerial.snappy:snappy-java + + +BINARY/EXTENSIONS/druid-kafka-indexing-service + This product bundles Apache Kafka version 0.10.2.2. + * org.apache.kafka:kafka-clients + + This product bundles snappy-java version 1.1.2.6. + * org.xerial.snappy:snappy-java + + +BINARY/EXTENSIONS/druid-kerberos + + This product bundles XML Builder version 0.4. + * com.jamesmurty.utils:java-xmlbuilder + + This product bundles Jettison version 1.1. + * org.codehaus.jettison:jettison + + This product bundles Jets3t version 0.9.0. + * net.java.dev.jets3t:jets3t + + +BINARY/EXTENSIONS/druid-kinesis-indexing-service + This product bundles AWS SDK for Java version 1.11.199. + * com.amazonaws:aws-java-sdk-kinesis + * com.amazonaws:aws-java-sdk-sts + + +BINARY/EXTENSIONS/druid-parquet-extensions + This product bundles Apache Parquet version 1.10.0. + * org.apache.parquet:parquet-avro + * org.apache.parquet:parquet-column + * org.apache.parquet:parquet-common + * org.apache.parquet:parquet-encoding + * org.apache.parquet:parquet-hadoop + * org.apache.parquet:parquet-jackson + + This product bundles Apache Parquet Format version 2.4.0. + * org.apache.parquet:parquet-format + + This product bundles snappy-java version 1.1.7.2. + * org.xerial.snappy:snappy-java + + +BINARY/EXTENSIONS/protobuf-extensions + This product bundles Protocol Buffers Dynamic Schema version 0.9.3. + * com.github.os72:protobuf-dynamic + + This product bundles Gson version 2.7. + * com.google.code.gson:gson + + + +MIT License +================================ + +SOURCE/WEB-CONSOLE + This product bundles jQuery version 1.11.0, copyright jQuery Foundation, Inc., + which is available under an MIT license. For details, see licenses/src/jquery.MIT. + + This product bundles jQuery UI version 1.9.2, copyright jQuery Foundation and other contributors, + which is available under an MIT license. For details, see licenses/src/jquery-ui.MIT. 
+ + This product bundles underscore version 1.2.2, copyright Jeremy Ashkenas, DocumentCloud, + which is available under an MIT license. For details, see licenses/src/underscore.MIT. + + +BINARY/JAVA-CORE + This product bundles Checker Qual version 2.5.7, copyright the Checker Framework developers, + which is available under an MIT license. For details, see licenses/bin/checker-qual.MIT. + * org.checkerframework:checker-qual + + This product bundles JCodings version 1.0.13, copyright JRuby Team, + which is available under an MIT license. For details, see licenses/bin/jcodings.MIT. + * org.jruby.jcodings:jcodings + + This product bundles Joni version 2.1.11, copyright JRuby Team, + which is available under an MIT license. For details, see licenses/bin/joni.MIT. + * org.jruby.joni:joni + + This product bundles JCL 1.2 Implemented Over SLF4J version 1.7.12, copyright QOS.ch, + which is available under an MIT license. For details, see licenses/bin/jcl-over-slf4j.MIT. + * org.slf4j:jcl-over-slf4j + + This product bundles SLF4J API version 1.6.4, copyright QOS.ch, + which is available under an MIT license. For details, see licenses/bin/slf4j.MIT. + * org.slf4j:slf4j-api + + +BINARY/HADOOP-CLIENT + This product bundles SLF4J API version 1.7.10, copyright QOS.ch, + which is available under an MIT license. For details, see licenses/bin/slf4j.MIT. + * org.slf4j:slf4j-api + * org.slf4j:slf4j-log4j12 + + +BINARY/EXTENSIONS/druid-kafka-eight + This product bundles JOpt Simple version 3.2., copyright Paul R. Holser, Jr., + which is available under an MIT license. For details, see licenses/bin/jopt-simple.MIT. + * net.sf.jopt-simple:jopt-simple + + +BINARY/WEB-CONSOLE +The following dependency names are NPM package names (https://www.npmjs.com). + + This product bundles @babel/runtime version 7.3.4, copyright Sebastian McKenzie and other contributors, + which is available under an MIT license. For details, see licenses/bin/@babel-runtime.MIT. 
+ + This product bundles array-includes version 3.0.3, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/array-includes.MIT. + + This product bundles asap version 2.0.6, copyright Contributors, + which is available under an MIT license. For details, see licenses/bin/asap.MIT. + + This product bundles axios version 0.18.0, copyright Matt Zabriskie, + which is available under an MIT license. For details, see licenses/bin/axios.MIT. + + This product bundles brace version 0.11.1, copyright Thorsten Lorenz, + which is available under an MIT license. For details, see licenses/brace.MIT. + + This product bundles chain-function version 1.0.1, copyright jquense, + which is available under an MIT license. For details, see licenses/chain-function.MIT. + + This product bundles classnames version 2.2.6, copyright Jed Watson, + which is available under an MIT license. For details, see licenses/bin/classnames.MIT. + + This product bundles define-properties version 1.1.3, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/define-properties.MIT. + + This product bundles dom-helpers version 3.4.0, copyright Jason Quense, + which is available under an MIT license. For details, see licenses/bin/dom-helpers.MIT. + + This product bundles dom4 version 1.8.5, copyright Andrea Giammarchi, + which is available under an MIT license. For details, see licenses/bin/dom4.MIT. + + This product bundles encoding version 0.1.12, copyright Andris Reinman, + which is available under an MIT license. For details, see licenses/bin/encoding.MIT. + + This product bundles es-abstract version 1.13.0, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/es-abstract.MIT. + + This product bundles es-to-primitive version 1.2.0, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/es-to-primitive.MIT. 
+ + This product bundles es6-shim version 0.35.4, copyright Paul Miller and contributors, + which is available under an MIT license. For details, see licenses/bin/es6-shim.MIT. + + This product bundles es7-shim version 6.0.0, copyright Jordan Harband and contributors, + which is available under an MIT license. For details, see licenses/bin/es7-shim.MIT. + + This product bundles fbjs version 0.8.17, copyright Facebook, Inc., + which is available under an MIT license. For details, see licenses/bin/fbjs.MIT. + + This product bundles function-bind version 1.1.1, copyright Raynos, + which is available under an MIT license. For details, see licenses/bin/function-bind.MIT. + + This product bundles has-symbols version 1.0.0, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/has-symbols.MIT. + + This product bundles has version 1.0.3, copyright Thiago de Arruda, + which is available under an MIT license. For details, see licenses/bin/has.MIT. + + This product bundles history version 4.7.2, copyright Michael Jackson, + which is available under an MIT license. For details, see licenses/bin/history.MIT. + + This product bundles hjson version 3.1.2, copyright Christian Zangl, + which is available under an MIT license. For details, see licenses/bin/hjson.MIT. + + This product bundles invariant version 2.2.4, copyright Facebook, Inc., + which is available under an MIT license. For details, see licenses/bin/invariant.MIT. + + This product bundles is-buffer version 1.1.6, copyright Feross Aboukhadijeh, + which is available under an MIT license. For details, see licenses/bin/is-buffer.MIT. + + This product bundles is-callable version 1.1.4, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/is-callable.MIT. + + This product bundles is-date-object version 1.0.1, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/is-date-object.MIT. 
+ + This product bundles is-regex version 1.0.4, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/is-regex.MIT. + + This product bundles is-symbol version 1.0.2, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/is-symbol.MIT. + + This product bundles isarray version 0.0.1, copyright Julian Gruber, + which is available under an MIT license. For details, see licenses/bin/isarray.MIT. + + This product bundles numeral version 2.0.6, copyright Adam Draper, + which is available under an MIT license. For details, see licenses/bin/numeral.MIT. + + This product bundles object-assign version 4.1.1, copyright Sindre Sorhus, + which is available under an MIT license. For details, see licenses/bin/object-assign.MIT. + + This product bundles object-keys version 1.1.0, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/object-keys.MIT. + + This product bundles path-to-regexp version 1.7.0, copyright Blake Embrey (hello@blakeembrey.com), + which is available under an MIT license. For details, see licenses/bin/path-to-regexp.MIT. + + This product bundles prop-types version 15.7.2, copyright Facebook, Inc., + which is available under an MIT license. For details, see licenses/bin/prop-types.MIT. + + This product bundles pure-render-decorator version 1.1.1, copyright Félix Girault, + which is available under an MIT license. For details, see licenses/pure-render-decorator.MIT. + + This product bundles react-ace version 6.4.0, copyright James Hrisho, + which is available under an MIT license. For details, see licenses/react-ace.MIT. + + This product bundles react-addons-css-transition-group version 15.6.2, copyright , + which is available under an MIT license. For details, see licenses/react-addons-css-transition-group.MIT. + + This product bundles react-dom version 16.8.3, copyright Facebook, Inc. 
and its affiliates., + which is available under an MIT license. For details, see licenses/bin/react-dom.MIT. + + This product bundles react-is version 16.8.3, copyright , + which is available under an MIT license. For details, see licenses/react-is.MIT. + + This product bundles react-router-dom version 4.3.1, copyright React Training, + which is available under an MIT license. For details, see licenses/bin/react-router-dom.MIT. + + This product bundles react-router version 4.3.1, copyright React Training, + which is available under an MIT license. For details, see licenses/bin/react-router.MIT. + + This product bundles react-table version 6.8.6, copyright Tanner Linsley, + which is available under an MIT license. For details, see licenses/bin/react-table.MIT. + + This product bundles react version 16.8.3, copyright Facebook, Inc. and its affiliates., + which is available under an MIT license. For details, see licenses/bin/react.MIT. + + This product bundles resolve-pathname version 2.2.0, copyright Michael Jackson, + which is available under an MIT license. For details, see licenses/bin/resolve-pathname.MIT. + + This product bundles scheduler version 0.13.3, copyright Facebook, Inc. and its affiliates., + which is available under an MIT license. For details, see licenses/bin/scheduler.MIT. + + This product bundles string-at version 1.0.1, copyright Jordan Harband, + which is available under an MIT license. For details, see licenses/bin/string-at.MIT. + + This product bundles tether version 1.4.5, copyright , + which is available under an MIT license. For details, see licenses/tether.MIT. + + This product bundles value-equal version 0.4.0, copyright Michael Jackson, + which is available under an MIT license. For details, see licenses/bin/value-equal.MIT. + + This product bundles warning version 4.0.3, copyright Facebook, Inc., + which is available under an MIT license. For details, see licenses/bin/warning.MIT. 
+ + This product bundles ua-parser-js version 0.7.19, copyright Faisal Salman, + which is available under an MIT license. For details, see licenses/bin/ua-parser-js.MIT. + + This product bundles druid-console version 0.0.2, copyright Metamarkets, + which is available under an MIT license. For details, see licenses/bin/druid-console.MIT. + + This product bundles CSS from Font Awesome Free version 4.2.0, copyright Font Awesome, + which is available under an MIT license. For details, see licenses/bin/font-awesome.MIT. + + This product bundles webpack version 4.29.0, copyright JS Foundation and other contributors, + which is available under an MIT license. For details, see licenses/bin/webpack.MIT. + + This product bundles style-loader version 0.23.1, copyright JS Foundation and other contributors, + which is available under an MIT license. For details, see licenses/bin/style-loader.MIT. + + This product bundles os-browserify version 0.3.0, copyright CoderPuppy, + which is available under an MIT license. For details, see licenses/bin/os-browserify.MIT. + + This product bundles process version 0.11.10, copyright Roman Shtylman, + which is available under an MIT license. For details, see licenses/bin/process.MIT. + + This product bundles css-loader version 2.1.0, copyright JS Foundation and other contributors, + which is available under an MIT license. For details, see licenses/bin/css-loader.MIT. + + This product bundles bootstrap version 3.1.1, copyright Twitter Inc, + which is available under an MIT license. For details, see licenses/bin/bootstrap.MIT. + + +BSD-2-Clause License +================================ + +BINARY/JAVA-CORE + This product bundles JNI binding for Zstd version 1.3.3-1, copyright Luben Karavelov, + which is available under a BSD-2-Clause License. For details, see licenses/bin/zstd-jni.BSD2. 
+ * com.github.luben:zstd-jni + + +BSD-3-Clause License +================================ + +SOURCE/WEB-CONSOLE + This product bundles demo_table.css and jquery.dataTables.js from DataTables version 1.8.2, copyright Allan Jardine., + which is available under a BSD-3-Clause License. For details, see licenses/src/datatables.BSD3. + + +BINARY/JAVA-CORE + This product bundles Zstandard version 1.3.3, copyright Facebook, Inc., + which is available under a BSD-3-Clause License. For details, see licenses/bin/zstandard.BSD3. + + This product bundles FindBugs JSR305 version 2.0.1, copyright FindBugs, + which is available under a BSD-3-Clause License. For details, see licenses/bin/jsr305.BSD3. + * com.google.code.findbugs:jsr305 + + This product bundles Protocol Buffers version 3.1.0, copyright Google, Inc., + which is available under a BSD-3-Clause License. For details, see licenses/bin/protobuf-java.BSD3. + * com.google.protobuf:protobuf-java + + This product bundles JLine version 0.9.94, copyright Marc Prud'hommeaux, + which is available under a BSD-3-Clause License. For details, see licenses/bin/jline.BSD3. + * jline:jline + + This product bundles ANTLR 4 Runtime version 4.5.1, copyright The ANTLR Project, + which is available under a BSD-3-Clause License. For details, see licenses/bin/antlr4-runtime.BSD3. + * org.antlr:antlr4-runtime + + This product bundles Janino and Commons Compiler version 2.7.6, copyright Arno Unkrig and TIBCO Software Inc., + which are available under a BSD-3-Clause License. For details, see licenses/bin/janino.BSD3. + * org.codehaus.janino:janino + * org.codehaus.janino:commons-compiler + + This product bundles ASM version 5.2, copyright INRIA, France Telecom, + which is available under a BSD-3-Clause License. For details, see licenses/bin/asm.BSD3. 
+ * org.ow2.asm:asm + * org.ow2.asm:asm-commons + * org.ow2.asm:asm-tree + + +BINARY/HADOOP-CLIENT + This product bundles FindBugs JSR305 version 3.0.0, copyright FindBugs, + which is available under a BSD-3-Clause License. For details, see licenses/bin/jsr305.BSD3. + * com.google.code.findbugs:jsr305 + + This product bundles Protocol Buffers version 2.5.0, copyright Google, Inc., + which is available under a BSD-3-Clause License. For details, see licenses/bin/protobuf-java.BSD3. + * com.google.protobuf:protobuf-java + + This product bundles Paranamer version 2.3, copyright Paul Hammant & ThoughtWorks Inc, + which is available under a BSD-3-Clause License. For details, see licenses/bin/paranamer.BSD3. + * com.thoughtworks.paranamer:paranamer + + This product bundles LevelDB JNI version 1.8, copyright FuseSource Corp., + which is available under a BSD-3-Clause License. For details, see licenses/bin/leveldb-jni.BSD3. + * org.fusesource.leveldbjni:leveldbjni-all + + This product bundles xmlenc version 0.5.2, copyright Ernst de Haan, + which is available under a BSD-3-Clause License. For details, see licenses/bin/xmlenc.BSD3. + * xmlenc:xmlenc + + +BINARY/EXTENSIONS/druid-avro-extensions + This product bundles Paranamer version 2.7, copyright Paul Hammant & ThoughtWorks Inc, + which is available under a BSD-3-Clause License. For details, see licenses/bin/paranamer.BSD3. + * com.thoughtworks.paranamer:paranamer + + +BINARY/EXTENSIONS/druid-kerberos + This product bundles JSch - Java Secure Channel version 0.1.54, copyright Atsuhiko Yamanaka, JCraft,Inc., + which is available under a BSD-3-Clause License. For details, see licenses/bin/jsch.BSD3. + * com.jcraft:jsch + + +BINARY/EXTENSIONS/druid-kafka-eight + This product bundles Scala Library version 2.10.4, copyright EPFL, Lightbend Inc., + which is available under a BSD-3-Clause License. For details, see licenses/bin/scala-lang.BSD3. 
+ * org.scala-lang:scala-library + + +BINARY/EXTENSIONS/druid-lookups-cached-single + This product bundles StringTemplate version 3.2, copyright Terrence Parr, + which is available under a BSD-3-Clause License. For details, see licenses/bin/antlr-stringtemplate.BSD3. + * org.antlr:stringtemplate + + This product bundles ANTLR version 2.7.7, copyright The ANTLR Project, + which is available under a BSD-3-Clause License. For details, see licenses/bin/antlr.BSD3. + * antlr:antlr + + +BINARY/EXTENSIONS/postgresql-metadata-storage + This product bundles PostgreSQL JDBC Driver version 9.4.1208.jre7, copyright PostgreSQL Global Development Group, + which is available under a BSD-3-Clause License. For details, see licenses/bin/postgresql.BSD3. + * org.postgresql:postgresql + + +BINARY/EXTENSIONS/druid-protobuf-extensions + This product bundles Protocol Buffers version 3.1.0, copyright Google, Inc., + which is available under a BSD-3-Clause License. For details, see licenses/bin/protobuf-java.BSD3. + * com.google.protobuf:protobuf-java-util + + +BINARY/WEB-CONSOLE + This product bundles d3-array version 2.0.3, copyright Mike Bostock, + which is available under a BSD-3-Clause License. For details, see licenses/bin/d3-array.BSD3. + + This product bundles hoist-non-react-statics version 2.5.5, copyright Yahoo! Inc., + which is available under a BSD-3-Clause License. For details, see licenses/bin/hoist-non-react-statics.BSD3. + + This product bundles react-transition-group version 1.2.1, copyright 2016, React Community, + forked from React (https://github.com/facebook/react), copyright 2013-present, Facebook, Inc. + which is available under a BSD-3-Clause License. For details, see licenses/bin/react-transition-group.BSD3. + + This product bundles warning version 3.0.0, copyright Facebook, Inc., + which is available under a BSD-3-Clause License. For details, see licenses/bin/warning.BSD3. 
+ + +ICU License +================================ + +BINARY/JAVA-CORE + This product bundles ICU4J version 54.1.1, copyright International Business Machines Corporation and others, + which is available under the ICU License. For details, see licenses/bin/icu4j.ICU. + * com.ibm.icu:icu4j + + +SIL Open Font License 1.1 +================================ + +BINARY/WEB-CONSOLE + This product bundles fonts from Font Awesome Free version 4.2.0, copyright Font Awesome, + which is available under the SIL OFL 1.1. For details, see licenses/bin/font-awesome.silofl + * https://fontawesome.com/ + + +CDDL 1.1 +================================ + +BINARY/JAVA-CORE + This product bundles JavaBeans Activation Framework version 1.2.0, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.activation.CDDL11 + * https://github.com/javaee/activation + * com.sun.activation:javax.activation + + This product bundles Jersey version 1.19.3, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/jersey.CDDL11 + * https://jersey.github.io/ + * com.sun.jersey:jersey-core + * com.sun.jersey:jersey-server + * com.sun.jersey:jersey-servlet + * com.sun.jersey:contribs + + This product bundles Expression Language 3.0 API version 3.0.0., copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/el-spec + * javax.el:javax.el-api + + This product bundles Java Servlet API version 3.1.0, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/servlet-spec + * javax.servlet:javax.servlet-api + + This product bundles JSR311 API version 1.1.1, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. 
For details, see licenses/bin/jsr311-api.CDDL11 + * https://github.com/javaee/jsr311 + * javax.ws.rs:jsr311-api + + This product bundles Expression Language 3.0 version 3.0.0., copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/el-spec + * org.glassfish:javax.el + + +BINARY/HADOOP-CLIENT + This product bundles Jersey version 1.9, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/jersey.CDDL11 + * https://jersey.github.io/ + * com.sun.jersey:jersey-client + * com.sun.jersey:jersey-core + + This product bundles JavaBeans Activation Framework version 1.1, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/activation + * javax.activation:activation + + This product bundles Java Servlet API version 2.5, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/servlet-spec + * javax.servlet:javax.servlet-api + + This product bundles JAXB version 2.2.2, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/jaxb-v2 + * javax.xml.bind:jaxb-api + + This product bundles stax-api version 1.0-2, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/ + * javax.xml.stream:stax-api + + This product bundles jsp-api version 2.1, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. 
For details, see licenses/bin/javax.CDDL11 + * https://github.com/javaee/javaee-jsp-api + * javax.servlet.jsp:jsp-api + + +BINARY/EXTENSIONS/druid-avro-extensions + This product bundles Jersey version 1.15, copyright Oracle and/or its affiliates., + which is available under the CDDL 1.1. For details, see licenses/bin/jersey.CDDL11 + * https://jersey.github.io/ + * com.sun.jersey:jersey-client + + +Eclipse Public License 1.0 +================================ + +BINARY/JAVA-CORE + This product bundles OkHttp Aether Connector version 0.0.9, copyright to original author or authors, + which is available under the Eclipse Public License 1.0. For details, see licenses/bin/aether-connector-okhttp.EPL1. + * https://github.com/takari/aether-connector-okhttp + * io.tesla.aether:aether-connector-okhttp + + This product bundles Tesla Aether version 0.0.5, copyright to original author or authors, + which is available under the Eclipse Public License 1.0. For details, see licenses/bin/tesla-aether.EPL1. + * https://github.com/tesla/tesla-aether + * io.tesla.aether:tesla-aether + + This product bundles Eclipse Aether libraries version 0.9.0.M2, copyright Sonatype, Inc., + which is available under the Eclipse Public License 1.0. For details, see licenses/bin/aether-core.EPL1. + * https://github.com/eclipse/aether-core + * org.eclipse.aether:aether-api + * org.eclipse.aether:aether-connector-file + * org.eclipse.aether:aether-impl + * org.eclipse.aether:aether-spi + * org.eclipse.aether:aether-util + + +Mozilla Public License Version 2.0 +================================ + +BINARY/JAVA-CORE + This product bundles Rhino version 1.7R5, copyright Mozilla and individual contributors., + which is available under the Mozilla Public License Version 2.0. For details, see licenses/bin/rhino.MPL2. 
+ * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Rhino + * org.mozilla:rhino + + +Creative Commons Attribution 2.5 +================================ + +BINARY/HADOOP-CLIENT + This product bundles "Java Concurrency In Practice" Book Annotations, copyright Brian Goetz and Tim Peierls, + which is available under the Creative Commons Attribution 2.5 license. For details, see licenses/bin/creative-commons-2.5.LICENSE. + * http://jcip.net/ + * net.jcip:jcip-annotations + + +Creative Commons CC0 +================================ + +BINARY/JAVA-CORE + This product bundles Reactive Streams version 1.0.2, which is available under a Creative Commons CC0 license. + For details, see licenses/bin/reactive-streams.CC0. + * org.reactivestreams:reactive-streams + + +Public Domain +================================ + +SOURCE/JAVA-CORE + This product uses a smear function adapted from MurmurHash3, written by Austin Appleby who has placed + MurmurHash3 in the public domain (https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp). + * processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/Groupers.java + +BINARY/JAVA-CORE + This product bundles AOP Alliance version 1.0, which is in the public domain (http://aopalliance.sourceforge.net/). + * aopalliance:aopalliance + + This product bundles XZ for Java version 1.8, which is in the public domain (https://tukaani.org/xz/java.html). + * org.tukaani:xz + +BINARY/HADOOP-CLIENT + This product bundles XZ for Java version 1.0, which is in the public domain (https://tukaani.org/xz/java.html). + * org.tukaani:xz diff --git a/NOTICE b/NOTICE index 5ed39d8b339e..57018c61bde1 100644 --- a/NOTICE +++ b/NOTICE @@ -4,97 +4,62 @@ Copyright 2018 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
+ +############ SOURCE/JAVA-CORE ############ + +================= Apache Hive ================= +Apache Hive +Copyright 2008-2018 The Apache Software Foundation + + + + +================= Apache Lucene ================= +Apache Lucene +Copyright 2001-2019 The Apache Software Foundation + + + + +================= Apache Calcite ================= +Apache Calcite +Copyright 2012-2019 The Apache Software Foundation + +This product is based on source code originally developed +by DynamoBI Corporation, LucidEra Inc., SQLstream Inc. and others +under the auspices of the Eigenbase Foundation +and released as the LucidDB project. + + + + +================= Apache Hadoop ================= +Apache Hadoop +Copyright 2009-2017 The Apache Software Foundation + + + + +================= Metamarkets java-util ================= +java-util +Copyright 2011-2017 Metamarkets Group Inc. + + + + +================= Metamarkets bytebuffer-collections ================= +bytebuffer-collections +Copyright 2011-2015 Metamarkets Group Inc. + + + + +================= Metamarkets extendedset ================= +extendedset +Copyright 2012 Metamarkets Group Inc. 
+ ------------------------------------------------------------------------------- -This product contains a modified version of Andrew Duffy's java-alphanum library - * LICENSE: - * https://github.com/amjjd/java-alphanum/blob/5c036e2e492cc7f3b7bcdebd46b8f9e2a87927e5/LICENSE.txt (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/amjjd/java-alphanum - -This product contains conjunctive normal form conversion code, a variance aggregator algorithm, and bloom filter adapted from Apache Hive - * LICENSE: - * https://github.com/apache/hive/blob/branch-2.0/LICENSE (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/apache/hive - -This product contains variable length long deserialization code adapted from Apache Lucene - * LICENSE: - * https://github.com/apache/lucene-solr/blob/master/lucene/LICENSE.txt (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/apache/lucene-solr - -This product contains a modified version of Metamarkets java-util library - * LICENSE: - * https://github.com/metamx/java-util/blob/master/LICENSE (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/metamx/java-util - * COMMIT TAG: - * https://github.com/metamx/java-util/commit/826021f - -This product contains a modified version of TestNG 6.8.7 - * LICENSE: - * http://testng.org/license/ (Apache License, Version 2.0) - * HOMEPAGE: - * http://testng.org/ - -This product contains a modified version of Metamarkets bytebuffer-collections library - * LICENSE: - * https://github.com/metamx/bytebuffer-collections/blob/master/LICENSE (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/metamx/bytebuffer-collections - * COMMIT TAG: - * https://github.com/metamx/bytebuffer-collections/commit/3d1e7c8 - -This product contains SQL query planning code adapted from Apache Calcite - * LICENSE: - * https://github.com/apache/calcite/blob/master/LICENSE (Apache License, Version 2.0) - * HOMEPAGE: - * https://calcite.apache.org/ - -This product 
contains a modified version of Metamarkets extendedset library - * LICENSE: - * https://github.com/metamx/extendedset/blob/master/LICENSE (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/metamx/extendedset - * COMMIT TAG: - * https://github.com/metamx/extendedset/commit/c9d647d - -This product contains a modified version of Alessandro Colantonio's CONCISE +This library contains a modified version of Alessandro Colantonio's CONCISE (COmpressed 'N' Composable Integer SEt) library, extending the functionality of -ConciseSet to use IntBuffers. - * (c) 2010 Alessandro Colantonio - * - * - * LICENSE: - * Apache License, Version 2.0 - * HOMEPAGE: - * https://sourceforge.net/projects/concise/ - -This product contains a modified version of The Guava Authors's Closer class from Guava library: - * LICENSE: - * https://github.com/google/guava/blob/c462d69329709f72a17a64cb229d15e76e72199c/COPYING (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/google/guava - * COMMIT TAG: - * https://github.com/google/guava/commit/0ba7ccf36f5384a321cb78d62375bf7574e7bc24 - -This product contains code adapted from Apache Hadoop - * LICENSE: - * https://github.com/apache/hadoop/blob/trunk/LICENSE.txt (Apache License, Version 2.0) - * HOMEPAGE: - * http://hadoop.apache.org/ - -This product contains modified versions of the Dockerfile and related configuration files from SequenceIQ's Hadoop Docker image: - * LICENSE: - * https://github.com/sequenceiq/hadoop-docker/blob/master/LICENSE (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/sequenceiq/hadoop-docker/ - * COMMIT TAG: - * update this when this patch is committed - -This product contains fixed bins histogram percentile computation code adapted from Netflix Spectator: - * LICENSE: - * https://github.com/Netflix/spectator/blob/master/LICENSE (Apache License, Version 2.0) - * HOMEPAGE: - * https://github.com/Netflix/spectator +ConciseSet to use IntBuffers. 
\ No newline at end of file diff --git a/NOTICE.BINARY b/NOTICE.BINARY new file mode 100644 index 000000000000..af3b9213f9b3 --- /dev/null +++ b/NOTICE.BINARY @@ -0,0 +1,2747 @@ +Apache Druid (incubating) +Copyright 2018 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + +############ SOURCE/JAVA-CORE ############ + +================= Apache Hive ================= +Apache Hive +Copyright 2008-2018 The Apache Software Foundation + + + + +================= Apache Lucene ================= +Apache Lucene +Copyright 2001-2019 The Apache Software Foundation + + + + +================= Apache Calcite ================= +Apache Calcite +Copyright 2012-2019 The Apache Software Foundation + +This product is based on source code originally developed +by DynamoBI Corporation, LucidEra Inc., SQLstream Inc. and others +under the auspices of the Eigenbase Foundation +and released as the LucidDB project. + + + + +================= Apache Hadoop ================= +Apache Hadoop +Copyright 2009-2017 The Apache Software Foundation + + + + +================= Metamarkets java-util ================= +java-util +Copyright 2011-2017 Metamarkets Group Inc. + + + + +================= Metamarkets bytebuffer-collections ================= +bytebuffer-collections +Copyright 2011-2015 Metamarkets Group Inc. + + + + +================= Metamarkets extendedset ================= +extendedset +Copyright 2012 Metamarkets Group Inc. + +------------------------------------------------------------------------------- + +This library contains a modified version of Alessandro Colantonio's CONCISE +(COmpressed 'N' Composable Integer SEt) library, extending the functionality of +ConciseSet to use IntBuffers. + + + + +############ BINARY/WEB-CONSOLE ############ + +================= tslib 0.9.3 ================= +/*! 
***************************************************************************** +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. +***************************************************************************** */ + + + +############ BINARY/JAVA-CORE ############ + +================= aggdesigner-algorithm-6.0.jar ================= +Aggregate Designer + +Copyright 2006 - 2013 Pentaho Corporation. All rights reserved. +Copyright 2000-2005, 2014-2016 Julian Hyde + + + + +================= airline-0.7.jar ================= +Copyright Notices +================= + +Copyright 2011 Dain Sundstrom +Copyright 2010 Cedric Beust + + + + +================= audience-annotations-0.5.0.jar ================= +Apache Yetus - Audience Annotations +Copyright 2015-2017 The Apache Software Foundation + + + + +================= avatica-core-1.10.0.jar ================= +Apache Calcite Avatica +Copyright 2012-2017 The Apache Software Foundation + + + + +================= avatica-metrics-1.10.0.jar ================= +Apache Calcite Avatica Metrics +Copyright 2012-2017 The Apache Software Foundation + + + + +================= avatica-server-1.10.0.jar ================= +Apache Calcite Avatica Server +Copyright 2012-2017 The Apache Software Foundation + + + + +================= AWS SDK for Java 1.11.199 ================= +AWS SDK for Java +Copyright 2010-2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). + +********************** +THIRD PARTY COMPONENTS +********************** +This software includes third party software subject to the following copyrights: +- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. +- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. + +The licenses for these third party components are included in LICENSE.txt + + + + +================= calcite-core-1.17.0.jar ================= +Calcite Core +Copyright 2012-2018 The Apache Software Foundation + + + + +================= calcite-linq4j-1.17.0.jar ================= +Calcite Linq4j +Copyright 2012-2018 The Apache Software Foundation + + + + +================= classmate-1.0.0.jar ================= +Java ClassMate library was originally written by Tatu Saloranta (tatu.saloranta@iki.fi) + +Other developers who have contributed code are: + +* Brian Langel + + + + +================= commons-beanutils-1.9.3.jar ================= +Apache Commons BeanUtils +Copyright 2000-2016 The Apache Software Foundation + + + + +================= commons-cli-1.2.jar ================= +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation + + + + +================= commons-codec-1.7.jar ================= +Apache Commons Codec +Copyright 2002-2012 The Apache Software Foundation + +-------------------------------------------------------------------------------- +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java contains +test data from http://aspell.sourceforge.net/test/batch0.tab. + +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org). Verbatim copying +and distribution of this entire article is permitted in any medium, +provided this notice is preserved. 
+-------------------------------------------------------------------------------- + + + + +================= commons-collections-3.2.2.jar ================= +Apache Commons Collections +Copyright 2001-2015 The Apache Software Foundation + + + + +================= commons-collections4-4.1.jar ================= +Apache Commons Collections +Copyright 2001-2015 The Apache Software Foundation + + + + +================= commons-compress-1.16.jar ================= +Apache Commons Compress +Copyright 2002-2018 The Apache Software Foundation + +The files in the package org.apache.commons.compress.archivers.sevenz +were derived from the LZMA SDK, version 9.20 (C/ and CPP/7zip/), +which has been placed in the public domain: + +"LZMA SDK is placed in the public domain." (http://www.7-zip.org/sdk.html) + + + + +================= commons-dbcp2-2.0.1.jar ================= +Apache Commons DBCP +Copyright 2001-2014 The Apache Software Foundation + + + + +================= commons-io-2.5.jar ================= +Apache Commons IO +Copyright 2002-2016 The Apache Software Foundation + + + + +================= commons-lang-2.6.jar ================= +Apache Commons Lang +Copyright 2001-2011 The Apache Software Foundation + + + + +================= commons-lang3-3.7.jar ================= +Apache Commons Lang +Copyright 2001-2017 The Apache Software Foundation + +This product includes software from the Spring Framework, +under the Apache License 2.0 (see: StringUtils.containsWhitespace()) + + + + +================= commons-logging-1.1.1.jar ================= +// ------------------------------------------------------------------ +// NOTICE file corresponding to the section 4d of The Apache License, +// Version 2.0, in this case for Commons Logging +// ------------------------------------------------------------------ + +Commons Logging +Copyright 2001-2007 The Apache Software Foundation + +This product includes/uses software(s) developed by 'an unknown organization' + - Unnamed - 
avalon-framework:avalon-framework:jar:4.1.3 + - Unnamed - log4j:log4j:jar:1.2.12 + - Unnamed - logkit:logkit:jar:1.0.1 + + + + +================= commons-math3-3.6.1.jar ================= +Apache Commons Math +Copyright 2001-2016 The Apache Software Foundation + +This product includes software developed for Orekit by +CS Systèmes d'Information (http://www.c-s.fr/) +Copyright 2010-2012 CS Systèmes d'Information + + + + +================= commons-pool-1.6.jar ================= +Apache Commons Pool +Copyright 2001-2012 The Apache Software Foundation + + + + +================= commons-pool2-2.2.jar ================= +Apache Commons Pool +Copyright 2001-2014 The Apache Software Foundation + +The LinkedBlockingDeque implementation is based on an implementation written by +Doug Lea with assistance from members of JCP JSR-166 Expert Group and released +to the public domain, as explained at +http://creativecommons.org/licenses/publicdomain + + + + +================= commons-text-1.3.jar ================= +Apache Commons Text +Copyright 2001-2018 The Apache Software Foundation + + + + +================= compress-lzf-1.0.4.jar ================= +# Compress LZF + +This library contains efficient implementation of LZF compression format, +as well as additional helper classes that build on JDK-provided gzip (deflat) +codec. + +## Licensing + +Library is licensed under Apache License 2.0, as per accompanying LICENSE file. + +## Credit + +Library has been written by Tatu Saloranta (tatu.saloranta@iki.fi). +It was started at Ning, inc., as an official Open Source process used by +platform backend, but after initial versions has been developed outside of +Ning by supporting community. + +Other contributors include: + +* Jon Hartlaub (first versions of streaming reader/writer; unit tests) +* Cedrik Lime: parallel LZF implementation + +Various community members have contributed bug reports, and suggested minor +fixes; these can be found from file "VERSION.txt" in SCM. 
+ + + + +================= config-magic-0.9.jar ================= +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + + + + +================= curator-client-4.1.0.jar ================= +Curator Client +Copyright 2011-2018 The Apache Software Foundation + + + + +================= curator-framework-4.1.0.jar ================= +Curator Framework +Copyright 2011-2018 The Apache Software Foundation + + + + +================= curator-recipes-4.1.0.jar ================= +Curator Recipes +Copyright 2011-2018 The Apache Software Foundation + + + + +================= curator-x-discovery-4.1.0.jar ================= +Curator Service Discovery +Copyright 2011-2018 The Apache Software Foundation + + + + +================= Derby 10.11.1.1 ================= +derby-10.11.1.1.jar +derbyclient-10.11.1.1.jar +derbynet-10.11.1.1.jar +================= +========================================================================= +== NOTICE file corresponding to section 4(d) of the Apache License, +== Version 2.0, in this case for the Apache Derby distribution. +== +== DO NOT EDIT THIS FILE DIRECTLY. IT IS GENERATED +== BY THE buildnotice TARGET IN THE TOP LEVEL build.xml FILE. +== +========================================================================= + +Apache Derby +Copyright 2004-2014 The Apache Software Foundation + + +========================================================================= + +Portions of Derby were originally developed by +International Business Machines Corporation and are +licensed to the Apache Software Foundation under the +"Software Grant and Corporate Contribution License Agreement", +informally known as the "Derby CLA". +The following copyright notice(s) were affixed to portions of the code +with which this file is now or was at one time distributed +and are placed here unaltered. + +(C) Copyright 1997,2004 International Business Machines Corporation. All rights reserved. + +(C) Copyright IBM Corp. 2003. 
+ + +========================================================================= + + +The portion of the functionTests under 'nist' was originally +developed by the National Institute of Standards and Technology (NIST), +an agency of the United States Department of Commerce, and adapted by +International Business Machines Corporation in accordance with the NIST +Software Acknowledgment and Redistribution document at +http://www.itl.nist.gov/div897/ctg/sql_form.htm + + + +========================================================================= + + +The JDBC apis for small devices and JDBC3 (under java/stubs/jsr169 and +java/stubs/jdbc3) were produced by trimming sources supplied by the +Apache Harmony project. In addition, the Harmony SerialBlob and +SerialClob implementations are used. The following notice covers the Harmony sources: + +Portions of Harmony were originally developed by +Intel Corporation and are licensed to the Apache Software +Foundation under the "Software Grant and Corporate Contribution +License Agreement", informally known as the "Intel Harmony CLA". + + +========================================================================= + + +The Derby build relies on source files supplied by the Apache Felix +project. The following notice covers the Felix files: + + Apache Felix Main + Copyright 2008 The Apache Software Foundation + + + I. Included Software + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + Licensed under the Apache License 2.0. + + This product includes software developed at + The OSGi Alliance (http://www.osgi.org/). + Copyright (c) OSGi Alliance (2000, 2007). + Licensed under the Apache License 2.0. + + This product includes software from http://kxml.sourceforge.net. + Copyright (c) 2002,2003, Stefan Haustein, Oberhausen, Rhld., Germany. + Licensed under BSD License. + + II. Used Software + + This product uses software developed at + The OSGi Alliance (http://www.osgi.org/). 
+ Copyright (c) OSGi Alliance (2000, 2007). + Licensed under the Apache License 2.0. + + + III. License Summary + - Apache License 2.0 + - BSD License + + +========================================================================= + + +The Derby build relies on jar files supplied by the Apache Lucene +project. The following notice covers the Lucene files: + +Apache Lucene +Copyright 2013 The Apache Software Foundation + +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + +Includes software from other Apache Software Foundation projects, +including, but not limited to: + - Apache Ant + - Apache Jakarta Regexp + - Apache Commons + - Apache Xerces + +ICU4J, (under analysis/icu) is licensed under an MIT styles license +and Copyright (c) 1995-2008 International Business Machines Corporation and others + +Some data files (under analysis/icu/src/data) are derived from Unicode data such +as the Unicode Character Database. See http://unicode.org/copyright.html for more +details. + +Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is +BSD-licensed, created by Anders Møller. See http://www.brics.dk/automaton/ + +The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were +automatically generated with the moman/finenight FSA library, created by +Jean-Philippe Barrette-LaPierre. This library is available under an MIT license, +see http://sites.google.com/site/rrettesite/moman and +http://bitbucket.org/jpbarrette/moman/overview/ + +The class org.apache.lucene.util.WeakIdentityMap was derived from +the Apache CXF project and is Apache License 2.0. + +The Google Code Prettify is Apache License 2.0. +See http://code.google.com/p/google-code-prettify/ + +JUnit (junit-4.10) is licensed under the Common Public License v. 
1.0 +See http://junit.sourceforge.net/cpl-v10.html + +This product includes code (JaspellTernarySearchTrie) from Java Spelling Checkin +g Package (jaspell): http://jaspell.sourceforge.net/ +License: The BSD License (http://www.opensource.org/licenses/bsd-license.php) + +The snowball stemmers in + analysis/common/src/java/net/sf/snowball +were developed by Martin Porter and Richard Boulton. +The snowball stopword lists in + analysis/common/src/resources/org/apache/lucene/analysis/snowball +were developed by Martin Porter and Richard Boulton. +The full snowball package is available from + http://snowball.tartarus.org/ + +The KStem stemmer in + analysis/common/src/org/apache/lucene/analysis/en +was developed by Bob Krovetz and Sergio Guzman-Lara (CIIR-UMass Amherst) +under the BSD-license. + +The Arabic,Persian,Romanian,Bulgarian, and Hindi analyzers (common) come with a default +stopword list that is BSD-licensed created by Jacques Savoy. These files reside in: +analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt +See http://members.unine.ch/jacques.savoy/clef/index.html. + +The German,Spanish,Finnish,French,Hungarian,Italian,Portuguese,Russian and Swedish light stemmers +(common) are based on BSD-licensed reference implementations created by Jacques Savoy and +Ljiljana Dolamic. 
These files reside in: +analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java + +The Stempel analyzer (stempel) includes BSD-licensed software developed +by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil, +and Edmond Nolan. + +The Polish analyzer (stempel) comes with a default +stopword list that is BSD-licensed created by the Carrot2 project. The file resides +in stempel/src/resources/org/apache/lucene/analysis/pl/stopwords.txt. +See http://project.carrot2.org/license.html. + +The SmartChineseAnalyzer source code (smartcn) was +provided by Xiaoping Gao and copyright 2009 by www.imdict.net. + +WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/) +is derived from Unicode data such as the Unicode Character Database. +See http://unicode.org/copyright.html for more details. + +The Morfologik analyzer (morfologik) includes BSD-licensed software +developed by Dawid Weiss and Marcin Miłkowski (http://morfologik.blogspot.com/). + +Morfologik uses data from Polish ispell/myspell dictionary +(http://www.sjp.pl/slownik/en/) licenced on the terms of (inter alia) +LGPL and Creative Commons ShareAlike. 
+ +Morfologic includes data from BSD-licensed dictionary of Polish (SGJP) +(http://sgjp.pl/morfeusz/) + +Servlet-api.jar and javax.servlet-*.jar are under the CDDL license, the original +source code for this can be found at http://www.eclipse.org/jetty/downloads.php + +=========================================================================== +Kuromoji Japanese Morphological Analyzer - Apache Lucene Integration +=========================================================================== + +This software includes a binary and/or source version of data from + + mecab-ipadic-2.7.0-20070801 + +which can be obtained from + + http://atilika.com/releases/mecab-ipadic/mecab-ipadic-2.7.0-20070801.tar.gz + +or + + http://jaist.dl.sourceforge.net/project/mecab/mecab-ipadic/2.7.0-20070801/mecab-ipadic-2.7.0-20070801.tar.gz + +=========================================================================== +mecab-ipadic-2.7.0-20070801 Notice +=========================================================================== + +Nara Institute of Science and Technology (NAIST), +the copyright holders, disclaims all warranties with regard to this +software, including all implied warranties of merchantability and +fitness, in no event shall NAIST be liable for +any special, indirect or consequential damages or any damages +whatsoever resulting from loss of use, data or profits, whether in an +action of contract, negligence or other tortuous action, arising out +of or in connection with the use or performance of this software. + +A large portion of the dictionary entries +originate from ICOT Free Software. The following conditions for ICOT +Free Software applies to the current dictionary as well. 
+ +Each User may also freely distribute the Program, whether in its +original form or modified, to any third party or parties, PROVIDED +that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear +on, or be attached to, the Program, which is distributed substantially +in the same form as set out herein and that such intended +distribution, if actually made, will neither violate or otherwise +contravene any of the laws and regulations of the countries having +jurisdiction over the User or the intended distribution itself. + +NO WARRANTY + +The program was produced on an experimental basis in the course of the +research and development conducted during the project and is provided +to users as so produced on an experimental basis. Accordingly, the +program is provided without any warranty whatsoever, whether express, +implied, statutory or otherwise. The term "warranty" used herein +includes, but is not limited to, any warranty of the quality, +performance, merchantability and fitness for a particular purpose of +the program and the nonexistence of any infringement or violation of +any right of any third party. + +Each user of the program will agree and understand, and be deemed to +have agreed and understood, that there is no warranty whatsoever for +the program and, accordingly, the entire risk arising from or +otherwise connected with the program is assumed by the user. 
+ +Therefore, neither ICOT, the copyright holder, or any other +organization that participated in or was otherwise related to the +development of the program and their respective officials, directors, +officers and other employees shall be held liable for any and all +damages, including, without limitation, general, special, incidental +and consequential damages, arising out of or otherwise in connection +with the use or inability to use the program or any product, material +or result produced or otherwise obtained by using the program, +regardless of whether they have been advised of, or otherwise had +knowledge of, the possibility of such damages at any time during the +project or thereafter. Each user will be deemed to have agreed to the +foregoing by his or her commencement of use of the program. The term +"use" as used herein includes, but is not limited to, the use, +modification, copying and distribution of the program and the +production of secondary products from the program. + +In the case where the program, whether in its original form or +modified, was distributed or delivered to or received by a user from +any person, organization or entity other than ICOT, unless it makes or +grants independently of ICOT any specific warranty to the user in +writing, such person, organization or entity, will also be exempted +from and not be held liable to the user for any such damages as noted +above as far as the program is concerned. + + + + +================= guice-4.1.0.jar ================= +Google Guice - Core Library +Copyright 2006-2016 Google, Inc. + + + + +================= guice-multibindings-4.1.0.jar ================= +Google Guice - Extensions - MultiBindings +Copyright 2006-2016 Google, Inc. + + + + +================= guice-servlet-4.1.0.jar ================= +Google Guice - Extensions - Servlet +Copyright 2006-2016 Google, Inc. 
+ + + + +================= httpclient-4.5.3.jar ================= +Apache HttpClient +Copyright 1999-2017 The Apache Software Foundation + + + + +================= httpcore-4.4.4.jar ================= +Apache HttpCore +Copyright 2005-2015 The Apache Software Foundation + + +This project contains annotations derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. See http://www.jcip.net + + + + +================= ion-java-1.0.2.jar ================= +Amazon Ion Java +Copyright 2007-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + + + +================= Jackson 2.6.7 ================= +jackson-annotations-2.6.7.jar +jackson-core-2.6.7.jar +jackson-databind-2.6.7.jar +jackson-dataformat-cbor-2.6.7.jar +jackson-dataformat-smile-2.6.7.jar +jackson-datatype-guava-2.6.7.jar +jackson-datatype-joda-2.6.7.jar +jackson-jaxrs-base-2.6.7.jar +jackson-jaxrs-json-provider-2.6.7.jar +jackson-jaxrs-smile-provider-2.6.7.jar +jackson-module-jaxb-annotations-2.6.7.jar +================= +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. 
+ + + + +================= Jackson 1.9.13 ================= +jackson-core-asl-1.9.13.jar +jackson-mapper-asl-1.9.13.jar +================= +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. + + + + +================== Jetty 9.4.10.v20180503 ================= +jetty-client-9.4.10.v20180503.jar +jetty-continuation-9.4.10.v20180503.jar +jetty-http-9.4.10.v20180503.jar +jetty-io-9.4.10.v20180503.jar +jetty-proxy-9.4.10.v20180503.jar +jetty-security-9.4.10.v20180503.jar +jetty-server-9.4.10.v20180503.jar +jetty-servlet-9.4.10.v20180503.jar +jetty-servlets-9.4.10.v20180503.jar +jetty-util-9.4.10.v20180503.jar +================== +============================================================== + Jetty Web Container + Copyright 1995-2018 Mort Bay Consulting Pty Ltd. +============================================================== + +The Jetty Web Container is Copyright Mort Bay Consulting Pty Ltd +unless otherwise noted. + +Jetty is dual licensed under both + + * The Apache 2.0 License + http://www.apache.org/licenses/LICENSE-2.0.html + + and + + * The Eclipse Public 1.0 License + http://www.eclipse.org/legal/epl-v10.html + +Jetty may be distributed under either license. + +------ +Eclipse + +The following artifacts are EPL. + * org.eclipse.jetty.orbit:org.eclipse.jdt.core + +The following artifacts are EPL and ASL2. + * org.eclipse.jetty.orbit:javax.security.auth.message + + +The following artifacts are EPL and CDDL 1.0. + * org.eclipse.jetty.orbit:javax.mail.glassfish + + +------ +Oracle + +The following artifacts are CDDL + GPLv2 with classpath exception. 
+https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html + + * javax.servlet:javax.servlet-api + * javax.annotation:javax.annotation-api + * javax.transaction:javax.transaction-api + * javax.websocket:javax.websocket-api + +------ +Oracle OpenJDK + +If ALPN is used to negotiate HTTP/2 connections, then the following +artifacts may be included in the distribution or downloaded when ALPN +module is selected. + + * java.sun.security.ssl + +These artifacts replace/modify OpenJDK classes. The modififications +are hosted at github and both modified and original are under GPL v2 with +classpath exceptions. +http://openjdk.java.net/legal/gplv2+ce.html + + +------ +OW2 + +The following artifacts are licensed by the OW2 Foundation according to the +terms of http://asm.ow2.org/license.html + +org.ow2.asm:asm-commons +org.ow2.asm:asm + + +------ +Apache + +The following artifacts are ASL2 licensed. + +org.apache.taglibs:taglibs-standard-spec +org.apache.taglibs:taglibs-standard-impl + + +------ +MortBay + +The following artifacts are ASL2 licensed. Based on selected classes from +following Apache Tomcat jars, all ASL2 licensed. + +org.mortbay.jasper:apache-jsp + org.apache.tomcat:tomcat-jasper + org.apache.tomcat:tomcat-juli + org.apache.tomcat:tomcat-jsp-api + org.apache.tomcat:tomcat-el-api + org.apache.tomcat:tomcat-jasper-el + org.apache.tomcat:tomcat-api + org.apache.tomcat:tomcat-util-scan + org.apache.tomcat:tomcat-util + +org.mortbay.jasper:apache-el + org.apache.tomcat:tomcat-jasper-el + org.apache.tomcat:tomcat-el-api + + +------ +Mortbay + +The following artifacts are CDDL + GPLv2 with classpath exception. + +https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html + +org.eclipse.jetty.toolchain:jetty-schemas + +------ +Assorted + +The UnixCrypt.java code implements the one way cryptography used by +Unix systems for simple password protection. Copyright 1996 Aki Yoshida, +modified April 2001 by Iris Van den Broeke, Daniel Deville. 
+Permission to use, copy, modify and distribute UnixCrypt +for non-commercial or commercial purposes and without fee is +granted provided that the copyright notice appears in all copies. + + + + +================= jdbi-2.63.1.jar ================= +Java ClassMate library was originally written by Tatu Saloranta (tatu.saloranta@iki.fi) + +Other developers who have contributed code are: + +* Brian Langel + + + + +================= joda-time-2.9.9.jar ================= +============================================================================= += NOTICE file corresponding to section 4d of the Apache License Version 2.0 = +============================================================================= +This product includes software developed by +Joda.org (http://www.joda.org/). + + + + +================= log4j-1.2-api-2.5.jar ================= +Apache Log4j 1.x Compatibility API +Copyright 1999-2015 Apache Software Foundation + + + + +================= log4j-api-2.5.jar ================= +Apache Log4j API +Copyright 1999-2015 Apache Software Foundation + + + + +================= log4j-core-2.5.jar ================= +Apache Log4j Core +Copyright 1999-2012 Apache Software Foundation + +ResolverUtil.java +Copyright 2005-2006 Tim Fennell + + + + +================= log4j-jul-2.5.jar ================= +Apache Log4j JUL Adapter +Copyright 1999-2015 Apache Software Foundation + + + + +================= log4j-slf4j-impl-2.5.jar ================= +Apache Log4j SLF4J Binding +Copyright 1999-2015 Apache Software Foundation + + + + +================= maven-aether-provider-3.1.1.jar ================= +Maven Aether Provider +Copyright 2001-2013 The Apache Software Foundation + + + + +================= maven-artifact-3.6.0.jar ================= +Maven Artifact +Copyright 2001-2018 The Apache Software Foundation + + + + +================= maven-model-3.1.1.jar ================= +Maven Model +Copyright 2001-2013 The Apache Software Foundation + + + + +================= 
maven-model-builder-3.1.1.jar ================= +Maven Model Builder +Copyright 2001-2013 The Apache Software Foundation + + + + +================= maven-repository-metadata-3.1.1.jar ================= +Maven Repository Metadata Model +Copyright 2001-2013 The Apache Software Foundation + + + + +================= maven-settings-3.1.1.jar ================= +Maven Settings +Copyright 2001-2013 The Apache Software Foundation + + + + +================= maven-settings-builder-3.1.1.jar ================= +Maven Settings Builder +Copyright 2001-2013 The Apache Software Foundation + + + + +================= metrics-core-4.0.0.jar ================= +Metrics +Copyright 2010-2013 Coda Hale and Yammer, Inc., 2014-2017 Dropwizard Team + +This product includes software developed by Coda Hale and Yammer, Inc. + +This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64, +LongAdder), which was released with the following comments: + + Written by Doug Lea with assistance from members of JCP JSR-166 + Expert Group and released to the public domain, as explained at + http://creativecommons.org/publicdomain/zero/1.0/ + + + + +================= netty-3.10.6.Final.jar ================= + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2011 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. 
+ +Also, please refer to each LICENSE..txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. + +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified version of 'JZlib', a re-implementation of +zlib in pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD Style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product contains a modified version of 'Webbit', a Java event based +WebSocket and HTTP server: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/protobuf/ + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ + +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained at: + + * LICENSE: + * license/LICENSE.felix.txt (Apache License 2.0) + * HOMEPAGE: + * http://felix.apache.org/ + + + + +================= Netty 4 ================= +netty-all-4.1.30.Final.jar +netty-buffer-4.1.29.Final.jar +netty-codec-4.1.29.Final.jar +netty-codec-dns-4.1.29.Final.jar +netty-codec-http-4.1.29.Final.jar +netty-codec-socks-4.1.29.Final.jar +netty-common-4.1.29.Final.jar +netty-handler-4.1.29.Final.jar +netty-handler-proxy-4.1.29.Final.jar +netty-reactive-streams-2.0.0.jar +netty-resolver-4.1.29.Final.jar +netty-resolver-dns-4.1.29.Final.jar +netty-transport-4.1.29.Final.jar +netty-transport-native-epoll-4.1.29.Final-linux-x86_64.jar +netty-transport-native-unix-common-4.1.29.Final.jar +================= + The Netty Project + ================= + +Please visit the Netty web site for more 
information: + + * http://netty.io/ + +Copyright 2014 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. + +Also, please refer to each LICENSE..txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. + +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified portion of 'Webbit', an event based +WebSocket and HTTP server, which can be obtained at: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product contains a modified portion of 'SLF4J', a simple logging +facade for Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product contains a modified portion of 'Apache Harmony', an open source +Java SE, which can be obtained at: + + * NOTICE: + * license/NOTICE.harmony.txt + * LICENSE: + * license/LICENSE.harmony.txt (Apache License 2.0) + * HOMEPAGE: + * http://archive.apache.org/dist/harmony/ + +This product contains a modified portion of 'jbzip2', a Java bzip2 compression +and decompression library written by Matthew J. Francis. It can be obtained at: + + * LICENSE: + * license/LICENSE.jbzip2.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/jbzip2/ + +This product contains a modified portion of 'libdivsufsort', a C API library to construct +the suffix array and the Burrows-Wheeler transformed string for any input string of +a constant-size alphabet written by Yuta Mori. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.libdivsufsort.txt (MIT License) + * HOMEPAGE: + * https://github.com/y-256/libdivsufsort + +This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, + which can be obtained at: + + * LICENSE: + * license/LICENSE.jctools.txt (ASL2 License) + * HOMEPAGE: + * https://github.com/JCTools/JCTools + +This product optionally depends on 'JZlib', a re-implementation of zlib in +pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product optionally depends on 'Compress-LZF', a Java library for encoding and +decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: + + * LICENSE: + * license/LICENSE.compress-lzf.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/ning/compress + +This product optionally depends on 'lz4', a LZ4 Java compression +and decompression library written by Adrien Grand. It can be obtained at: + + * LICENSE: + * license/LICENSE.lz4.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jpountz/lz4-java + +This product optionally depends on 'lzma-java', a LZMA Java compression +and decompression library, which can be obtained at: + + * LICENSE: + * license/LICENSE.lzma-java.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jponge/lzma-java + +This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression +and decompression library written by William Kinney. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.jfastlz.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/jfastlz/ + +This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * https://github.com/google/protobuf + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality. It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'Snappy', a compression library produced +by Google Inc, which can be obtained at: + + * LICENSE: + * license/LICENSE.snappy.txt (New BSD License) + * HOMEPAGE: + * https://github.com/google/snappy + +This product optionally depends on 'JBoss Marshalling', an alternative Java +serialization API, which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://www.jboss.org/jbossmarshalling + +This product optionally depends on 'Caliper', Google's micro- +benchmarking framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.caliper.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/google/caliper + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, which +can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'Aalto XML', an ultra-high performance 
+non-blocking XML processor, which can be obtained at: + + * LICENSE: + * license/LICENSE.aalto-xml.txt (Apache License 2.0) + * HOMEPAGE: + * http://wiki.fasterxml.com/AaltoHome + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: + + * LICENSE: + * license/LICENSE.hpack.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/twitter/hpack + +This product contains a modified portion of 'Apache Commons Lang', a Java library +provides utilities for the java.lang API, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-lang.txt (Apache License 2.0) + * HOMEPAGE: + * https://commons.apache.org/proper/commons-lang/ + + +This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. + + * LICENSE: + * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/takari/maven-wrapper + + + + +================= objenesis-2.6.jar ================= +// ------------------------------------------------------------------ +// NOTICE file corresponding to the section 4d of The Apache License, +// Version 2.0, in this case for Objenesis +// ------------------------------------------------------------------ + +Objenesis +Copyright 2006-2017 Joe Walnes, Henri Tremblay, Leonardo Mesquita + + + + +================= plexus-utils-3.0.15.jar ================= +This product includes software developed by the Indiana University + Extreme! Lab (http://www.extreme.indiana.edu/). + +This product includes software developed by +ThoughtWorks (http://www.thoughtworks.com). + +This product includes software developed by +javolution (http://javolution.org/). + +This product includes software developed by +Rome (https://rome.dev.java.net/). + + + + +================= sigar-1.6.5.132.jar ================= +Copyright (c) 2004-2011 VMware, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +ADDITIONAL LICENSE INFORMATION: + +Hyperic SIGAR includes some third-party open source components +in its distribution. The list below identifies the community or +organization and links to their appropriate license terms. + +The Hyperic team would like to thank all the communities +of the projects listed below for their contributions. + +---------------------------------------------------------- +Components under the Apache License 2.0: +---------------------------------------------------------- + +The following components are included without modification: + +- log4j - +Information: http://logging.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +The following components are included with modification: + +- cpptasks - +Information: http://ant-contrib.sourceforge.net/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +- (portions of) APR - +Information: http://apr.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +---------------------------------------------------------- +Components under BSD/MIT Style Licenses: +---------------------------------------------------------- + +The following components are included with modification: + +- solaris get_mib2 - +Information: ftp://vic.cc.purdue.edu/pub/tools/unix/solaris/get_mib2/ +License: within src/os/solaris/get_mib2.[ch] + +Copyright 1995 Purdue Research Foundation, West Lafayette, Indiana +47907. All rights reserved. + +Written by Victor A. 
Abell + +This software is not subject to any license of the American Telephone +and Telegraph Company or the Regents of the University of California. + +Permission is granted to anyone to use this software for any purpose on +any computer system, and to alter it and redistribute it freely, subject +to the following restrictions: + +1. Neither Victor A Abell nor Purdue University are responsible for + any consequences of the use of this software. + +2. The origin of this software must not be misrepresented, either by + explicit claim or by omission. Credit to Victor A. Abell and Purdue + University must appear in documentation and sources. + +3. Altered versions must be plainly marked as such, and must not be + misrepresented as being the original software. + +4. This notice may not be removed or altered. + +- getline by Chris Thewalt - +Information: http://tinyurl.com/r438r +License: within src/sigar_getline.c + +Copyright (C) 1991, 1992 by Chris Thewalt (thewalt@ce.berkeley.edu) + +Permission to use, copy, modify, and distribute this software +for any purpose and without fee is hereby granted, provided +that the above copyright notices appear in all copies and that both the +copyright notice and this permission notice appear in supporting +documentation. This software is provided "as is" without express or +implied warranty. + +- PrintfFormat.java - +Information: http://java.sun.com/developer/technicalArticles/Programming/sprintf/PrintfFormat.java +License: within bindings/java/src/org/hyperic/sigar/util/PrintfFormat.java + +(c) 2000 Sun Microsystems, Inc. +ALL RIGHTS RESERVED + +License Grant- + +Permission to use, copy, modify, and distribute this Software and its +documentation for NON-COMMERCIAL or COMMERCIAL purposes and without fee is +hereby granted. + +This Software is provided "AS IS". 
All express warranties, including any +implied warranty of merchantability, satisfactory quality, fitness for a +particular purpose, or non-infringement, are disclaimed, except to the extent +that such disclaimers are held to be legally invalid. + +You acknowledge that Software is not designed, licensed or intended for use in +the design, construction, operation or maintenance of any nuclear facility +("High Risk Activities"). Sun disclaims any express or implied warranty of +fitness for such uses. + +Please refer to the file http://www.sun.com/policies/trademarks/ for further +important trademark information and to +http://java.sun.com/nav/business/index.html for further important licensing +information for the Java Technology. + + + + +================= wagon-provider-api-2.4.jar ================= +Apache Maven Wagon :: API +Copyright 2003-2013 The Apache Software Foundation + + + + +================= zookeeper-3.4.11.jar ================= +Apache ZooKeeper +Copyright 2009-2017 The Apache Software Foundation + + + + + + + + + +############ BINARY/HADOOP-CLIENT ############ + +================= apacheds-i18n-2.0.0-M15.jar ================= +ApacheDS I18n +Copyright 2003-2013 The Apache Software Foundation + + + + +================= apacheds-kerberos-codec-2.0.0-M15.jar ================= +ApacheDS Protocol Kerberos Codec +Copyright 2003-2013 The Apache Software Foundation + + + + +================= api-asn1-api-1.0.0-M20.jar ================= +Apache Directory API ASN.1 API +Copyright 2003-2013 The Apache Software Foundation + + + + +================= api-util-1.0.0-M20.jar ================= +Apache Directory LDAP API Utilities +Copyright 2003-2013 The Apache Software Foundation + + + + +================= avro-1.7.4.jar ================= +Apache Avro +Copyright 2009-2013 The Apache Software Foundation + + + + +================= commons-beanutils-1.7.0.jar ================= +This product includes software developed by +The Apache Software Foundation 
(http://www.apache.org/). + + + + +================= commons-beanutils-core-1.8.0.jar ================= +Apache Commons BeanUtils +Copyright 2000-2008 The Apache Software Foundation + + + + +================= commons-codec-1.4.jar ================= +Apache Commons Codec +Copyright 2002-2009 The Apache Software Foundation + +-------------------------------------------------------------------------------- +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java contains +test data from http://aspell.sourceforge.net/test/batch0.tab. + +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org). Verbatim copying +and distribution of this entire article is permitted in any medium, +provided this notice is preserved. +-------------------------------------------------------------------------------- + + + + +================= commons-compress-1.4.1.jar ================= +Apache Commons Compress +Copyright 2002-2012 The Apache Software Foundation + + + + +================= commons-configuration-1.6.jar ================= +Apache Commons Configuration +Copyright 2001-2008 The Apache Software Foundation + + + + +================= commons-digester-1.8.jar ================= +Apache Jakarta Commons Digester +Copyright 2001-2006 The Apache Software Foundation + + + + +================= commons-io-2.4.jar ================= +Apache Commons IO +Copyright 2002-2012 The Apache Software Foundation + + + + +================= commons-logging-1.1.3.jar ================= +Apache Commons Logging +Copyright 2003-2013 The Apache Software Foundation + + + + +================= commons-math3-3.1.1.jar ================= +Apache Commons Math +Copyright 2001-2012 The Apache Software Foundation + +=============================================================================== + +The BracketFinder (package org.apache.commons.math3.optimization.univariate) +and PowellOptimizer (package org.apache.commons.math3.optimization.general) +classes are based on the Python code in module "optimize.py" 
(version 0.5) +developed by Travis E. Oliphant for the SciPy library (http://www.scipy.org/) +Copyright © 2003-2009 SciPy Developers. +=============================================================================== + +The LinearConstraint, LinearObjectiveFunction, LinearOptimizer, +RelationShip, SimplexSolver and SimplexTableau classes in package +org.apache.commons.math3.optimization.linear include software developed by +Benjamin McCann (http://www.benmccann.com) and distributed with +the following copyright: Copyright 2009 Google Inc. +=============================================================================== + +This product includes software developed by the +University of Chicago, as Operator of Argonne National +Laboratory. +The LevenbergMarquardtOptimizer class in package +org.apache.commons.math3.optimization.general includes software +translated from the lmder, lmpar and qrsolv Fortran routines +from the Minpack package +Minpack Copyright Notice (1999) University of Chicago. All rights reserved +=============================================================================== + +The GraggBulirschStoerIntegrator class in package +org.apache.commons.math3.ode.nonstiff includes software translated +from the odex Fortran routine developed by E. Hairer and G. Wanner. +Original source copyright: +Copyright (c) 2004, Ernst Hairer +=============================================================================== + +The EigenDecompositionImpl class in package +org.apache.commons.math3.linear includes software translated +from some LAPACK Fortran routines. Original source copyright: +Copyright (c) 1992-2008 The University of Tennessee. All rights reserved. +=============================================================================== + +The MersenneTwister class in package org.apache.commons.math3.random +includes software translated from the 2002-01-26 version of +the Mersenne-Twister generator written in C by Makoto Matsumoto and Takuji +Nishimura. 
Original source copyright: +Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, +All rights reserved +=============================================================================== + +The LocalizedFormatsTest class in the unit tests is an adapted version of +the OrekitMessagesTest class from the orekit library distributed under the +terms of the Apache 2 licence. Original source copyright: +Copyright 2010 CS Systèmes d'Information +=============================================================================== + +The HermiteInterpolator class and its corresponding test have been imported from +the orekit library distributed under the terms of the Apache 2 licence. Original +source copyright: +Copyright 2010-2012 CS Systèmes d'Information +=============================================================================== + +The creation of the package "o.a.c.m.analysis.integration.gauss" was inspired +by an original code donated by Sébastien Brisard. +=============================================================================== + + +The complete text of licenses and disclaimers associated with the the original +sources enumerated above at the time of code translation are in the LICENSE.txt +file. 
+ + + + +================= commons-net-3.1.jar ================= +Apache Commons Net +Copyright 2001-2012 The Apache Software Foundation + + + + +================= curator-client-2.7.1.jar ================= +Curator Client +Copyright 2011-2015 The Apache Software Foundation + + + + +================= curator-framework-2.7.1.jar ================= +Curator Framework +Copyright 2011-2015 The Apache Software Foundation + + + + +================= curator-recipes-2.7.1.jar ================= +Curator Recipes +Copyright 2011-2015 The Apache Software Foundation + + + + +================= Hadoop 2.8.3 ================= +hadoop-annotations-2.8.3.jar +hadoop-auth-2.8.3.jar +hadoop-client-2.8.3.jar +hadoop-common-2.8.3.jar +hadoop-hdfs-client-2.8.3.jar +hadoop-mapreduce-client-app-2.8.3.jar +hadoop-mapreduce-client-common-2.8.3.jar +hadoop-mapreduce-client-core-2.8.3.jar +hadoop-mapreduce-client-jobclient-2.8.3.jar +hadoop-mapreduce-client-shuffle-2.8.3.jar +hadoop-yarn-api-2.8.3.jar +hadoop-yarn-client-2.8.3.jar +hadoop-yarn-common-2.8.3.jar +hadoop-yarn-server-common-2.8.3.jar +================= +The binary distribution of this product bundles binaries of +org.iq80.leveldb:leveldb-api (https://github.com/dain/leveldb), which has the +following notices: +* Copyright 2011 Dain Sundstrom +* Copyright 2011 FuseSource Corp. http://fusesource.com + +The binary distribution of this product bundles binaries of +org.fusesource.hawtjni:hawtjni-runtime (https://github.com/fusesource/hawtjni), +which has the following notices: +* This product includes software developed by FuseSource Corp. + http://fusesource.com +* This product includes software developed at + Progress Software Corporation and/or its subsidiaries or affiliates. +* This product includes software developed by IBM Corporation and others. 
+ +The binary distribution of this product bundles binaries of +AWS Java SDK 1.10.6, +which has the following notices: + * This software includes third party software subject to the following + copyrights: - XML parsing and utility functions from JetS3t - Copyright + 2006-2009 James Murty. - JSON parsing and utility functions from JSON.org - + Copyright 2002 JSON.org. - PKCS#1 PEM encoded private key parsing and utility + functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. + +The binary distribution of this product bundles binaries of +Gson 2.2.4, +which has the following notices: + + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2014 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. + +Also, please refer to each LICENSE..txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. + +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified portion of 'Webbit', an event based +WebSocket and HTTP server, which can be obtained at: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product contains a modified portion of 'SLF4J', a simple logging +facade for Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product contains a modified portion of 'ArrayDeque', written by Josh +Bloch of Google, Inc: + + * LICENSE: + * license/LICENSE.deque.txt (Public Domain) + +This product contains a modified portion of 'Apache Harmony', an open source +Java SE, which can be obtained at: + + * LICENSE: + * license/LICENSE.harmony.txt (Apache License 2.0) + * HOMEPAGE: + * http://archive.apache.org/dist/harmony/ + +This product contains a modified version of Roland Kuhn's ASL2 +AbstractNodeQueue, which is based on Dmitriy Vyukov's non-intrusive MPSC queue. +It can be obtained at: + + * LICENSE: + * license/LICENSE.abstractnodequeue.txt (Public Domain) + * HOMEPAGE: + * https://github.com/akka/akka/blob/wip-2.2.3-for-scala-2.11/akka-actor/src/main/java/akka/dispatch/AbstractNodeQueue.java + +This product contains a modified portion of 'jbzip2', a Java bzip2 compression +and decompression library written by Matthew J. Francis. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.jbzip2.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/jbzip2/ + +This product contains a modified portion of 'libdivsufsort', a C API library to construct +the suffix array and the Burrows-Wheeler transformed string for any input string of +a constant-size alphabet written by Yuta Mori. It can be obtained at: + + * LICENSE: + * license/LICENSE.libdivsufsort.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/libdivsufsort/ + +This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, + which can be obtained at: + + * LICENSE: + * license/LICENSE.jctools.txt (ASL2 License) + * HOMEPAGE: + * https://github.com/JCTools/JCTools + +This product optionally depends on 'JZlib', a re-implementation of zlib in +pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product optionally depends on 'Compress-LZF', a Java library for encoding and +decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: + + * LICENSE: + * license/LICENSE.compress-lzf.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/ning/compress + +This product optionally depends on 'lz4', a LZ4 Java compression +and decompression library written by Adrien Grand. It can be obtained at: + + * LICENSE: + * license/LICENSE.lz4.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jpountz/lz4-java + +This product optionally depends on 'lzma-java', a LZMA Java compression +and decompression library, which can be obtained at: + + * LICENSE: + * license/LICENSE.lzma-java.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/jponge/lzma-java + +This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression +and decompression library written by William Kinney. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.jfastlz.txt (MIT License) + * HOMEPAGE: + * https://code.google.com/p/jfastlz/ + +This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/protobuf/ + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality. It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'Snappy', a compression library produced +by Google Inc, which can be obtained at: + + * LICENSE: + * license/LICENSE.snappy.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/snappy/ + +This product optionally depends on 'JBoss Marshalling', an alternative Java +serialization API, which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://www.jboss.org/jbossmarshalling + +This product optionally depends on 'Caliper', Google's micro- +benchmarking framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.caliper.txt (Apache License 2.0) + * HOMEPAGE: + * http://code.google.com/p/caliper/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, which +can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'Aalto XML', an ultra-high performance 
+non-blocking XML processor, which can be obtained at: + + * LICENSE: + * license/LICENSE.aalto-xml.txt (Apache License 2.0) + * HOMEPAGE: + * http://wiki.fasterxml.com/AaltoHome + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: + + * LICENSE: + * license/LICENSE.hpack.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/twitter/hpack + +This product contains a modified portion of 'Apache Commons Lang', a Java library +provides utilities for the java.lang API, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-lang.txt (Apache License 2.0) + * HOMEPAGE: + * https://commons.apache.org/proper/commons-lang/ + +The binary distribution of this product bundles binaries of +Commons Codec 1.4, +which has the following notices: + * src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.javacontains test data from http://aspell.net/test/orig/batch0.tab.Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + =============================================================================== + The content of package org.apache.commons.codec.language.bm has been translated + from the original php source code available at http://stevemorse.org/phoneticinfo.htm + with permission from the original authors. + Original source copyright:Copyright (c) 2008 Alexander Beider & Stephen P. Morse. 
+ +The binary distribution of this product bundles binaries of +Commons Lang 2.6, +which has the following notices: + * This product includes software from the Spring Framework,under the Apache License 2.0 (see: StringUtils.containsWhitespace()) + +The binary distribution of this product bundles binaries of +Apache Log4j 1.2.17, +which has the following notices: + * ResolverUtil.java + Copyright 2005-2006 Tim Fennell + Dumbster SMTP test server + Copyright 2004 Jason Paul Kitchen + TypeUtil.java + Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams + +The binary distribution of this product bundles binaries of +Java Concurrency in Practice book annotations 1.0, +which has the following notices: + * Copyright (c) 2005 Brian Goetz and Tim Peierls Released under the Creative + Commons Attribution License (http://creativecommons.org/licenses/by/2.5) + Official home: http://www.jcip.net Any republication or derived work + distributed in source code form must include this copyright and license + notice. + +The binary distribution of this product bundles binaries of +Jetty 6.1.26, +which has the following notices: + * ============================================================== + Jetty Web Container + Copyright 1995-2016 Mort Bay Consulting Pty Ltd. + ============================================================== + + The Jetty Web Container is Copyright Mort Bay Consulting Pty Ltd + unless otherwise noted. + + Jetty is dual licensed under both + + * The Apache 2.0 License + http://www.apache.org/licenses/LICENSE-2.0.html + + and + + * The Eclipse Public 1.0 License + http://www.eclipse.org/legal/epl-v10.html + + Jetty may be distributed under either license. + + ------ + Eclipse + + The following artifacts are EPL. + * org.eclipse.jetty.orbit:org.eclipse.jdt.core + + The following artifacts are EPL and ASL2. + * org.eclipse.jetty.orbit:javax.security.auth.message + + + The following artifacts are EPL and CDDL 1.0. 
+ * org.eclipse.jetty.orbit:javax.mail.glassfish + + + ------ + Oracle + + The following artifacts are CDDL + GPLv2 with classpath exception. + https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html + + * javax.servlet:javax.servlet-api + * javax.annotation:javax.annotation-api + * javax.transaction:javax.transaction-api + * javax.websocket:javax.websocket-api + + ------ + Oracle OpenJDK + + If ALPN is used to negotiate HTTP/2 connections, then the following + artifacts may be included in the distribution or downloaded when ALPN + module is selected. + + * java.sun.security.ssl + + These artifacts replace/modify OpenJDK classes. The modififications + are hosted at github and both modified and original are under GPL v2 with + classpath exceptions. + http://openjdk.java.net/legal/gplv2+ce.html + + + ------ + OW2 + + The following artifacts are licensed by the OW2 Foundation according to the + terms of http://asm.ow2.org/license.html + + org.ow2.asm:asm-commons + org.ow2.asm:asm + + + ------ + Apache + + The following artifacts are ASL2 licensed. + + org.apache.taglibs:taglibs-standard-spec + org.apache.taglibs:taglibs-standard-impl + + + ------ + MortBay + + The following artifacts are ASL2 licensed. Based on selected classes from + following Apache Tomcat jars, all ASL2 licensed. + + org.mortbay.jasper:apache-jsp + org.apache.tomcat:tomcat-jasper + org.apache.tomcat:tomcat-juli + org.apache.tomcat:tomcat-jsp-api + org.apache.tomcat:tomcat-el-api + org.apache.tomcat:tomcat-jasper-el + org.apache.tomcat:tomcat-api + org.apache.tomcat:tomcat-util-scan + org.apache.tomcat:tomcat-util + + org.mortbay.jasper:apache-el + org.apache.tomcat:tomcat-jasper-el + org.apache.tomcat:tomcat-el-api + + + ------ + Mortbay + + The following artifacts are CDDL + GPLv2 with classpath exception. 
+ + https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html + + org.eclipse.jetty.toolchain:jetty-schemas + + ------ + Assorted + + The UnixCrypt.java code implements the one way cryptography used by + Unix systems for simple password protection. Copyright 1996 Aki Yoshida, + modified April 2001 by Iris Van den Broeke, Daniel Deville. + Permission to use, copy, modify and distribute UnixCrypt + for non-commercial or commercial purposes and without fee is + granted provided that the copyright notice appears in all copies./ + +The binary distribution of this product bundles binaries of +Snappy for Java 1.0.4.1, +which has the following notices: + * This product includes software developed by Google + Snappy: http://code.google.com/p/snappy/ (New BSD License) + + This product includes software developed by Apache + PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/ + (Apache 2.0 license) + + This library containd statically linked libstdc++. This inclusion is allowed by + "GCC RUntime Library Exception" + http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html + + == Contributors == + * Tatu Saloranta + * Providing benchmark suite + * Alec Wysoker + * Performance and memory usage improvement + +The binary distribution of this product bundles binaries of +Xerces2 Java Parser 2.9.1, +which has the following notices: + * ========================================================================= + == NOTICE file corresponding to section 4(d) of the Apache License, == + == Version 2.0, in this case for the Apache Xerces Java distribution. == + ========================================================================= + + Apache Xerces Java + Copyright 1999-2007 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + Portions of this software were originally based on the following: + - software copyright (c) 1999, IBM Corporation., http://www.ibm.com. 
+ - software copyright (c) 1999, Sun Microsystems., http://www.sun.com. + - voluntary contributions made by Paul Eng on behalf of the + Apache Software Foundation that were originally developed at iClick, Inc., + software copyright (c) 1999. + + + + +================= htrace-core4-4.0.1-incubating.jar ================= +htrace-core4 +Copyright 2015 The Apache Software Foundation + + + + +================= httpclient-4.5.2.jar ================= +Apache HttpClient +Copyright 1999-2016 The Apache Software Foundation + + + + +================= Jackson 1.9.13 ================= +jackson-jaxrs-1.9.13.jar +jackson-xc-1.9.13.jar +================= +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. + + + + +================= Jetty 6.1.26 ================= +jetty-sslengine-6.1.26.jar +jetty-util-6.1.26.jar +================= +============================================================== + Jetty Web Container + Copyright 1995-2009 Mort Bay Consulting Pty Ltd +============================================================== + +The Jetty Web Container is Copyright Mort Bay Consulting Pty Ltd +unless otherwise noted. It is dual licensed under the apache 2.0 +license and eclipse 1.0 license. Jetty may be distributed under +either license. + +The javax.servlet package used was sourced from the Apache +Software Foundation and is distributed under the apache 2.0 +license. + +The UnixCrypt.java code implements the one way cryptography used by +Unix systems for simple password protection. Copyright 1996 Aki Yoshida, +modified April 2001 by Iris Van den Broeke, Daniel Deville. 
+Permission to use, copy, modify and distribute UnixCrypt +for non-commercial or commercial purposes and without fee is +granted provided that the copyright notice appears in all copies. + + + + +================= log4j-1.2.17.jar ================= +Apache log4j +Copyright 2007 The Apache Software Foundation + + + + +================= netty-3.6.2.Final.jar ================= + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2011 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. + +Also, please refer to each LICENSE..txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. + +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified version of 'JZlib', a re-implementation of +zlib in pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD Style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/protobuf/ + +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ + +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained 
at: + + * LICENSE: + * license/LICENSE.felix.txt (Apache License 2.0) + * HOMEPAGE: + * http://felix.apache.org/ + +This product optionally depends on 'Webbit', a Java event based +WebSocket and HTTP server: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + + + + +================= snappy-1.0.4.1.jar ================= +This product includes software developed by Google + Snappy: http://code.google.com/p/snappy/ (New BSD License) + + +This library containd statically linked libstdc++. This inclusion is allowed by +"GCC RUntime Library Exception" +http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html + +== Contributors == + * Tatu Saloranta + * Providing benchmark suite + * Alec Wysoker + * Performance and memory usage improvement + + + + + +================= zookeeper-3.4.6.jar ================= +Apache ZooKeeper +Copyright 2009-2012 The Apache Software Foundation + + + + + + + +############ BINARY/EXTENSIONS/druid-avro-extensions ############ + +================= avro-1.8.2.jar ================= +Apache Avro +Copyright 2009-2017 The Apache Software Foundation + + + + +================= avro-ipc-1.8.2.jar ================= +Apache Avro IPC +Copyright 2009-2017 The Apache Software Foundation + + + + +================= avro-mapred-1.8.2-hadoop2.jar ================= +Apache Avro +Copyright 2010 The Apache Software Foundation + +Based upon the representations of upstream licensors, it is understood that +portions of the mapreduce API included in the Java implementation are licensed +from various contributors under one or more contributor license agreements to +Odiago, Inc. and were then contributed by Odiago to Apache Avro, which has now +made them available under the Apache 2.0 license. The original file header text +is: + +| Licensed to Odiago, Inc. under one or more contributor license +| agreements. 
See the NOTICE file distributed with this work for +| additional information regarding copyright ownership. Odiago, Inc. +| licenses this file to you under the Apache License, Version 2.0 +| (the "License"); you may not use this file except in compliance +| with the License. You may obtain a copy of the License at +| +| http://www.apache.org/licenses/LICENSE-2.0 +| +| Unless required by applicable law or agreed to in writing, software +| distributed under the License is distributed on an "AS IS" BASIS, +| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +| implied. See the License for the specific language governing +| permissions and limitations under the License. + +The Odiago NOTICE at the time of the contribution: + +| This product includes software developed by Odiago, Inc. +| (http://www.wibidata.com). + + + + +================= commons-collections-3.2.1.jar ================= +Apache Commons Collections +Copyright 2001-2008 The Apache Software Foundation + + + + +================= snappy-1.1.1.3.jar ================= +This product includes software developed by Google + Snappy: http://code.google.com/p/snappy/ (New BSD License) + +This product includes software developed by Apache + PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/ + (Apache 2.0 license) + +This library containd statically linked libstdc++. 
This inclusion is allowed by +"GCC RUntime Library Exception" +http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html + +== Contributors == + * Tatu Saloranta + * Providing benchmark suite + * Alec Wysoker + * Performance and memory usage improvement + + + + +================= velocity-1.7.jar ================= +Apache Velocity + +Copyright (C) 2000-2007 The Apache Software Foundation + + + + +############ BINARY/EXTENSIONS/druid-bloom-filter ############ + +================= hive-storage-api-2.7.0.jar ================= +Hive Storage API +Copyright 2018 The Apache Software Foundation + + + + +############ BINARY/EXTENSIONS/druid-examples ############ + +================= commons-beanutils-1.8.3.jar ================= +Apache Commons BeanUtils +Copyright 2000-2010 The Apache Software Foundation + + + + +================= commons-validator-1.4.0.jar ================= +Apache Commons Validator +Copyright 2001-2012 The Apache Software Foundation + + + + +############ BINARY/EXTENSIONS/druid-kafka-eight ############ + +================= kafka-clients-0.8.2.1.jar ================= +Apache Kafka +Copyright 2012 The Apache Software Foundation. + + + + +================= kafka_2.10-0.8.2.1.jar ================= +Apache Kafka +Copyright 2012 The Apache Software Foundation. + + + + +================= metrics-core-2.2.0.jar ================= +Metrics +Copyright 2010-2012 Coda Hale and Yammer, Inc. + +This product includes software developed by Coda Hale and Yammer, Inc. 
+ +This product includes code derived from the JSR-166 project (ThreadLocalRandom), which was released +with the following comments: + + Written by Doug Lea with assistance from members of JCP JSR-166 + Expert Group and released to the public domain, as explained at + http://creativecommons.org/publicdomain/zero/1.0/ + + + + +================= snappy-1.1.1.6.jar ================= +This product includes software developed by Google + Snappy: http://code.google.com/p/snappy/ (New BSD License) + +This product includes software developed by Apache + PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/ + (Apache 2.0 license) + +This library containd statically linked libstdc++. This inclusion is allowed by +"GCC RUntime Library Exception" +http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html + +== Contributors == + * Tatu Saloranta + * Providing benchmark suite + * Alec Wysoker + * Performance and memory usage improvement + + + + +############ BINARY/EXTENSIONS/druid-kafka-indexing-service ############ + +================= kafka-clients-0.10.2.2.jar ================= +Apache Kafka +Copyright 2018 The Apache Software Foundation. + +This distribution has a binary dependency on jersey, which is available under the CDDL +License. The source code of jersey can be found at https://github.com/jersey/jersey/. + + + + +================= Jackson 1.9.2 ================= +jackson-jaxrs-1.9.2.jar +jackson-xc-1.9.2.jar +================= +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. 
+ + + + +================= snappy-1.1.2.6.jar ================= +This product includes software developed by Google + Snappy: http://code.google.com/p/snappy/ (New BSD License) + +This product includes software developed by Apache + PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/ + (Apache 2.0 license) + +This library containd statically linked libstdc++. This inclusion is allowed by +"GCC RUntime Library Exception" +http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html + +== Contributors == + * Tatu Saloranta + * Providing benchmark suite + * Alec Wysoker + * Performance and memory usage improvement + + + + +############ BINARY/EXTENSIONS/druid-kerberos ############ + +================= jets3t-0.9.0.jar ================= + ========================================================================= + == NOTICE file corresponding to section 4(d) of the Apache License, == + == Version 2.0, in this case for the distribution of jets3t. == + ========================================================================= + + This product includes software developed by: + + The Apache Software Foundation (http://www.apache.org/). + + The ExoLab Project (http://www.exolab.org/) + + Sun Microsystems (http://www.sun.com/) + + Codehaus (http://castor.codehaus.org) + + Safehaus (http://jug.safehaus.org/Home) + + Tatu Saloranta (http://wiki.fasterxml.com/TatuSaloranta) + + + + +############ BINARY/EXTENSIONS/druid-parquet ############ + +================= parquet-avro-1.10.0.jar ================= +Apache Parquet MR (Incubating) +Copyright 2014-2015 The Apache Software Foundation + +-------------------------------------------------------------------------------- + +This product includes code from Apache Avro, which includes the following in +its NOTICE file: + + Apache Avro + Copyright 2010-2015 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). 
+ + + + +================= parquet-format-2.4.0.jar ================= +Apache Parquet Format +Copyright 2017 The Apache Software Foundation + + + + +================= parquet-jackson-1.10.0.jar ================= +This product currently only contains code developed by authors +of specific components, as identified by the source code files; +if such notes are missing files have been created by +Tatu Saloranta. + +For additional credits (generally to people who reported problems) +see CREDITS file. + + + + +================= snappy-1.1.7.2.jar ================= +This product includes software developed by Google + Snappy: http://code.google.com/p/snappy/ (New BSD License) + +This product includes software developed by Apache + PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/ + (Apache 2.0 license) + +This library containd statically linked libstdc++. This inclusion is allowed by +"GCC RUntime Library Exception" +http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html + +== Contributors == + * Tatu Saloranta + * Providing benchmark suite + * Alec Wysoker + * Performance and memory usage improvement diff --git a/README.md b/README.md index 429589d114e5..e44c4c13992a 100644 --- a/README.md +++ b/README.md @@ -71,3 +71,4 @@ For instructions on building Druid from source, see [docs/content/development/bu ### Contributing Please follow the guidelines listed [here](http://druid.io/community/). 
+ diff --git a/aws-common/pom.xml b/aws-common/pom.xml index 2a65b70c42e6..f8cbc3b51dbd 100644 --- a/aws-common/pom.xml +++ b/aws-common/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT @@ -47,7 +47,7 @@ org.checkerframework - checker + checker-qual ${checkerframework.version} diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml index 685ffe051311..9594c52e7c3e 100644 --- a/benchmarks/pom.xml +++ b/benchmarks/pom.xml @@ -27,7 +27,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT diff --git a/core/pom.xml b/core/pom.xml index 9dbe3de96caf..abb3269a41de 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -18,9 +18,7 @@ ~ under the License. --> - + 4.0.0 @@ -31,7 +29,7 @@ druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT diff --git a/core/src/main/java/org/apache/druid/indexer/TaskStatusPlus.java b/core/src/main/java/org/apache/druid/indexer/TaskStatusPlus.java index 4912900fb88f..34733af08bb8 100644 --- a/core/src/main/java/org/apache/druid/indexer/TaskStatusPlus.java +++ b/core/src/main/java/org/apache/druid/indexer/TaskStatusPlus.java @@ -23,7 +23,6 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; import org.apache.druid.java.util.common.RE; -import org.apache.druid.java.util.common.logger.Logger; import org.joda.time.DateTime; import javax.annotation.Nullable; @@ -31,8 +30,6 @@ public class TaskStatusPlus { - private static final Logger log = new Logger(TaskStatusPlus.class); - private final String id; private final String type; private final DateTime createdTime; @@ -74,7 +71,6 @@ public TaskStatusPlus( ); } - @JsonCreator public TaskStatusPlus( @JsonProperty("id") String id, diff --git a/core/src/main/java/org/apache/druid/java/util/common/DateTimes.java b/core/src/main/java/org/apache/druid/java/util/common/DateTimes.java index 94f1295d2e69..de1fc403b010 100644 --- 
a/core/src/main/java/org/apache/druid/java/util/common/DateTimes.java +++ b/core/src/main/java/org/apache/druid/java/util/common/DateTimes.java @@ -107,7 +107,17 @@ public static DateTime utc(long instant) public static DateTime of(String instant) { - return new DateTime(instant, ISOChronology.getInstanceUTC()); + try { + return new DateTime(instant, ISOChronology.getInstanceUTC()); + } + catch (IllegalArgumentException ex) { + try { + return new DateTime(Long.valueOf(instant), ISOChronology.getInstanceUTC()); + } + catch (IllegalArgumentException ex2) { + throw ex; + } + } } public static DateTime of( diff --git a/core/src/main/java/org/apache/druid/java/util/common/URIs.java b/core/src/main/java/org/apache/druid/java/util/common/URIs.java new file mode 100644 index 000000000000..f2476e71f094 --- /dev/null +++ b/core/src/main/java/org/apache/druid/java/util/common/URIs.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.java.util.common; + +import com.google.common.base.Preconditions; + +import java.net.URI; + +public final class URIs +{ + public static URI parse(String strUri, String defaultScheme) + { + Preconditions.checkNotNull(strUri, "strUri"); + Preconditions.checkNotNull(defaultScheme, "defaultScheme"); + final String[] tokens = strUri.split("://"); + if (tokens.length == 1) { + return URI.create(StringUtils.format("%s://%s", defaultScheme, strUri)); + } else { + return URI.create(strUri); + } + } + + private URIs() + { + } +} diff --git a/core/src/main/java/org/apache/druid/timeline/LogicalSegment.java b/core/src/main/java/org/apache/druid/timeline/LogicalSegment.java index 673abb01b8e7..714eb6715685 100644 --- a/core/src/main/java/org/apache/druid/timeline/LogicalSegment.java +++ b/core/src/main/java/org/apache/druid/timeline/LogicalSegment.java @@ -22,8 +22,26 @@ import org.apache.druid.guice.annotations.PublicApi; import org.joda.time.Interval; +/** + * A logical segment can represent an entire segment or a part of a segment. As a result, it can have a different + * interval from its actual base segment. {@link #getInterval()} and {@link #getTrueInterval()} return the interval of + * this logical segment and the interval of the base segment, respectively. + * + * For example, suppose we have 2 segments as below: + * + * - Segment A has an interval of 2017/2018. + * - Segment B has an interval of 2017-08-01/2017-08-02. 
+ * + * For these segments, {@link VersionedIntervalTimeline#lookup} returns 3 segments as below: + * + * - interval of 2017/2017-08-01 (trueInterval: 2017/2018) + * - interval of 2017-08-01/2017-08-02 (trueInterval: 2017-08-01/2017-08-02) + * - interval of 2017-08-02/2018 (trueInterval: 2017/2018) + */ @PublicApi public interface LogicalSegment { Interval getInterval(); + + Interval getTrueInterval(); } diff --git a/core/src/main/java/org/apache/druid/timeline/TimelineObjectHolder.java b/core/src/main/java/org/apache/druid/timeline/TimelineObjectHolder.java index 8e95fc623f77..3feca88495c3 100644 --- a/core/src/main/java/org/apache/druid/timeline/TimelineObjectHolder.java +++ b/core/src/main/java/org/apache/druid/timeline/TimelineObjectHolder.java @@ -19,6 +19,7 @@ package org.apache.druid.timeline; +import com.google.common.annotations.VisibleForTesting; import org.apache.druid.timeline.partition.PartitionHolder; import org.joda.time.Interval; @@ -27,16 +28,25 @@ public class TimelineObjectHolder implements LogicalSegment { private final Interval interval; + private final Interval trueInterval; private final VersionType version; private final PartitionHolder object; + @VisibleForTesting + public TimelineObjectHolder(Interval interval, VersionType version, PartitionHolder object) + { + this(interval, interval, version, object); + } + public TimelineObjectHolder( Interval interval, + Interval trueInterval, VersionType version, PartitionHolder object ) { this.interval = interval; + this.trueInterval = trueInterval; this.version = version; this.object = object; } @@ -47,6 +57,12 @@ public Interval getInterval() return interval; } + @Override + public Interval getTrueInterval() + { + return trueInterval; + } + public VersionType getVersion() { return version; @@ -62,6 +78,7 @@ public String toString() { return "TimelineObjectHolder{" + "interval=" + interval + + ", trueInterval=" + trueInterval + ", version=" + version + ", object=" + object + '}'; diff --git 
a/core/src/main/java/org/apache/druid/timeline/VersionedIntervalTimeline.java b/core/src/main/java/org/apache/druid/timeline/VersionedIntervalTimeline.java index 85855ad8ee01..36c177dfb0f0 100644 --- a/core/src/main/java/org/apache/druid/timeline/VersionedIntervalTimeline.java +++ b/core/src/main/java/org/apache/druid/timeline/VersionedIntervalTimeline.java @@ -300,6 +300,7 @@ public TimelineObjectHolder last() private TimelineObjectHolder timelineEntryToObjectHolder(TimelineEntry entry) { return new TimelineObjectHolder<>( + entry.getTrueInterval(), entry.getTrueInterval(), entry.getVersion(), new PartitionHolder<>(entry.getPartitionHolder()) @@ -586,10 +587,11 @@ private List> lookup(Interval inte if (timelineInterval.overlaps(interval)) { retVal.add( - new TimelineObjectHolder( + new TimelineObjectHolder<>( timelineInterval, + val.getTrueInterval(), val.getVersion(), - new PartitionHolder(val.getPartitionHolder()) + new PartitionHolder<>(val.getPartitionHolder()) ) ); } @@ -604,8 +606,9 @@ private List> lookup(Interval inte .isAfter(firstEntry.getInterval().getStart())) { retVal.set( 0, - new TimelineObjectHolder( + new TimelineObjectHolder<>( new Interval(interval.getStart(), firstEntry.getInterval().getEnd()), + firstEntry.getTrueInterval(), firstEntry.getVersion(), firstEntry.getObject() ) @@ -616,8 +619,9 @@ private List> lookup(Interval inte if (interval.overlaps(lastEntry.getInterval()) && interval.getEnd().isBefore(lastEntry.getInterval().getEnd())) { retVal.set( retVal.size() - 1, - new TimelineObjectHolder( + new TimelineObjectHolder<>( new Interval(lastEntry.getInterval().getStart(), interval.getEnd()), + lastEntry.getTrueInterval(), lastEntry.getVersion(), lastEntry.getObject() ) diff --git a/core/src/test/java/org/apache/druid/java/util/common/DateTimesTest.java b/core/src/test/java/org/apache/druid/java/util/common/DateTimesTest.java index 61bc746ca291..15f30336c851 100644 --- a/core/src/test/java/org/apache/druid/java/util/common/DateTimesTest.java 
+++ b/core/src/test/java/org/apache/druid/java/util/common/DateTimesTest.java @@ -37,4 +37,23 @@ public void testCommonDateTimePattern() Assert.assertTrue(DateTimes.COMMON_DATE_TIME_PATTERN.matcher(dt.toString()).matches()); } } + + @Test + public void testStringToDateTimeConversion() + { + String seconds = "2018-01-30T06:00:00"; + DateTime dt2 = DateTimes.of(seconds); + Assert.assertEquals("2018-01-30T06:00:00.000Z", dt2.toString()); + + String milis = "1517292000000"; + DateTime dt1 = DateTimes.of(milis); + Assert.assertEquals("2018-01-30T06:00:00.000Z", dt1.toString()); + } + + @Test(expected = IllegalArgumentException.class) + public void testStringToDateTimeConverstion_RethrowInitialException() + { + String invalid = "51729200AZ"; + DateTimes.of(invalid); + } } diff --git a/core/src/test/java/org/apache/druid/java/util/common/URIsTest.java b/core/src/test/java/org/apache/druid/java/util/common/URIsTest.java new file mode 100644 index 000000000000..bb65e8e59817 --- /dev/null +++ b/core/src/test/java/org/apache/druid/java/util/common/URIsTest.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.java.util.common; + +import org.junit.Assert; +import org.junit.Test; + +import java.net.URI; + +public class URIsTest +{ + @Test + public void testFullUri() + { + final String strUri = "https://test-user@127.0.0.1:8000/test/path?test-query#test-fragment"; + final URI uri = URIs.parse(strUri, "http"); + + Assert.assertEquals("https", uri.getScheme()); + Assert.assertEquals("test-user", uri.getUserInfo()); + Assert.assertEquals("127.0.0.1", uri.getHost()); + Assert.assertEquals(8000, uri.getPort()); + Assert.assertEquals("/test/path", uri.getPath()); + Assert.assertEquals("test-query", uri.getQuery()); + Assert.assertEquals("test-fragment", uri.getFragment()); + } + + @Test + public void testWithoutScheme() + { + final String strUri = "test-user@127.0.0.1:8000/test/path?test-query#test-fragment"; + final URI uri = URIs.parse(strUri, "http"); + + Assert.assertEquals("http", uri.getScheme()); + Assert.assertEquals("test-user", uri.getUserInfo()); + Assert.assertEquals("127.0.0.1", uri.getHost()); + Assert.assertEquals(8000, uri.getPort()); + Assert.assertEquals("/test/path", uri.getPath()); + Assert.assertEquals("test-query", uri.getQuery()); + Assert.assertEquals("test-fragment", uri.getFragment()); + } + + @Test + public void testSimpleUri() + { + final String strUri = "127.0.0.1:8000"; + final URI uri = URIs.parse(strUri, "https"); + + Assert.assertEquals("https", uri.getScheme()); + Assert.assertNull(uri.getUserInfo()); + Assert.assertEquals("127.0.0.1", uri.getHost()); + Assert.assertEquals(8000, uri.getPort()); + } +} diff --git a/distribution/pom.xml b/distribution/pom.xml index bf069aa38f94..a8746609a840 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 pom @@ -31,7 +30,7 @@ druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT @@ -77,6 +76,27 @@ false + + org.apache.maven.plugins + maven-antrun-plugin + 1.8 + + + prepare-package + + + + + + + + + + run + + + + @@ -105,7 +125,7 @@ java -classpath - + -Ddruid.extensions.loadList=[] -Ddruid.extensions.directory=${project.build.directory}/extensions @@ -251,7 +271,7 @@ java -classpath - + -Ddruid.extensions.loadList=[] -Ddruid.extensions.directory=${project.build.directory}/extensions diff --git a/distribution/src/assembly/assembly.xml b/distribution/src/assembly/assembly.xml index ab3e5e0d787d..856f59316087 100644 --- a/distribution/src/assembly/assembly.xml +++ b/distribution/src/assembly/assembly.xml @@ -112,6 +112,13 @@ quickstart/tutorial/conf/druid/middleManager + + ../examples/quickstart/tutorial/conf/druid/router + + * + + quickstart/tutorial/conf/druid/router + ../examples/quickstart/tutorial/conf/tranquility @@ -199,6 +206,13 @@ conf/druid/middleManager + + ../examples/conf/druid/router + + * + + conf/druid/router + ../examples/conf/tranquility @@ -218,12 +232,31 @@ ../ - LICENSE - NOTICE DISCLAIMER + licenses/** + + + ../LICENSE.BINARY + . + LICENSE + keep + + + ../NOTICE.BINARY + . + NOTICE + keep + + + ../README.BINARY + . 
+ README + keep + + false diff --git a/distribution/src/assembly/source-assembly.xml b/distribution/src/assembly/source-assembly.xml index 59903d4fe987..20d92ede5e9d 100644 --- a/distribution/src/assembly/source-assembly.xml +++ b/distribution/src/assembly/source-assembly.xml @@ -46,8 +46,21 @@ .gitignore .travis.yml + README.BINARY publications/** upload.sh + web-console/node_modules/** + web-console/node/** + web-console/resources/** + web-console/public/** + web-console/assets/** + web-console/lib/*.css + web-console/coordinator-console/** + web-console/pages/** + web-console/index.html + web-console/.tscache/** + web-console/tscommand-*.tmp.txt + web-console/licenses.json diff --git a/docs/_bin/generate-license-dependency-reports.py b/docs/_bin/generate-license-dependency-reports.py new file mode 100755 index 000000000000..506c3867c390 --- /dev/null +++ b/docs/_bin/generate-license-dependency-reports.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import subprocess +import sys + + +existing_jar_dict_notice = {} + +def main(): + if len(sys.argv) != 3: + sys.stderr.write('usage: program \n') + sys.exit(1) + + druid_path = sys.argv[1] + tmp_path = sys.argv[2] + + generate_reports(druid_path, tmp_path) + +def generate_reports(druid_path, tmp_path): + license_main_path = tmp_path + "/license-reports" + license_ext_path = tmp_path + "/license-reports/ext" + os.mkdir(license_main_path) + os.mkdir(license_ext_path) + + print("********** Generating main LICENSE report.... **********") + os.chdir(druid_path) + command = "mvn -Pdist -Ddependency.locations.enabled=false project-info-reports:dependencies" + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + command = "cp -r distribution/target/site {}/site".format(license_main_path) + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + + sys.exit() + + print("********** Generating extension LICENSE reports.... **********") + extension_dirs = os.listdir("extensions-core") + for extension_dir in extension_dirs: + full_extension_dir = druid_path + "/extensions-core/" + extension_dir + if not os.path.isdir(full_extension_dir): + continue + + print("--- Generating report for {}... 
---".format(extension_dir)) + + extension_report_dir = "{}/{}".format(license_ext_path, extension_dir) + os.mkdir(extension_report_dir) + os.chdir(full_extension_dir) + + try: + command = "mvn -Ddependency.locations.enabled=false project-info-reports:dependencies" + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + command = "cp -r target/site {}/site".format(extension_report_dir) + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + except: + print("Encountered error when generating report for: " + extension_dir) + + os.chdir("..") + +if __name__ == "__main__": + try: + main() + except KeyboardInterrupt: + print('Interrupted, closing.') \ No newline at end of file diff --git a/docs/_bin/jar-notice-lister.py b/docs/_bin/jar-notice-lister.py new file mode 100755 index 000000000000..f45f1ee00cbf --- /dev/null +++ b/docs/_bin/jar-notice-lister.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import subprocess +import sys + +existing_jar_dict_notice = {} + +def main(): + if len(sys.argv) != 3: + sys.stderr.write('usage: program \n') + sys.exit(1) + + druid_path = sys.argv[1] + tmp_path = sys.argv[2] + + # copy everything in lib/ to the staging dir + lib_path = druid_path + "/lib" + tmp_lib_path = tmp_path + "/1-lib" + os.mkdir(tmp_lib_path) + command = "cp -r {}/* {}".format(lib_path, tmp_lib_path) + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + + # copy hadoop deps to the staging dir + hdeps_path = druid_path + "/hadoop-dependencies" + tmp_hdeps_path = tmp_path + "/2-hdeps" + os.mkdir(tmp_hdeps_path) + command = "cp -r {}/* {}".format(hdeps_path, tmp_hdeps_path) + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + + + # copy all extension folders to the staging dir + ext_path = druid_path + "/extensions" + tmp_ext_path = tmp_path + "/3-ext" + os.mkdir(tmp_ext_path) + command = "cp -r {}/* {}".format(ext_path, tmp_ext_path) + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + + + get_notices(tmp_path) + +def get_notices(tmp_jar_path): + print("********** Scanning directory for NOTICE" + tmp_jar_path + " **********") + jar_files = os.listdir(tmp_jar_path) + os.chdir(tmp_jar_path) + + for jar_file in jar_files: + if os.path.isdir(jar_file): + get_notices(jar_file) + continue + elif not os.path.isfile(jar_file) or ".jar" not in jar_file: + continue + + if existing_jar_dict_notice.get(jar_file) is not None: + print("---------- Already saw file: " + jar_file) + continue + else: + existing_jar_dict_notice[jar_file] = True + + try: + command = "jar tf {} | grep NOTICE".format(jar_file) + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + except: + print("---------- no NOTICE file found in: " + jar_file) + continue + + for line in outstr.splitlines(): + try: + command = "jar xf {} {}".format(jar_file, line) + outstr = subprocess.check_output(command, 
shell=True).decode('UTF-8') + + command = "mv {} {}.NOTICE-FILE".format(line, jar_file) + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + + command = "cat {}.NOTICE-FILE".format(jar_file) + outstr = subprocess.check_output(command, shell=True).decode('UTF-8') + print("================= " + jar_file + " =================") + print(outstr) + print("\n") + except: + print("Error while grabbing NOTICE file: " + jar_file) + continue + + os.chdir("..") + +if __name__ == "__main__": + try: + main() + except KeyboardInterrupt: + print('Interrupted, closing.') \ No newline at end of file diff --git a/docs/_bin/npm-license-helper.py b/docs/_bin/npm-license-helper.py new file mode 100755 index 000000000000..b6db90b05ac2 --- /dev/null +++ b/docs/_bin/npm-license-helper.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import re +import shutil +import sys + +# Helper program for generating LICENSE contents for dependencies under web-console. +# Generates entries for MIT-licensed deps and dumps info for non-MIT deps. +# Uses JSON output from https://www.npmjs.com/package/license-checker. 
+ +if len(sys.argv) != 3: + sys.stderr.write('usage: program \n') + sys.stderr.write('Run the following command in web-console/ to generate the input license report:\n') + sys.stderr.write(' license-checker --production --json\n') + sys.exit(1) + +license_report_path = sys.argv[1] +license_output_path = sys.argv[2] + +non_mit_licenses = [] + +license_entry_template = "This product bundles {} version {}, copyright {},\n which is available under an MIT license. For details, see licenses/{}.MIT.\n" + +with open(license_report_path, 'r') as license_report_file: + license_report = json.load(license_report_file) + for dependency_name_version in license_report: + dependency = license_report[dependency_name_version] + + match_result = re.match("(.+)@(.+)", dependency_name_version) + dependency_name = match_result.group(1) + nice_dependency_name = dependency_name.replace("/", "-") + dependency_ver = match_result.group(2) + + try: + licenseType = dependency["licenses"] + licenseFile = dependency["licenseFile"] + except: + print("No license file for {}".format(dependency_name_version)) + + try: + publisher = dependency["publisher"] + except: + publisher = "" + + if licenseType != "MIT": + non_mit_licenses.append(dependency) + continue + + fullDependencyPath = dependency["path"] + partialDependencyPath = re.match(".*/(web-console.*)", fullDependencyPath).group(1) + + print(license_entry_template.format(dependency_name, dependency_ver, publisher, nice_dependency_name)) + shutil.copy2(licenseFile, license_output_path + "/" + nice_dependency_name + ".MIT") + + print("\nNon-MIT licenses:\n--------------------\n") + for non_mit_license in non_mit_licenses: + print(non_mit_license) diff --git a/docs/_bin/web-console-dep-lister.py b/docs/_bin/web-console-dep-lister.py new file mode 100755 index 000000000000..3ee66a10f2c8 --- /dev/null +++ b/docs/_bin/web-console-dep-lister.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one or 
more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import sys + +# Helper program for listing the deps in the compiled web-console-.js file in druid-console.jar + +if len(sys.argv) != 2: + sys.stderr.write('usage: program \n') + sys.exit(1) + +web_console_path = sys.argv[1] + +dep_dict = {} +with open(web_console_path, 'r') as web_console_file: + for line in web_console_file.readlines(): + match_result = re.match('/\*\*\*/ "\./node_modules/([\@\-a-zA-Z0-9_]+)/.*', line) + if match_result != None: + dependency_name = match_result.group(1) + dep_dict[dependency_name] = True + for dep in dep_dict: + print(dep) \ No newline at end of file diff --git a/docs/content/comparisons/druid-vs-kudu.md b/docs/content/comparisons/druid-vs-kudu.md index 2af14969aee3..8d8b70adafb9 100644 --- a/docs/content/comparisons/druid-vs-kudu.md +++ b/docs/content/comparisons/druid-vs-kudu.md @@ -37,4 +37,4 @@ fast in Druid, whereas updates of older data is higher latency. This is by desig and does not need to be updated too frequently. Kudu supports arbitrary primary keys with uniqueness constraints, and efficient lookup by ranges of those keys. Kudu chooses not to include the execution engine, but supports sufficient operations so as to allow node-local processing from the execution engines. 
This means that Kudu can support multiple frameworks on the same data (eg MR, Spark, and SQL). -Druid includes its own query layer that allows it to push down aggregations and computations directly to data nodes for faster query processing. +Druid includes its own query layer that allows it to push down aggregations and computations directly to data processes for faster query processing. diff --git a/docs/content/comparisons/druid-vs-redshift.md b/docs/content/comparisons/druid-vs-redshift.md index 86e20768444d..fc4fe57c8f2f 100644 --- a/docs/content/comparisons/druid-vs-redshift.md +++ b/docs/content/comparisons/druid-vs-redshift.md @@ -42,7 +42,7 @@ Druid’s write semantics are not as fluid and does not support full joins (we s ### Data distribution model -Druid’s data distribution is segment-based and leverages a highly available "deep" storage such as S3 or HDFS. Scaling up (or down) does not require massive copy actions or downtime; in fact, losing any number of Historical nodes does not result in data loss because new Historical nodes can always be brought up by reading data from "deep" storage. +Druid’s data distribution is segment-based and leverages a highly available "deep" storage such as S3 or HDFS. Scaling up (or down) does not require massive copy actions or downtime; in fact, losing any number of Historical processes does not result in data loss because new Historical processes can always be brought up by reading data from "deep" storage. To contrast, ParAccel’s data distribution model is hash-based. Expanding the cluster requires re-hashing the data across the nodes, making it difficult to perform without taking downtime. Amazon’s Redshift works around this issue with a multi-step process: @@ -52,7 +52,7 @@ To contrast, ParAccel’s data distribution model is hash-based. Expanding the c ### Replication strategy -Druid employs segment-level data distribution meaning that more nodes can be added and rebalanced without having to perform a staged swap. 
The replication strategy also makes all replicas available for querying. Replication is done automatically and without any impact to performance. +Druid employs segment-level data distribution meaning that more processes can be added and rebalanced without having to perform a staged swap. The replication strategy also makes all replicas available for querying. Replication is done automatically and without any impact to performance. ParAccel’s hash-based distribution generally means that replication is conducted via hot spares. This puts a numerical limit on the number of nodes you can lose without losing data, and this replication strategy often does not allow the hot spare to help share query load. diff --git a/docs/content/configuration/index.md b/docs/content/configuration/index.md index c63eb41994e6..712e1b732755 100644 --- a/docs/content/configuration/index.md +++ b/docs/content/configuration/index.md @@ -51,7 +51,7 @@ This page documents all of the configuration properties for each Druid service t * [Master Server](#master-server) * [Coordinator](#coordinator) * [Static Configuration](#static-configuration) - * [Node Config](#coordinator-node-config) + * [Process Config](#coordinator-process-config) * [Coordinator Operation](#coordinator-operation) * [Segment Management](#segment-management) * [Metadata Retrieval](#metadata-retrieval) @@ -60,34 +60,34 @@ This page documents all of the configuration properties for each Druid service t * [Compaction](#compaction-dynamic-configuration) * [Overlord](#overlord) * [Static Configuration](#overlord-static-configuration) - * [Node Config](#overlord-node-config) + * [Process Config](#overlord-process-config) * [Overlord Operations](#overlord-operations) * [Dynamic Configuration](#overlord-dynamic-configuration) * [Worker Select Strategy](#worker-select-strategy) * [Autoscaler](#autoscaler) * [Data Server](#data-server) * [MiddleManager & Peons](#middlemanager-and-peons) - * [Node Config](#middlemanager-node-config) + * 
[Process Config](#middlemanager-process-config) * [MiddleManager Configuration](#middlemanager-configuration) * [Peon Processing](#peon-processing) * [Peon Query Configuration](#peon-query-configuration) * [Caching](#peon-caching) * [Additional Peon Configuration](#additional-peon-configuration) * [Historical](#historical) - * [Node Configuration](#historical-node-config) + * [Process Configuration](#historical-process-config) * [General Configuration](#historical-general-configuration) * [Query Configs](#historical-query-configs) * [Caching](#historical-caching) * [Query Server](#query-server) * [Broker](#broker) - * [Node Config](#broker-node-configs) + * [Process Config](#broker-process-configs) * [Query Configuration](#broker-query-configuration) * [SQL](#sql) * [Caching](#broker-caching) * [Segment Discovery](#segment-discovery) * [Caching](#cache-configuration) * [General Query Configuration](#general-query-configuration) - * [Realtime nodes (Deprecated)](#realtime-nodes) + * [Realtime processes (Deprecated)](#realtime-processes) ## Recommended Configuration File Organization @@ -159,7 +159,7 @@ Many of Druid's external dependencies can be plugged in as modules. Extensions c |Property|Description|Default| |--------|-----------|-------| -|`druid.modules.excludeList`|A JSON array of canonical class names (e. g. `"org.apache.druid.somepackage.SomeModule"`) of module classes which shouldn't be loaded, even if they are found in extensions specified by `druid.extensions.loadList`, or in the list of core modules specified to be loaded on a particular Druid node type. Useful when some useful extension contains some module, which shouldn't be loaded on some Druid node type because some dependencies of that module couldn't be satisfied.|[]| +|`druid.modules.excludeList`|A JSON array of canonical class names (e. g. 
`"org.apache.druid.somepackage.SomeModule"`) of module classes which shouldn't be loaded, even if they are found in extensions specified by `druid.extensions.loadList`, or in the list of core modules specified to be loaded on a particular Druid process type. Useful when some useful extension contains some module, which shouldn't be loaded on some Druid process type because some dependencies of that module couldn't be satisfied.|[]| ### Zookeeper We recommend just setting the base ZK path and the ZK service host, but all ZK paths that Druid uses can be overwritten to absolute paths. @@ -187,11 +187,11 @@ Druid interacts with ZK through a set of standard path configurations. We recomm |--------|-----------|-------| |`druid.zk.paths.base`|Base Zookeeper path.|`/druid`| |`druid.zk.paths.propertiesPath`|Zookeeper properties path.|`${druid.zk.paths.base}/properties`| -|`druid.zk.paths.announcementsPath`|Druid node announcement path.|`${druid.zk.paths.base}/announcements`| -|`druid.zk.paths.liveSegmentsPath`|Current path for where Druid nodes announce their segments.|`${druid.zk.paths.base}/segments`| -|`druid.zk.paths.loadQueuePath`|Entries here cause Historical nodes to load and drop segments.|`${druid.zk.paths.base}/loadQueue`| +|`druid.zk.paths.announcementsPath`|Druid process announcement path.|`${druid.zk.paths.base}/announcements`| +|`druid.zk.paths.liveSegmentsPath`|Current path for where Druid processes announce their segments.|`${druid.zk.paths.base}/segments`| +|`druid.zk.paths.loadQueuePath`|Entries here cause Historical processes to load and drop segments.|`${druid.zk.paths.base}/loadQueue`| |`druid.zk.paths.coordinatorPath`|Used by the Coordinator for leader election.|`${druid.zk.paths.base}/coordinator`| -|`druid.zk.paths.servedSegmentsPath`|@Deprecated. Legacy path for where Druid nodes announce their segments.|`${druid.zk.paths.base}/servedSegments`| +|`druid.zk.paths.servedSegmentsPath`|@Deprecated. 
Legacy path for where Druid processes announce their segments.|`${druid.zk.paths.base}/servedSegments`| The indexing service also uses its own set of paths. These configs can be included in the common configuration. @@ -238,7 +238,7 @@ Note that `druid.zk.service.host` is used as a backup in case an Exhibitor insta |`druid.enableTlsPort`|Enable/Disable HTTPS connector.|`false`| Although not recommended but both HTTP and HTTPS connectors can be enabled at a time and respective ports are configurable using `druid.plaintextPort` -and `druid.tlsPort` properties on each node. Please see `Configuration` section of individual nodes to check the valid and default values for these ports. +and `druid.tlsPort` properties on each process. Please see `Configuration` section of individual processes to check the valid and default values for these ports. #### Jetty Server TLS Configuration @@ -299,7 +299,7 @@ For configuration options for specific auth extensions, please refer to the exte ### Startup Logging -All nodes can log debugging information on startup. +All processes can log debugging information on startup. |Property|Description|Default| |--------|-----------|-------| @@ -310,7 +310,7 @@ Note that some sensitive information may be logged if these settings are enabled ### Request Logging -All nodes that can serve queries can also log the query requests they see. Broker nodes can additionally log the SQL requests (both from HTTP and JDBC) they see. +All processes that can serve queries can also log the query requests they see. Broker processes can additionally log the SQL requests (both from HTTP and JDBC) they see. |Property|Description|Default| |--------|-----------|-------| @@ -324,7 +324,7 @@ Daily request logs are stored on disk. 
|Property|Description|Default| |--------|-----------|-------| -|`druid.request.logging.dir`|Historical, Realtime and Broker nodes maintain request logs of all of the requests they get (interacton is via POST, so normal request logs don’t generally capture information about the actual query), this specifies the directory to store the request logs in|none| +|`druid.request.logging.dir`|Historical, Realtime and Broker processes maintain request logs of all of the requests they get (interacton is via POST, so normal request logs don’t generally capture information about the actual query), this specifies the directory to store the request logs in|none| |`druid.request.logging.filePattern`|[Joda datetime format](http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) for each file|"yyyy-MM-dd'.log'"| The format of request logs is TSV, one line per requests, with five fields: timestamp, remote\_addr, native\_query, query\_context, sql\_query. @@ -397,25 +397,25 @@ Switching Request Logger routes native query's request logs to one request logge ### Enabling Metrics -Druid nodes periodically emit metrics and different metrics monitors can be included. Each node can overwrite the default list of monitors. +Druid processes periodically emit metrics and different metrics monitors can be included. Each process can overwrite the default list of monitors. |Property|Description|Default| |--------|-----------|-------| |`druid.monitoring.emissionPeriod`|How often metrics are emitted.|PT1M| -|`druid.monitoring.monitors`|Sets list of Druid monitors used by a node. See below for names and more information. For example, you can specify monitors for a Broker with `druid.monitoring.monitors=["org.apache.druid.java.util.metrics.SysMonitor","org.apache.druid.java.util.metrics.JvmMonitor"]`.|none (no monitors)| +|`druid.monitoring.monitors`|Sets list of Druid monitors used by a process. See below for names and more information. 
For example, you can specify monitors for a Broker with `druid.monitoring.monitors=["org.apache.druid.java.util.metrics.SysMonitor","org.apache.druid.java.util.metrics.JvmMonitor"]`.|none (no monitors)| The following monitors are available: |Name|Description| |----|-----------| -|`org.apache.druid.client.cache.CacheMonitor`|Emits metrics (to logs) about the segment results cache for Historical and Broker nodes. Reports typical cache statistics include hits, misses, rates, and size (bytes and number of entries), as well as timeouts and and errors.| +|`org.apache.druid.client.cache.CacheMonitor`|Emits metrics (to logs) about the segment results cache for Historical and Broker processes. Reports typical cache statistics include hits, misses, rates, and size (bytes and number of entries), as well as timeouts and and errors.| |`org.apache.druid.java.util.metrics.SysMonitor`|This uses the [SIGAR library](https://github.com/hyperic/sigar) to report on various system activities and statuses.| -|`org.apache.druid.server.metrics.HistoricalMetricsMonitor`|Reports statistics on Historical nodes.| +|`org.apache.druid.server.metrics.HistoricalMetricsMonitor`|Reports statistics on Historical processes.| |`org.apache.druid.java.util.metrics.JvmMonitor`|Reports various JVM-related statistics.| |`org.apache.druid.java.util.metrics.JvmCpuMonitor`|Reports statistics of CPU consumption by the JVM.| |`org.apache.druid.java.util.metrics.CpuAcctDeltaMonitor`|Reports consumed CPU as per the cpuacct cgroup.| |`org.apache.druid.java.util.metrics.JvmThreadsMonitor`|Reports Thread statistics in the JVM, like numbers of total, daemon, started, died threads.| -|`org.apache.druid.segment.realtime.RealtimeMetricsMonitor`|Reports statistics on Realtime nodes.| +|`org.apache.druid.segment.realtime.RealtimeMetricsMonitor`|Reports statistics on Realtime processes.| |`org.apache.druid.server.metrics.EventReceiverFirehoseMonitor`|Reports how many events have been queued in the EventReceiverFirehose.| 
|`org.apache.druid.server.metrics.QueryCountStatsMonitor`|Reports how many queries have been successful/failed/interrupted.| |`org.apache.druid.server.emitter.HttpEmittingMonitor`|Reports internal metrics of `http` or `parametrized` emitter (see below). Must not be used with another emitter type. See the description of the metrics here: https://github.com/apache/incubator-druid/pull/4973.| @@ -452,7 +452,7 @@ The Druid servers [emit various metrics](../operations/metrics.html) and alerts #### Http Emitter Module TLS Overrides -When emitting events to a TLS-enabled receiver, the Http Emitter will by default use an SSLContext obtained via the process described at [Druid's internal communication over TLS](../operations/tls-support.html#druids-internal-communication-over-tls), i.e., the same SSLContext that would be used for internal communications between Druid nodes. +When emitting events to a TLS-enabled receiver, the Http Emitter will by default use an SSLContext obtained via the process described at [Druid's internal communication over TLS](../operations/tls-support.html#druids-internal-communication-over-tls), i.e., the same SSLContext that would be used for internal communications between Druid processes. In some use cases it may be desirable to have the Http Emitter use its own separate truststore configuration. For example, there may be organizational policies that prevent the TLS-enabled metrics receiver's certificate from being added to the same truststore used by Druid's internal HTTP client. @@ -492,7 +492,7 @@ To use graphite as emitter set `druid.emitter=graphite`. For configuration detai ### Metadata Storage -These properties specify the jdbc connection and other configuration around the metadata storage. The only processes that connect to the metadata storage with these properties are the [Coordinator](../design/coordinator.html), [Overlord](../design/overlord.html) and [Realtime Nodes](../design/realtime.html). 
+These properties specify the jdbc connection and other configuration around the metadata storage. The only processes that connect to the metadata storage with these properties are the [Coordinator](../design/coordinator.html), [Overlord](../design/overlord.html) and [Realtime Processes](../design/realtime.html). |Property|Description|Default| |--------|-----------|-------| @@ -575,7 +575,7 @@ If you are running the indexing service in remote mode, the task logs must be st |`druid.indexer.logs.type`|Choices:noop, s3, azure, google, hdfs, file. Where to store task logs|file| You can also configure the Overlord to automatically retain the task logs in log directory and entries in task-related metadata storage tables only for last x milliseconds by configuring following additional properties. -Caution: Automatic log file deletion typically works based on log file modification timestamp on the backing store, so large clock skews between druid nodes and backing store nodes might result in un-intended behavior. +Caution: Automatic log file deletion typically works based on log file modification timestamp on the backing store, so large clock skews between druid processes and backing store nodes might result in un-intended behavior. |Property|Description|Default| |--------|-----------|-------| @@ -635,16 +635,16 @@ This config is used to find the [Overlord](../design/overlord.html) using Curato |Property|Description|Default| |--------|-----------|-------| -|`druid.selectors.indexing.serviceName`|The druid.service name of the Overlord node. To start the Overlord with a different name, set it with this property. |druid/overlord| +|`druid.selectors.indexing.serviceName`|The druid.service name of the Overlord process. To start the Overlord with a different name, set it with this property. |druid/overlord| ### Coordinator Discovery -This config is used to find the [Coordinator](../design/coordinator.html) using Curator service discovery. 
This config is used by the realtime indexing nodes to get information about the segments loaded in the cluster. +This config is used to find the [Coordinator](../design/coordinator.html) using Curator service discovery. This config is used by the realtime indexing processes to get information about the segments loaded in the cluster. |Property|Description|Default| |--------|-----------|-------| -|`druid.selectors.coordinator.serviceName`|The druid.service name of the Coordinator node. To start the Coordinator with a different name, set it with this property. |druid/coordinator| +|`druid.selectors.coordinator.serviceName`|The druid.service name of the Coordinator process. To start the Coordinator with a different name, set it with this property. |druid/coordinator| ### Announcing Segments @@ -695,18 +695,18 @@ This section contains the configuration options for the processes that reside on ### Coordinator -For general Coordinator Node information, see [here](../design/coordinator.html). +For general Coordinator Process information, see [here](../design/coordinator.html). #### Static Configuration These Coordinator static configurations can be defined in the `coordinator/runtime.properties` file. -##### Coordinator Node Config +##### Coordinator Process Config |Property|Description|Default| |--------|-----------|-------| -|`druid.host`|The host for the current node. This is used to advertise the current processes location as reachable from another node and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| -|`druid.bindOnHost`|Indicating whether the node's internal jetty server bind on `druid.host`. Default is false, which means binding to all interfaces.|false| +|`druid.host`|The host for the current process. 
This is used to advertise the current process's location as reachable from another process and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| +|`druid.bindOnHost`|Indicating whether the process's internal jetty server binds on `druid.host`. Default is false, which means binding to all interfaces.|false| |`druid.plaintextPort`|This is the port to actually listen on; unless port mapping is used, this will be the same port as is on `druid.host`|8081| |`druid.tlsPort`|TLS port for HTTPS connector, if [druid.enableTlsPort](../operations/tls-support.html) is set then this config will be used. If `druid.host` contains port then that port will be ignored. This should be a non-negative Integer.|8281| |`druid.service`|The name of the service. This is used as a dimension when emitting metrics and alerts to differentiate between the various services|druid/coordinator| @@ -719,16 +719,16 @@ These Coordinator static configurations can be defined in the `coordinator/runti |`druid.coordinator.period.indexingPeriod`|How often to send compact/merge/conversion tasks to the indexing service. It's recommended to be longer than `druid.manager.segments.pollDuration`|PT1800S (30 mins)| |`druid.coordinator.startDelay`|The operation of the Coordinator works on the assumption that it has an up-to-date view of the state of the world when it runs, the current ZK interaction code, however, is written in a way that doesn’t allow the Coordinator to know for a fact that it’s done loading the current state of the world.
This delay is a hack to give it enough time to believe that it has all the data.|PT300S| |`druid.coordinator.merge.on`|Boolean flag for whether or not the Coordinator should try and merge small segments into a more optimal segment size.|false| -|`druid.coordinator.load.timeout`|The timeout duration for when the Coordinator assigns a segment to a Historical node.|PT15M| +|`druid.coordinator.load.timeout`|The timeout duration for when the Coordinator assigns a segment to a Historical process.|PT15M| |`druid.coordinator.kill.pendingSegments.on`|Boolean flag for whether or not the Coordinator clean up old entries in the `pendingSegments` table of metadata store. If set to true, Coordinator will check the created time of most recently complete task. If it doesn't exist, it finds the created time of the earlist running/pending/waiting tasks. Once the created time is found, then for all dataSources not in the `killPendingSegmentsSkipList` (see [Dynamic configuration](#dynamic-configuration)), Coordinator will ask the Overlord to clean up the entries 1 day or more older than the found created time in the `pendingSegments` table. This will be done periodically based on `druid.coordinator.period` specified.|false| |`druid.coordinator.kill.on`|Boolean flag for whether or not the Coordinator should submit kill task for unused segments, that is, hard delete them from metadata store and deep storage. If set to true, then for all whitelisted dataSources (or optionally all), Coordinator will submit tasks periodically based on `period` specified. These kill tasks will delete all segments except for the last `durationToRetain` period. Whitelist or All can be set via dynamic configuration `killAllDataSources` and `killDataSourceWhitelist` described later.|false| |`druid.coordinator.kill.period`|How often to send kill tasks to the indexing service. Value must be greater than `druid.coordinator.period.indexingPeriod`. 
Only applies if kill is turned on.|P1D (1 Day)| |`druid.coordinator.kill.durationToRetain`| Do not kill segments in last `durationToRetain`, must be greater or equal to 0. Only applies and MUST be specified if kill is turned on. Note that default value is invalid.|PT-1S (-1 seconds)| |`druid.coordinator.kill.maxSegments`|Kill at most n segments per kill task submission, must be greater than 0. Only applies and MUST be specified if kill is turned on. Note that default value is invalid.|0| -|`druid.coordinator.balancer.strategy`|Specify the type of balancing strategy that the Coordinator should use to distribute segments among the Historicals. `cachingCost` is logically equivalent to `cost` but is more CPU-efficient on large clusters and will replace `cost` in the future versions, users are invited to try it. Use `diskNormalized` to distribute segments among nodes so that the disks fill up uniformly and use `random` to randomly pick nodes to distribute segments.|`cost`| +|`druid.coordinator.balancer.strategy`|Specify the type of balancing strategy that the Coordinator should use to distribute segments among the Historicals. `cachingCost` is logically equivalent to `cost` but is more CPU-efficient on large clusters and will replace `cost` in the future versions, users are invited to try it. Use `diskNormalized` to distribute segments among Historical processes so that the disks fill up uniformly and use `random` to randomly pick nodes to distribute segments.|`cost`| |`druid.coordinator.loadqueuepeon.repeatDelay`|The start and repeat delay for the loadqueuepeon , which manages the load and drop of segments.|PT0.050S (50 ms)| -|`druid.coordinator.asOverlord.enabled`|Boolean value for whether this Coordinator node should act like an Overlord as well. This configuration allows users to simplify a druid cluster by not having to deploy any standalone Overlord nodes. 
If set to true, then Overlord console is available at `http://coordinator-host:port/console.html` and be sure to set `druid.coordinator.asOverlord.overlordService` also. See next.|false| -|`druid.coordinator.asOverlord.overlordService`| Required, if `druid.coordinator.asOverlord.enabled` is `true`. This must be same value as `druid.service` on standalone Overlord nodes and `druid.selectors.indexing.serviceName` on Middle Managers.|NULL| +|`druid.coordinator.asOverlord.enabled`|Boolean value for whether this Coordinator process should act like an Overlord as well. This configuration allows users to simplify a druid cluster by not having to deploy any standalone Overlord processes. If set to true, then Overlord console is available at `http://coordinator-host:port/console.html` and be sure to set `druid.coordinator.asOverlord.overlordService` also. See next.|false| +|`druid.coordinator.asOverlord.overlordService`| Required, if `druid.coordinator.asOverlord.enabled` is `true`. This must be same value as `druid.service` on standalone Overlord processes and `druid.selectors.indexing.serviceName` on Middle Managers.|NULL| ##### Segment Management |Property|Possible Values|Description|Default| @@ -739,7 +739,7 @@ These Coordinator static configurations can be defined in the `coordinator/runti ###### Additional config when "http" loadqueuepeon is used |Property|Description|Default| |--------|-----------|-------| -|`druid.coordinator.loadqueuepeon.http.batchSize`|Number of segment load/drop requests to batch in one HTTP request. Note that it must be smaller than `druid.segmentCache.numLoadingThreads` config on Historical node.|1| +|`druid.coordinator.loadqueuepeon.http.batchSize`|Number of segment load/drop requests to batch in one HTTP request. 
Note that it must be smaller than `druid.segmentCache.numLoadingThreads` config on Historical process.|1| ##### Metadata Retrieval @@ -780,8 +780,8 @@ A sample Coordinator dynamic config JSON object is shown below: "replicationThrottleLimit": 10, "emitBalancingStats": false, "killDataSourceWhitelist": ["wikipedia", "testDatasource"], - "historicalNodesInMaintenance": ["localhost:8182", "localhost:8282"], - "nodesInMaintenancePriority": 7 + "decommissioningNodes": ["localhost:8182", "localhost:8282"], + "decommissioningMaxPercentOfMaxSegmentsToMove": 70 } ``` @@ -795,14 +795,14 @@ Issuing a GET request at the same URL will return the spec that is currently in |`maxSegmentsToMove`|The maximum number of segments that can be moved at any given time.|5| |`replicantLifetime`|The maximum number of Coordinator runs for a segment to be replicated before we start alerting.|15| |`replicationThrottleLimit`|The maximum number of segments that can be replicated at one time.|10| -|`balancerComputeThreads`|Thread pool size for computing moving cost of segments in segment balancing. Consider increasing this if you have a lot of segments and moving segment starts to get stuck.|1| +|`balancerComputeThreads`|Thread pool size for computing moving cost of segments in segment balancing. Consider increasing this if you have a lot of segments and moving segments starts to get stuck.|1| |`emitBalancingStats`|Boolean flag for whether or not we should emit balancing stats. This is an expensive operation.|false| |`killDataSourceWhitelist`|List of dataSources for which kill tasks are sent if property `druid.coordinator.kill.on` is true. This can be a list of comma-separated dataSources or a JSON array.|none| |`killAllDataSources`|Send kill tasks for ALL dataSources if property `druid.coordinator.kill.on` is true. 
If this is set to true then `killDataSourceWhitelist` must not be specified or be empty list.|false| |`killPendingSegmentsSkipList`|List of dataSources for which pendingSegments are _NOT_ cleaned up if property `druid.coordinator.kill.pendingSegments.on` is true. This can be a list of comma-separated dataSources or a JSON array.|none| |`maxSegmentsInNodeLoadingQueue`|The maximum number of segments that could be queued for loading to any given server. This parameter could be used to speed up segments loading process, especially if there are "slow" nodes in the cluster (with low loading speed) or if too much segments scheduled to be replicated to some particular node (faster loading could be preferred to better segments distribution). Desired value depends on segments loading speed, acceptable replication time and number of nodes. Value 1000 could be a start point for a rather big cluster. Default value is 0 (loading queue is unbounded) |0| -|`historicalNodesInMaintenance`| List of Historical nodes in maintenance mode. Coordinator doesn't assign new segments on those nodes and moves segments from the nodes according to a specified priority.|none| -|`nodesInMaintenancePriority`| Priority of segments from servers in maintenance. Coordinator takes ceil(maxSegmentsToMove * (priority / 10)) from servers in maitenance during balancing phase, i.e.:
0 - no segments from servers in maintenance will be processed during balancing
5 - 50% segments from servers in maintenance
10 - 100% segments from servers in maintenance
By leveraging the priority an operator can prevent general nodes from overload or decrease maitenance time instead.|7| +|`decommissioningNodes`| List of historical servers to 'decommission'. Coordinator will not assign new segments to 'decommissioning' servers, and segments will be moved away from them to be placed on non-decommissioning servers at the maximum rate specified by `decommissioningMaxPercentOfMaxSegmentsToMove`.|none| +|`decommissioningMaxPercentOfMaxSegmentsToMove`| The maximum number of segments that may be moved away from 'decommissioning' servers to non-decommissioning (that is, active) servers during one Coordinator run. This value is relative to the total maximum segment movements allowed during one run which is determined by `maxSegmentsToMove`. If `decommissioningMaxPercentOfMaxSegmentsToMove` is 0, segments will neither be moved from _nor to_ 'decommissioning' servers, effectively putting them in a sort of "maintenance" mode that will not participate in balancing or assignment by load rules. Decommissioning can also become stalled if there are no available active servers to place the segments. By leveraging the maximum percent of decommissioning segment movements, an operator can prevent active servers from overload by prioritizing balancing, or decrease decommissioning time instead.
The value should be between 0 and 100.|70| To view the audit history of Coordinator dynamic config issue a GET request to the URL - @@ -823,17 +823,19 @@ These configuration options control the behavior of the Lookup dynamic configura |Property|Description|Default| |--------|-----------|-------| -|`druid.manager.lookups.hostDeleteTimeout`|How long to wait for a `DELETE` request to a particular node before considering the `DELETE` a failure|PT1S| -|`druid.manager.lookups.hostUpdateTimeout`|How long to wait for a `POST` request to a particular node before considering the `POST` a failure|PT10S| +|`druid.manager.lookups.hostDeleteTimeout`|How long to wait for a `DELETE` request to a particular process before considering the `DELETE` a failure|PT1S| +|`druid.manager.lookups.hostUpdateTimeout`|How long to wait for a `POST` request to a particular process before considering the `POST` a failure|PT10S| |`druid.manager.lookups.deleteAllTimeout`|How long to wait for all `DELETE` requests to finish before considering the delete attempt a failure|PT10S| |`druid.manager.lookups.updateAllTimeout`|How long to wait for all `POST` requests to finish before considering the attempt a failure|PT60S| -|`druid.manager.lookups.threadPoolSize`|How many nodes can be managed concurrently (concurrent POST and DELETE requests). Requests this limit will wait in a queue until a slot becomes available.|10| +|`druid.manager.lookups.threadPoolSize`|How many processes can be managed concurrently (concurrent POST and DELETE requests). Requests this limit will wait in a queue until a slot becomes available.|10| |`druid.manager.lookups.period`|How many milliseconds between checks for configuration changes|30_000| ##### Compaction Dynamic Configuration -Compaction configurations can also be set or updated dynamically without restarting Coordinators. For segment compaction, -please see [Compacting Segments](../design/coordinator.html#compacting-segments). 
+Compaction configurations can also be set or updated dynamically using +[Coordinator's API](../operations/api-reference.html#compaction-configuration) without restarting Coordinators. + +For details about segment compaction, please check [Segment Size Optimization](../operations/segment-optimization.html). A description of the compaction config is: @@ -877,18 +879,18 @@ If you see this problem, it's recommended to set `skipOffsetFromLatest` to some ### Overlord -For general Overlord Node information, see [here](../design/overlord.html). +For general Overlord Process information, see [here](../design/overlord.html). #### Overlord Static Configuration These Overlord static configurations can be defined in the `overlord/runtime.properties` file. -##### Overlord Node Configs +##### Overlord Process Configs |Property|Description|Default| |--------|-----------|-------| -|`druid.host`|The host for the current node. This is used to advertise the current processes location as reachable from another node and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| -|`druid.bindOnHost`|Indicating whether the node's internal jetty server bind on `druid.host`. Default is false, which means binding to all interfaces.|false| +|`druid.host`|The host for the current process. This is used to advertise the current processes location as reachable from another process and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| +|`druid.bindOnHost`|Indicating whether the process's internal jetty server bind on `druid.host`. 
Default is false, which means binding to all interfaces.|false| |`druid.plaintextPort`|This is the port to actually listen on; unless port mapping is used, this will be the same port as is on `druid.host`|8090| |`druid.tlsPort`|TLS port for HTTPS connector, if [druid.enableTlsPort](../operations/tls-support.html) is set then this config will be used. If `druid.host` contains port then that port will be ignored. This should be a non-negative Integer.|8290| |`druid.service`|The name of the service. This is used as a dimension when emitting metrics and alerts to differentiate between the various services|druid/overlord| @@ -1097,12 +1099,12 @@ This section contains the configuration options for the processes that reside on These MiddleManager and Peon configurations can be defined in the `middleManager/runtime.properties` file. -#### MiddleManager Node Config +#### MiddleManager Process Config |Property|Description|Default| |--------|-----------|-------| -|`druid.host`|The host for the current node. This is used to advertise the current processes location as reachable from another node and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| -|`druid.bindOnHost`|Indicating whether the node's internal jetty server bind on `druid.host`. Default is false, which means binding to all interfaces.|false| +|`druid.host`|The host for the current process. This is used to advertise the current processes location as reachable from another process and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| +|`druid.bindOnHost`|Indicating whether the process's internal jetty server bind on `druid.host`. 
Default is false, which means binding to all interfaces.|false| |`druid.plaintextPort`|This is the port to actually listen on; unless port mapping is used, this will be the same port as is on `druid.host`|8091| |`druid.tlsPort`|TLS port for HTTPS connector, if [druid.enableTlsPort](../operations/tls-support.html) is set then this config will be used. If `druid.host` contains port then that port will be ignored. This should be a non-negative Integer.|8291| |`druid.service`|The name of the service. This is used as a dimension when emitting metrics and alerts to differentiate between the various services|druid/middlemanager| @@ -1133,9 +1135,9 @@ Processing properties set on the Middlemanager will be passed through to Peons. |Property|Description|Default| |--------|-----------|-------| -|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed.|auto (max 1GB)| +|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime processes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed.|auto (max 1GB)| |`druid.processing.buffer.poolCacheMaxCount`|processing buffer pool caches the buffers for later use, this is the maximum count cache will grow to. 
note that pool can create more buffers than it can cache if necessary.|Integer.MAX_VALUE| -|`druid.processing.formatString`|Realtime and Historical nodes use this format string to name their processing threads.|processing-%s| +|`druid.processing.formatString`|Realtime and Historical processes use this format string to name their processing threads.|processing-%s| |`druid.processing.numMergeBuffers`|The number of direct memory buffers available for merging query results. The buffers are sized by `druid.processing.buffer.sizeBytes`. This property is effectively a concurrency limit for queries that require merging buffers. If you are using any queries that require merge buffers (currently, just groupBy v2) then you should have at least two of these.|`max(2, druid.processing.numThreads / 4)`| |`druid.processing.numThreads`|The number of processing threads to have available for parallel processing of segments. Our rule of thumb is `num_cores - 1`, which means that even under heavy load there will still be one core available to do background tasks like talking with ZooKeeper and pulling down segments. If only one core is available, this property defaults to the value `1`.|Number of cores - 1 (or 1)| |`druid.processing.columnCache.sizeBytes`|Maximum size in bytes for the dimension value lookup cache. Any value greater than `0` enables the cache. It is currently disabled by default. Enabling the lookup cache can significantly improve the performance of aggregators operating on dimension values, such as the JavaScript aggregator, or cardinality aggregator, but can slow things down if the cache hit rate is low (i.e. dimensions with few repeating values). Enabling it may also require additional garbage collection tuning to avoid long GC pauses.|`0` (disabled)| @@ -1174,7 +1176,7 @@ Additional peon configs include: |Property|Description|Default| |--------|-----------|-------| -|`druid.peon.mode`|Choices are "local" and "remote". 
Setting this to local means you intend to run the peon as a standalone node (Not recommended).|remote| +|`druid.peon.mode`|Choices are "local" and "remote". Setting this to local means you intend to run the peon as a standalone process (Not recommended).|remote| |`druid.indexer.task.baseDir`|Base temporary working directory.|`System.getProperty("java.io.tmpdir")`| |`druid.indexer.task.baseTaskDir`|Base temporary working directory for tasks.|`${druid.indexer.task.baseDir}/persistent/tasks`| |`druid.indexer.task.defaultHadoopCoordinates`|Hadoop version to use with HadoopIndexTasks that do not request a particular version.|org.apache.hadoop:hadoop-client:2.8.3| @@ -1218,15 +1220,15 @@ then the value from the configuration below is used: ### Historical -For general Historical Node information, see [here](../design/historical.html). +For general Historical Process information, see [here](../design/historical.html). These Historical configurations can be defined in the `historical/runtime.properties` file. -#### Historical Node Configuration +#### Historical Process Configuration |Property|Description|Default| |--------|-----------|-------| -|`druid.host`|The host for the current node. This is used to advertise the current processes location as reachable from another node and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| -|`druid.bindOnHost`|Indicating whether the node's internal jetty server bind on `druid.host`. Default is false, which means binding to all interfaces.|false| +|`druid.host`|The host for the current process. This is used to advertise the current processes location as reachable from another process and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| +|`druid.bindOnHost`|Indicating whether the process's internal jetty server bind on `druid.host`. 
Default is false, which means binding to all interfaces.|false| |`druid.plaintextPort`|This is the port to actually listen on; unless port mapping is used, this will be the same port as is on `druid.host`|8083| |`druid.tlsPort`|TLS port for HTTPS connector, if [druid.enableTlsPort](../operations/tls-support.html) is set then this config will be used. If `druid.host` contains port then that port will be ignored. This should be a non-negative Integer.|8283| |`druid.service`|The name of the service. This is used as a dimension when emitting metrics and alerts to differentiate between the various services|druid/historical| @@ -1236,18 +1238,18 @@ These Historical configurations can be defined in the `historical/runtime.proper |Property|Description|Default| |--------|-----------|-------| -|`druid.server.maxSize`|The maximum number of bytes-worth of segments that the node wants assigned to it. This is not a limit that Historical nodes actually enforces, just a value published to the Coordinator node so it can plan accordingly.|0| -|`druid.server.tier`| A string to name the distribution tier that the storage node belongs to. Many of the [rules Coordinator nodes use](../operations/rule-configuration.html) to manage segments can be keyed on tiers. | `_default_tier` | -|`druid.server.priority`|In a tiered architecture, the priority of the tier, thus allowing control over which nodes are queried. Higher numbers mean higher priority. The default (no priority) works for architecture with no cross replication (tiers that have no data-storage overlap). Data centers typically have equal priority. | 0 | +|`druid.server.maxSize`|The maximum number of bytes-worth of segments that the process wants assigned to it. This is not a limit that Historical processes actually enforces, just a value published to the Coordinator process so it can plan accordingly.|0| +|`druid.server.tier`| A string to name the distribution tier that the storage process belongs to. 
Many of the [rules Coordinator processes use](../operations/rule-configuration.html) to manage segments can be keyed on tiers. | `_default_tier` | +|`druid.server.priority`|In a tiered architecture, the priority of the tier, thus allowing control over which processes are queried. Higher numbers mean higher priority. The default (no priority) works for architecture with no cross replication (tiers that have no data-storage overlap). Data centers typically have equal priority. | 0 | #### Storing Segments |Property|Description|Default| |--------|-----------|-------| -|`druid.segmentCache.locations`|Segments assigned to a Historical node are first stored on the local file system (in a disk cache) and then served by the Historical node. These locations define where that local cache resides. This value cannot be NULL or EMPTY. Here is an example `druid.segmentCache.locations=[{"path": "/mnt/druidSegments", "maxSize": 10000, "freeSpacePercent": 1.0}]`. "freeSpacePercent" is optional, if provided then enforces that much of free disk partition space while storing segments. But, it depends on File.getTotalSpace() and File.getFreeSpace() methods, so enable if only if they work for your File System.| none | -|`druid.segmentCache.deleteOnRemove`|Delete segment files from cache once a node is no longer serving a segment.|true| -|`druid.segmentCache.dropSegmentDelayMillis`|How long a node delays before completely dropping segment.|30000 (30 seconds)| -|`druid.segmentCache.infoDir`|Historical nodes keep track of the segments they are serving so that when the process is restarted they can reload the same segments without waiting for the Coordinator to reassign. This path defines where this metadata is kept. Directory will be created if needed.|${first_location}/info_dir| +|`druid.segmentCache.locations`|Segments assigned to a Historical process are first stored on the local file system (in a disk cache) and then served by the Historical process. 
These locations define where that local cache resides. This value cannot be NULL or EMPTY. Here is an example `druid.segmentCache.locations=[{"path": "/mnt/druidSegments", "maxSize": 10000, "freeSpacePercent": 1.0}]`. "freeSpacePercent" is optional, if provided then enforces that much of free disk partition space while storing segments. But, it depends on File.getTotalSpace() and File.getFreeSpace() methods, so enable if only if they work for your File System.| none | +|`druid.segmentCache.deleteOnRemove`|Delete segment files from cache once a process is no longer serving a segment.|true| +|`druid.segmentCache.dropSegmentDelayMillis`|How long a process delays before completely dropping segment.|30000 (30 seconds)| +|`druid.segmentCache.infoDir`|Historical processes keep track of the segments they are serving so that when the process is restarted they can reload the same segments without waiting for the Coordinator to reassign. This path defines where this metadata is kept. Directory will be created if needed.|${first_location}/info_dir| |`druid.segmentCache.announceIntervalMillis`|How frequently to announce segments while segments are loading from cache. Set this value to zero to wait for all segments to be loaded before announcing.|5000 (5 seconds)| |`druid.segmentCache.numLoadingThreads`|How many segments to drop or load concurrently from from deep storage.|10| |`druid.segmentCache.numBootstrapThreads`|How many segments to load concurrently from local storage at startup.|Same as numLoadingThreads| @@ -1276,9 +1278,9 @@ Druid uses Jetty to serve HTTP requests. |Property|Description|Default| |--------|-----------|-------| -|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations off-heap. 
Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed.|auto (max 1GB)| +|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime processes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed.|auto (max 1GB)| |`druid.processing.buffer.poolCacheMaxCount`|processing buffer pool caches the buffers for later use, this is the maximum count cache will grow to. note that pool can create more buffers than it can cache if necessary.|Integer.MAX_VALUE| -|`druid.processing.formatString`|Realtime and Historical nodes use this format string to name their processing threads.|processing-%s| +|`druid.processing.formatString`|Realtime and Historical processes use this format string to name their processing threads.|processing-%s| |`druid.processing.numMergeBuffers`|The number of direct memory buffers available for merging query results. The buffers are sized by `druid.processing.buffer.sizeBytes`. This property is effectively a concurrency limit for queries that require merging buffers. If you are using any queries that require merge buffers (currently, just groupBy v2) then you should have at least two of these.|`max(2, druid.processing.numThreads / 4)`| |`druid.processing.numThreads`|The number of processing threads to have available for parallel processing of segments. Our rule of thumb is `num_cores - 1`, which means that even under heavy load there will still be one core available to do background tasks like talking with ZooKeeper and pulling down segments. 
If only one core is available, this property defaults to the value `1`.|Number of cores - 1 (or 1)| |`druid.processing.columnCache.sizeBytes`|Maximum size in bytes for the dimension value lookup cache. Any value greater than `0` enables the cache. It is currently disabled by default. Enabling the lookup cache can significantly improve the performance of aggregators operating on dimension values, such as the JavaScript aggregator, or cardinality aggregator, but can slow things down if the cache hit rate is low (i.e. dimensions with few repeating values). Enabling it may also require additional garbage collection tuning to avoid long GC pauses.|`0` (disabled)| @@ -1316,12 +1318,12 @@ For general Broker process information, see [here](../design/broker.html). These Broker configurations can be defined in the `broker/runtime.properties` file. -#### Broker Node Configs +#### Broker Process Configs |Property|Description|Default| |--------|-----------|-------| -|`druid.host`|The host for the current node. This is used to advertise the current processes location as reachable from another node and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| -|`druid.bindOnHost`|Indicating whether the node's internal jetty server bind on `druid.host`. Default is false, which means binding to all interfaces.|false| +|`druid.host`|The host for the current process. This is used to advertise the current processes location as reachable from another process and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| +|`druid.bindOnHost`|Indicating whether the process's internal jetty server bind on `druid.host`. 
Default is false, which means binding to all interfaces.|false| |`druid.plaintextPort`|This is the port to actually listen on; unless port mapping is used, this will be the same port as is on `druid.host`|8082| |`druid.tlsPort`|TLS port for HTTPS connector, if [druid.enableTlsPort](../operations/tls-support.html) is set then this config will be used. If `druid.host` contains port then that port will be ignored. This should be a non-negative Integer.|8282| |`druid.service`|The name of the service. This is used as a dimension when emitting metrics and alerts to differentiate between the various services|druid/broker| @@ -1332,7 +1334,7 @@ These Broker configurations can be defined in the `broker/runtime.properties` fi |Property|Possible Values|Description|Default| |--------|---------------|-----------|-------| -|`druid.broker.balancer.type`|`random`, `connectionCount`|Determines how the broker balances connections to Historical nodes. `random` choose randomly, `connectionCount` picks the node with the fewest number of active connections to|`random`| +|`druid.broker.balancer.type`|`random`, `connectionCount`|Determines how the broker balances connections to Historical processes. `random` choose randomly, `connectionCount` picks the process with the fewest number of active connections to|`random`| |`druid.broker.select.tier`|`highestPriority`, `lowestPriority`, `custom`|If segments are cross-replicated across tiers in a cluster, you can tell the broker to prefer to select segments in a tier with a certain priority.|`highestPriority`| |`druid.broker.select.tier.custom.priorities`|`An array of integer priorities.`|Select servers in tiers with a custom priority list.|None| @@ -1347,7 +1349,7 @@ Druid uses Jetty to serve HTTP requests. |`druid.server.http.maxIdleTime`|The Jetty max idle time for a connection.|PT5M| |`druid.server.http.enableRequestLimit`|If enabled, no requests would be queued in jetty queue and "HTTP 429 Too Many Requests" error response would be sent. 
|false| |`druid.server.http.defaultQueryTimeout`|Query timeout in millis, beyond which unfinished queries will be cancelled|300000| -|`druid.server.http.maxScatterGatherBytes`|Maximum number of bytes gathered from data nodes such as Historicals and realtime processes to execute a query. Queries that exceed this limit will fail. This is an advance configuration that allows to protect in case Broker is under heavy load and not utilizing the data gathered in memory fast enough and leading to OOMs. This limit can be further reduced at query time using `maxScatterGatherBytes` in the context. Note that having large limit is not necessarily bad if broker is never under heavy concurrent load in which case data gathered is processed quickly and freeing up the memory used.|Long.MAX_VALUE| +|`druid.server.http.maxScatterGatherBytes`|Maximum number of bytes gathered from data processes such as Historicals and realtime processes to execute a query. Queries that exceed this limit will fail. This is an advanced configuration that allows to protect in case Broker is under heavy load and not utilizing the data gathered in memory fast enough and leading to OOMs. This limit can be further reduced at query time using `maxScatterGatherBytes` in the context. Note that having large limit is not necessarily bad if broker is never under heavy concurrent load in which case data gathered is processed quickly and freeing up the memory used.|Long.MAX_VALUE| |`druid.server.http.gracefulShutdownTimeout`|The maximum amount of time Jetty waits after receiving shutdown signal. After this timeout the threads will be forcefully shutdown. This allows any queries that are executing to complete.|`PT0S` (do not wait)| |`druid.server.http.unannouncePropagationDelay`|How long to wait for zookeeper unannouncements to propagate before shutting down Jetty.
This is a minimum and `druid.server.http.gracefulShutdownTimeout` does not start counting down until after this period elapses.|`PT0S` (do not wait)| |`druid.server.http.maxQueryTimeout`|Maximum allowed value (in milliseconds) for `timeout` parameter. See [query-context](../querying/query-context.html) to know more about `timeout`. Query is rejected if the query context `timeout` is greater than this value. |Long.MAX_VALUE| @@ -1360,7 +1362,7 @@ client has the following configuration options. |Property|Description|Default| |--------|-----------|-------| -|`druid.broker.http.numConnections`|Size of connection pool for the Broker to connect to Historical and real-time processes. If there are more queries than this number that all need to speak to the same node, then they will queue up.|20| +|`druid.broker.http.numConnections`|Size of connection pool for the Broker to connect to Historical and real-time processes. If there are more queries than this number that all need to speak to the same process, then they will queue up.|20| |`druid.broker.http.compressionCodec`|Compression codec the Broker uses to communicate with Historical and real-time processes. May be "gzip" or "identity".|gzip| |`druid.broker.http.readTimeout`|The timeout for data reads from Historical servers and real-time tasks.|PT15M| |`druid.broker.http.unusedConnectionTimeout`|The timeout for idle connections in connection pool. This timeout should be less than `druid.broker.http.readTimeout`. Set this timeout = ~90% of `druid.broker.http.readTimeout`|`PT4M`| @@ -1380,9 +1382,9 @@ The broker uses processing configs for nested groupBy queries. And, if you use g |Property|Description|Default| |--------|-----------|-------| -|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations off-heap. 
Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed.|auto (max 1GB)| +|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime processes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed.|auto (max 1GB)| |`druid.processing.buffer.poolCacheMaxCount`|processing buffer pool caches the buffers for later use, this is the maximum count cache will grow to. note that pool can create more buffers than it can cache if necessary.|Integer.MAX_VALUE| -|`druid.processing.formatString`|Realtime and Historical nodes use this format string to name their processing threads.|processing-%s| +|`druid.processing.formatString`|Realtime and Historical processes use this format string to name their processing threads.|processing-%s| |`druid.processing.numMergeBuffers`|The number of direct memory buffers available for merging query results. The buffers are sized by `druid.processing.buffer.sizeBytes`. This property is effectively a concurrency limit for queries that require merging buffers. If you are using any queries that require merge buffers (currently, just groupBy v2) then you should have at least two of these.|`max(2, druid.processing.numThreads / 4)`| |`druid.processing.numThreads`|The number of processing threads to have available for parallel processing of segments. Our rule of thumb is `num_cores - 1`, which means that even under heavy load there will still be one core available to do background tasks like talking with ZooKeeper and pulling down segments. 
If only one core is available, this property defaults to the value `1`.|Number of cores - 1 (or 1)| |`druid.processing.columnCache.sizeBytes`|Maximum size in bytes for the dimension value lookup cache. Any value greater than `0` enables the cache. It is currently disabled by default. Enabling the lookup cache can significantly improve the performance of aggregators operating on dimension values, such as the JavaScript aggregator, or cardinality aggregator, but can slow things down if the cache hit rate is low (i.e. dimensions with few repeating values). Enabling it may also require additional garbage collection tuning to avoid long GC pauses.|`0` (disabled)| @@ -1444,8 +1446,8 @@ See [cache configuration](#cache-configuration) for how to configure cache setti |Property|Possible Values|Description|Default| |--------|---------------|-----------|-------| |`druid.serverview.type`|batch or http|Segment discovery method to use. "http" enables discovering segments using HTTP instead of zookeeper.|batch| -|`druid.broker.segment.watchedTiers`|List of strings|Broker watches the segment announcements from nodes serving segments to build cache of which node is serving which segments, this configuration allows to only consider segments being served from a whitelist of tiers. By default, Broker would consider all tiers. This can be used to partition your dataSources in specific Historical tiers and configure brokers in partitions so that they are only queryable for specific dataSources.|none| -|`druid.broker.segment.watchedDataSources`|List of strings|Broker watches the segment announcements from nodes serving segments to build cache of which node is serving which segments, this configuration allows to only consider segments being served from a whitelist of dataSources. By default, Broker would consider all datasources. 
This can be used to configure brokers in partitions so that they are only queryable for specific dataSources.|none| +|`druid.broker.segment.watchedTiers`|List of strings|Broker watches the segment announcements from processes serving segments to build cache of which process is serving which segments, this configuration allows to only consider segments being served from a whitelist of tiers. By default, Broker would consider all tiers. This can be used to partition your dataSources in specific Historical tiers and configure brokers in partitions so that they are only queryable for specific dataSources.|none| +|`druid.broker.segment.watchedDataSources`|List of strings|Broker watches the segment announcements from processes serving segments to build cache of which process is serving which segments, this configuration allows to only consider segments being served from a whitelist of dataSources. By default, Broker would consider all datasources. This can be used to configure brokers in partitions so that they are only queryable for specific dataSources.|none| |`druid.broker.segment.awaitInitializationOnStart`|Boolean|Whether the the Broker will wait for its view of segments to fully initialize before starting up. If set to 'true', the Broker's HTTP server will not start up, and the Broker will not announce itself as available, until the server view is initialized. See also `druid.sql.planner.awaitInitializationOnStart`, a related setting.|true| ## Cache Configuration @@ -1459,7 +1461,7 @@ Druid uses a local in-memory cache by default, unless a diffrent type of cache i Use the `druid.cache.type` configuration to set a different kind of cache. Cache settings are set globally, so the same configuration can be re-used -for both Broker and Historical nodes, when defined in the common properties file. +for both Broker and Historical processes, when defined in the common properties file. 
### Cache Type @@ -1498,7 +1500,7 @@ Below are the configuration options known to this module: |`druid.cache.sizeInBytes`|The maximum size of the cache in bytes on heap.|min(1GB, Runtime.maxMemory / 10)| |`druid.cache.expireAfter`|The time (in ms) after an access for which a cache entry may be expired|None (no time limit)| |`druid.cache.cacheExecutorFactory`|The executor factory to use for Caffeine maintenance. One of `COMMON_FJP`, `SINGLE_THREAD`, or `SAME_THREAD`|ForkJoinPool common pool (`COMMON_FJP`)| -|`druid.cache.evictOnClose`|If a close of a namespace (ex: removing a segment from a node) should cause an eager eviction of associated cache values|`false`| +|`druid.cache.evictOnClose`|If a close of a namespace (ex: removing a segment from a process) should cause an eager eviction of associated cache values|`false`| ##### `druid.cache.cacheExecutorFactory` @@ -1520,7 +1522,7 @@ In addition to the normal cache metrics, the caffeine cache implementation also ##### Memcached -Uses memcached as cache backend. This allows all nodes to share the same cache. +Uses memcached as cache backend. This allows all processes to share the same cache. |Property|Description|Default| |--------|-----------|-------| @@ -1548,12 +1550,12 @@ If there is an L1 miss and L2 hit, it will also populate L1. |`druid.cache.l2.type`|type of cache to use for L2 cache. See `druid.cache.type` configuration for valid types.|`caffeine`| |`druid.cache.l1.*`|Any property valid for the given type of L1 cache can be set using this prefix. For instance, if you are using a `caffeine` L1 cache, specify `druid.cache.l1.sizeInBytes` to set its size.|defaults are the same as for the given cache type.| |`druid.cache.l2.*`|Prefix for L2 cache settings, see description for L1.|defaults are the same as for the given cache type.| -|`druid.cache.useL2`|A boolean indicating whether to query L2 cache, if it's a miss in L1. 
It makes sense to configure this to `false` on Historical nodes, if L2 is a remote cache like `memcached`, and this cache also used on brokers, because in this case if a query reached Historical it means that a broker didn't find corresponding results in the same remote cache, so a query to the remote cache from Historical is guaranteed to be a miss.|`true`| +|`druid.cache.useL2`|A boolean indicating whether to query L2 cache, if it's a miss in L1. It makes sense to configure this to `false` on Historical processes, if L2 is a remote cache like `memcached`, and this cache also used on brokers, because in this case if a query reached Historical it means that a broker didn't find corresponding results in the same remote cache, so a query to the remote cache from Historical is guaranteed to be a miss.|`true`| |`druid.cache.populateL2`|A boolean indicating whether to put results into L2 cache.|`true`| ## General Query Configuration -This section describes configurations that control behavior of Druid's query types, applicable to Broker, Historical, and MiddleManager nodes. +This section describes configurations that control behavior of Druid's query types, applicable to Broker, Historical, and MiddleManager processes. ### TopN Query config @@ -1577,7 +1579,7 @@ This section describes configurations that control behavior of Druid's query typ ### GroupBy Query Config -This section describes the configurations for groupBy queries. You can set the runtime properties in the `runtime.properties` file on Broker, Historical, and MiddleManager nodes. You can set the query context parameters through the [query context](../querying/query-context.html). +This section describes the configurations for groupBy queries. You can set the runtime properties in the `runtime.properties` file on Broker, Historical, and MiddleManager processes. You can set the query context parameters through the [query context](../querying/query-context.html). 
#### Configurations for groupBy v2 @@ -1624,7 +1626,7 @@ Supported runtime properties: |`druid.query.groupBy.bufferGrouperInitialBuckets`|Initial number of buckets in the off-heap hash table used for grouping results. Set to 0 to use a reasonable default (1024).|0| |`druid.query.groupBy.bufferGrouperMaxLoadFactor`|Maximum load factor of the off-heap hash table used for grouping results. When the load factor exceeds this size, the table will be grown or spilled to disk. Set to 0 to use a reasonable default (0.7).|0| |`druid.query.groupBy.forceHashAggregation`|Force to use hash-based aggregation.|false| -|`druid.query.groupBy.intermediateCombineDegree`|Number of intermediate nodes combined together in the combining tree. Higher degrees will need less threads which might be helpful to improve the query performance by reducing the overhead of too many threads if the server has sufficiently powerful cpu cores.|8| +|`druid.query.groupBy.intermediateCombineDegree`|Number of intermediate nodes combined together in the combining tree. Higher degrees will need less threads which might be helpful to improve the query performance by reducing the overhead of too many threads if the server has sufficiently powerful cpu cores.|8| |`druid.query.groupBy.numParallelCombineThreads`|Hint for the number of parallel combining threads. This should be larger than 1 to turn on the parallel combining feature.
The actual number of threads used for parallel combining is min(`druid.query.groupBy.numParallelCombineThreads`, `druid.processing.numThreads`).|1 (disabled)| Supported query contexts: @@ -1637,7 +1639,7 @@ Supported query contexts: |`intermediateCombineDegree`|Overrides the value of `druid.query.groupBy.intermediateCombineDegree`|None| |`numParallelCombineThreads`|Overrides the value of `druid.query.groupBy.numParallelCombineThreads`|None| |`sortByDimsFirst`|Sort the results first by dimension values and then by timestamp.|false| -|`forceLimitPushDown`|When all fields in the orderby are part of the grouping key, the broker will push limit application down to the Historical nodes. When the sorting order uses fields that are not in the grouping key, applying this optimization can result in approximate results with unknown accuracy, so this optimization is disabled by default in that case. Enabling this context flag turns on limit push down for limit/orderbys that contain non-grouping key columns.|false| +|`forceLimitPushDown`|When all fields in the orderby are part of the grouping key, the broker will push limit application down to the Historical processes. When the sorting order uses fields that are not in the grouping key, applying this optimization can result in approximate results with unknown accuracy, so this optimization is disabled by default in that case. Enabling this context flag turns on limit push down for limit/orderbys that contain non-grouping key columns.|false| #### GroupBy v1 configurations @@ -1658,6 +1660,6 @@ Supported query contexts: |`useOffheap`|Set to true to store aggregations off-heap when merging results.|false| -## Realtime nodes +## Realtime processes -Configuration for the deprecated realtime node can be found [here](../configuration/realtime.html). +Configuration for the deprecated realtime process can be found [here](../configuration/realtime.html). 
diff --git a/docs/content/configuration/logging.md b/docs/content/configuration/logging.md index 97de828a90c5..131bc87e4e61 100644 --- a/docs/content/configuration/logging.md +++ b/docs/content/configuration/logging.md @@ -24,7 +24,7 @@ title: "Logging" # Logging -Druid nodes will emit logs that are useful for debugging to the console. Druid nodes also emit periodic metrics about their state. For more about metrics, see [Configuration](../configuration/index.html#enabling-metrics). Metric logs are printed to the console by default, and can be disabled with `-Ddruid.emitter.logging.logLevel=debug`. +Druid processes will emit logs that are useful for debugging to the console. Druid processes also emit periodic metrics about their state. For more about metrics, see [Configuration](../configuration/index.html#enabling-metrics). Metric logs are printed to the console by default, and can be disabled with `-Ddruid.emitter.logging.logLevel=debug`. Druid uses [log4j2](http://logging.apache.org/log4j/2.x/) for logging. Logging can be configured with a log4j2.xml file. Add the path to the directory containing the log4j2.xml file (e.g. the _common/ dir) to your classpath if you want to override default Druid log configuration. Note that this directory should be earlier in the classpath than the druid jars. The easiest way to do this is to prefix the classpath with the config dir. diff --git a/docs/content/configuration/realtime.md b/docs/content/configuration/realtime.md index 396e1f7ee7e7..4e806da670d5 100644 --- a/docs/content/configuration/realtime.md +++ b/docs/content/configuration/realtime.md @@ -1,6 +1,6 @@ --- layout: doc_page -title: "Realtime Node Configuration" +title: "Realtime Process Configuration" --- -# Realtime Node Configuration +# Realtime Process Configuration -For general Realtime Node information, see [here](../design/realtime.html). +For general Realtime Process information, see [here](../design/realtime.html). 
Runtime Configuration --------------------- -The realtime node uses several of the global configs in [Configuration](../configuration/index.html) and has the following set of configurations as well: +The realtime process uses several of the global configs in [Configuration](../configuration/index.html) and has the following set of configurations as well: -### Node Config +### Process Config |Property|Description|Default| |--------|-----------|-------| -|`druid.host`|The host for the current node. This is used to advertise the current processes location as reachable from another node and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| +|`druid.host`|The host for the current process. This is used to advertise the current processes location as reachable from another process and should generally be specified such that `http://${druid.host}/` could actually talk to this process|InetAddress.getLocalHost().getCanonicalHostName()| |`druid.plaintextPort`|This is the port to actually listen on; unless port mapping is used, this will be the same port as is on `druid.host`|8084| |`druid.tlsPort`|TLS port for HTTPS connector, if [druid.enableTlsPort](../operations/tls-support.html) is set then this config will be used. If `druid.host` contains port then that port will be ignored. This should be a non-negative Integer.|8284| |`druid.service`|The name of the service. This is used as a dimension when emitting metrics and alerts to differentiate between the various services|druid/realtime| @@ -60,8 +60,8 @@ The realtime node uses several of the global configs in [Configuration](../confi |Property|Description|Default| |--------|-----------|-------| -|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the storage of intermediate results. 
The computation engine in both the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed.|auto (max 1GB)| -|`druid.processing.formatString`|Realtime and Historical nodes use this format string to name their processing threads.|processing-%s| +|`druid.processing.buffer.sizeBytes`|This specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime processes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed.|auto (max 1GB)| +|`druid.processing.formatString`|Realtime and Historical processes use this format string to name their processing threads.|processing-%s| |`druid.processing.numMergeBuffers`|The number of direct memory buffers available for merging query results. The buffers are sized by `druid.processing.buffer.sizeBytes`. This property is effectively a concurrency limit for queries that require merging buffers. If you are using any queries that require merge buffers (currently, just groupBy v2) then you should have at least two of these.|`max(2, druid.processing.numThreads / 4)`| |`druid.processing.numThreads`|The number of processing threads to have available for parallel processing of segments. Our rule of thumb is `num_cores - 1`, which means that even under heavy load there will still be one core available to do background tasks like talking with ZooKeeper and pulling down segments. 
If only one core is available, this property defaults to the value `1`.|Number of cores - 1 (or 1)| |`druid.processing.columnCache.sizeBytes`|Maximum size in bytes for the dimension value lookup cache. Any value greater than `0` enables the cache. It is currently disabled by default. Enabling the lookup cache can significantly improve the performance of aggregators operating on dimension values, such as the JavaScript aggregator, or cardinality aggregator, but can slow things down if the cache hit rate is low (i.e. dimensions with few repeating values). Enabling it may also require additional garbage collection tuning to avoid long GC pauses.|`0` (disabled)| @@ -86,7 +86,7 @@ See [groupBy server configuration](../querying/groupbyquery.html#server-configur ### Caching -You can optionally configure caching to be enabled on the realtime node by setting caching configs here. +You can optionally configure caching to be enabled on the realtime process by setting caching configs here. |Property|Possible Values|Description|Default| |--------|---------------|-----------|-------| diff --git a/docs/content/dependencies/cassandra-deep-storage.md b/docs/content/dependencies/cassandra-deep-storage.md index 7eb4b1a1ffa3..4137f04047cb 100644 --- a/docs/content/dependencies/cassandra-deep-storage.md +++ b/docs/content/dependencies/cassandra-deep-storage.md @@ -29,8 +29,8 @@ title: "Cassandra Deep Storage" Druid can use Cassandra as a deep storage mechanism. Segments and their metadata are stored in Cassandra in two tables: `index_storage` and `descriptor_storage`. Underneath the hood, the Cassandra integration leverages Astyanax. The index storage table is a [Chunked Object](https://github.com/Netflix/astyanax/wiki/Chunked-Object-Store) repository. It contains -compressed segments for distribution to Historical nodes. 
Since segments can be large, the Chunked Object storage allows the integration to multi-thread -the write to Cassandra, and spreads the data across all the nodes in a cluster. The descriptor storage table is a normal C* table that +compressed segments for distribution to Historical processes. Since segments can be large, the Chunked Object storage allows the integration to multi-thread +the write to Cassandra, and spreads the data across all the nodes in a cluster. The descriptor storage table is a normal C* table that stores the segment metadatak. ## Schema diff --git a/docs/content/dependencies/deep-storage.md b/docs/content/dependencies/deep-storage.md index 8d12d49f3a81..eace739b7b20 100644 --- a/docs/content/dependencies/deep-storage.md +++ b/docs/content/dependencies/deep-storage.md @@ -24,7 +24,7 @@ title: "Deep Storage" # Deep Storage -Deep storage is where segments are stored. It is a storage mechanism that Druid does not provide. This deep storage infrastructure defines the level of durability of your data, as long as Druid nodes can see this storage infrastructure and get at the segments stored on it, you will not lose data no matter how many Druid nodes you lose. If segments disappear from this storage layer, then you will lose whatever data those segments represented. +Deep storage is where segments are stored. It is a storage mechanism that Druid does not provide. This deep storage infrastructure defines the level of durability of your data, as long as Druid processes can see this storage infrastructure and get at the segments stored on it, you will not lose data no matter how many Druid processes you lose. If segments disappear from this storage layer, then you will lose whatever data those segments represented.
## Local Mount diff --git a/docs/content/dependencies/metadata-storage.md b/docs/content/dependencies/metadata-storage.md index bfc77cb79054..5135eca19ead 100644 --- a/docs/content/dependencies/metadata-storage.md +++ b/docs/content/dependencies/metadata-storage.md @@ -134,8 +134,8 @@ config changes. The Metadata Storage is accessed only by: -1. Indexing Service Nodes (if any) -2. Realtime Nodes (if any) -3. Coordinator Nodes +1. Indexing Service Processes (if any) +2. Realtime Processes (if any) +3. Coordinator Processes Thus you need to give permissions (eg in AWS Security Groups) only for these machines to access the Metadata storage. diff --git a/docs/content/dependencies/zookeeper.md b/docs/content/dependencies/zookeeper.md index d2b4cd04b73e..e944441b35e4 100644 --- a/docs/content/dependencies/zookeeper.md +++ b/docs/content/dependencies/zookeeper.md @@ -44,7 +44,7 @@ ${druid.zk.paths.coordinatorPath}/_COORDINATOR The `announcementsPath` and `servedSegmentsPath` are used for this. -All [Historical](../design/historical.html) and [Realtime](../design/realtime.html) nodes publish themselves on the `announcementsPath`, specifically, they will create an ephemeral znode at +All [Historical](../design/historical.html) and [Realtime](../design/realtime.html) processes publish themselves on the `announcementsPath`, specifically, they will create an ephemeral znode at ``` ${druid.zk.paths.announcementsPath}/${druid.host} @@ -62,16 +62,16 @@ And as they load up segments, they will attach ephemeral znodes that look like ${druid.zk.paths.servedSegmentsPath}/${druid.host}/_segment_identifier_ ``` -Nodes like the [Coordinator](../design/coordinator.html) and [Broker](../design/broker.html) can then watch these paths to see which nodes are currently serving which segments. +Processes like the [Coordinator](../design/coordinator.html) and [Broker](../design/broker.html) can then watch these paths to see which processes are currently serving which segments. 
### Segment load/drop protocol between Coordinator and Historical The `loadQueuePath` is used for this. -When the [Coordinator](../design/coordinator.html) decides that a [Historical](../design/historical.html) node should load or drop a segment, it writes an ephemeral znode to +When the [Coordinator](../design/coordinator.html) decides that a [Historical](../design/historical.html) process should load or drop a segment, it writes an ephemeral znode to ``` -${druid.zk.paths.loadQueuePath}/_host_of_historical_node/_segment_identifier +${druid.zk.paths.loadQueuePath}/_host_of_historical_process/_segment_identifier ``` -This node will contain a payload that indicates to the Historical node what it should do with the given segment. When the Historical node is done with the work, it will delete the znode in order to signify to the Coordinator that it is complete. +This znode will contain a payload that indicates to the Historical process what it should do with the given segment. When the Historical process is done with the work, it will delete the znode in order to signify to the Coordinator that it is complete. diff --git a/docs/content/design/auth.md b/docs/content/design/auth.md index 95f8be65ea93..8063889c218a 100644 --- a/docs/content/design/auth.md +++ b/docs/content/design/auth.md @@ -123,7 +123,7 @@ An Authenticator implementation should provide some means through configuration ## Internal System User -Internal requests between Druid nodes (non-user initiated communications) need to have authentication credentials attached. +Internal requests between Druid processes (non-user initiated communications) need to have authentication credentials attached. These requests should be run as an "internal system user", an identity that represents the Druid cluster itself, with full access permissions. 
diff --git a/docs/content/design/broker.md b/docs/content/design/broker.md index 1eb51cd16ced..fe9119d4b8b8 100644 --- a/docs/content/design/broker.md +++ b/docs/content/design/broker.md @@ -26,7 +26,7 @@ title: "Broker" ### Configuration -For Broker Node Configuration, see [Broker Configuration](../configuration/index.html#broker). +For Broker Process Configuration, see [Broker Configuration](../configuration/index.html#broker). ### HTTP endpoints @@ -34,8 +34,8 @@ For a list of API endpoints supported by the Broker, see [Broker API](../operati ### Overview -The Broker is the node to route queries to if you want to run a distributed cluster. It understands the metadata published to ZooKeeper about what segments exist on what nodes and routes queries such that they hit the right nodes. This node also merges the result sets from all of the individual nodes together. -On start up, Realtime nodes announce themselves and the segments they are serving in Zookeeper. +The Broker is the process to route queries to if you want to run a distributed cluster. It understands the metadata published to ZooKeeper about what segments exist on what processes and routes queries such that they hit the right processes. This process also merges the result sets from all of the individual processes together. +On start up, Historical processes announce themselves and the segments they are serving in Zookeeper. ### Running @@ -45,11 +45,11 @@ org.apache.druid.cli.Main server broker ### Forwarding Queries -Most druid queries contain an interval object that indicates a span of time for which data is requested. Likewise, Druid [Segments](../design/segments.html) are partitioned to contain data for some interval of time and segments are distributed across a cluster. Consider a simple datasource with 7 segments where each segment contains data for a given day of the week. Any query issued to the datasource for more than one day of data will hit more than one segment. 
These segments will likely be distributed across multiple nodes, and hence, the query will likely hit multiple nodes. +Most druid queries contain an interval object that indicates a span of time for which data is requested. Likewise, Druid [Segments](../design/segments.html) are partitioned to contain data for some interval of time and segments are distributed across a cluster. Consider a simple datasource with 7 segments where each segment contains data for a given day of the week. Any query issued to the datasource for more than one day of data will hit more than one segment. These segments will likely be distributed across multiple processes, and hence, the query will likely hit multiple processes. -To determine which nodes to forward queries to, the Broker node first builds a view of the world from information in Zookeeper. Zookeeper maintains information about [Historical](../design/historical.html) and [Realtime](../design/realtime.html) nodes and the segments they are serving. For every datasource in Zookeeper, the Broker node builds a timeline of segments and the nodes that serve them. When queries are received for a specific datasource and interval, the Broker node performs a lookup into the timeline associated with the query datasource for the query interval and retrieves the nodes that contain data for the query. The Broker process then forwards down the query to the selected nodes. +To determine which processes to forward queries to, the Broker process first builds a view of the world from information in Zookeeper. Zookeeper maintains information about [Historical](../design/historical.html) and streaming ingestion [Peon](../design/peons.html) processes and the segments they are serving. For every datasource in Zookeeper, the Broker process builds a timeline of segments and the processes that serve them.
When queries are received for a specific datasource and interval, the Broker process performs a lookup into the timeline associated with the query datasource for the query interval and retrieves the processes that contain data for the query. The Broker process then forwards down the query to the selected processes. ### Caching -Broker nodes employ a cache with a LRU cache invalidation strategy. The Broker cache stores per-segment results. The cache can be local to each Broker process or shared across multiple processes using an external distributed cache such as [memcached](http://memcached.org/). Each time a broker node receives a query, it first maps the query to a set of segments. A subset of these segment results may already exist in the cache and the results can be directly pulled from the cache. For any segment results that do not exist in the cache, the broker node will forward the query to the -Historical nodes. Once the Historical processes return their results, the Broker will store those results in the cache. Real-time segments are never cached and hence requests for real-time data will always be forwarded to real-time nodes. Real-time data is perpetually changing and caching the results would be unreliable. +Broker processes employ a cache with a LRU cache invalidation strategy. The Broker cache stores per-segment results. The cache can be local to each Broker process or shared across multiple processes using an external distributed cache such as [memcached](http://memcached.org/). Each time a broker process receives a query, it first maps the query to a set of segments. A subset of these segment results may already exist in the cache and the results can be directly pulled from the cache. For any segment results that do not exist in the cache, the broker process will forward the query to the +Historical processes. Once the Historical processes return their results, the Broker will store those results in the cache. 
Real-time segments are never cached and hence requests for real-time data will always be forwarded to real-time processes. Real-time data is perpetually changing and caching the results would be unreliable. diff --git a/docs/content/design/coordinator.md b/docs/content/design/coordinator.md index a0fdb6b79133..9571f3a697bd 100644 --- a/docs/content/design/coordinator.md +++ b/docs/content/design/coordinator.md @@ -1,6 +1,6 @@ --- layout: doc_page -title: "Coordinator Node" +title: "Coordinator Process" --- -# Coordinator Node +# Coordinator Process ### Configuration -For Coordinator Node Configuration, see [Coordinator Configuration](../configuration/index.html#coordinator). +For Coordinator Process Configuration, see [Coordinator Configuration](../configuration/index.html#coordinator). ### HTTP endpoints @@ -34,11 +34,11 @@ For a list of API endpoints supported by the Coordinator, see [Coordinator API]( ### Overview -The Druid Coordinator node is primarily responsible for segment management and distribution. More specifically, the Druid Coordinator node communicates to Historical nodes to load or drop segments based on configurations. The Druid Coordinator is responsible for loading new segments, dropping outdated segments, managing segment replication, and balancing segment load. +The Druid Coordinator process is primarily responsible for segment management and distribution. More specifically, the Druid Coordinator process communicates to Historical processes to load or drop segments based on configurations. The Druid Coordinator is responsible for loading new segments, dropping outdated segments, managing segment replication, and balancing segment load. The Druid Coordinator runs periodically and the time between each run is a configurable parameter. Each time the Druid Coordinator runs, it assesses the current state of the cluster before deciding on the appropriate actions to take. 
Similar to the Broker and Historical processses, the Druid Coordinator maintains a connection to a Zookeeper cluster for current cluster information. The Coordinator also maintains a connection to a database containing information about available segments and rules. Available segments are stored in a segment table and list all segments that should be loaded in the cluster. Rules are stored in a rule table and indicate how segments should be handled. -Before any unassigned segments are serviced by Historical nodes, the available Historical nodes for each tier are first sorted in terms of capacity, with least capacity servers having the highest priority. Unassigned segments are always assigned to the nodes with least capacity to maintain a level of balance between nodes. The Coordinator does not directly communicate with a historical node when assigning it a new segment; instead the Coordinator creates some temporary information about the new segment under load queue path of the historical node. Once this request is seen, the historical node will load the segment and begin servicing it. +Before any unassigned segments are serviced by Historical processes, the available Historical processes for each tier are first sorted in terms of capacity, with least capacity servers having the highest priority. Unassigned segments are always assigned to the processes with least capacity to maintain a level of balance between processes. The Coordinator does not directly communicate with a historical process when assigning it a new segment; instead the Coordinator creates some temporary information about the new segment under load queue path of the historical process. Once this request is seen, the historical process will load the segment and begin servicing it. 
### Running @@ -57,16 +57,16 @@ Note that if all segments in database are deleted(or marked unused), then Coordi ### Segment Availability -If a Historical node restarts or becomes unavailable for any reason, the Druid Coordinator will notice a node has gone missing and treat all segments served by that node as being dropped. Given a sufficient period of time, the segments may be reassigned to other Historical nodes in the cluster. However, each segment that is dropped is not immediately forgotten. Instead, there is a transitional data structure that stores all dropped segments with an associated lifetime. The lifetime represents a period of time in which the Coordinator will not reassign a dropped segment. Hence, if a historical node becomes unavailable and available again within a short period of time, the historical node will start up and serve segments from its cache without any those segments being reassigned across the cluster. +If a Historical process restarts or becomes unavailable for any reason, the Druid Coordinator will notice a process has gone missing and treat all segments served by that process as being dropped. Given a sufficient period of time, the segments may be reassigned to other Historical processes in the cluster. However, each segment that is dropped is not immediately forgotten. Instead, there is a transitional data structure that stores all dropped segments with an associated lifetime. The lifetime represents a period of time in which the Coordinator will not reassign a dropped segment. Hence, if a historical process becomes unavailable and available again within a short period of time, the historical process will start up and serve segments from its cache without any of those segments being reassigned across the cluster.
### Balancing Segment Load -To ensure an even distribution of segments across Historical nodes in the cluster, the Coordinator node will find the total size of all segments being served by every Historical node each time the Coordinator runs. For every Historical node tier in the cluster, the Coordinator node will determine the Historical node with the highest utilization and the Historical node with the lowest utilization. The percent difference in utilization between the two nodes is computed, and if the result exceeds a certain threshold, a number of segments will be moved from the highest utilized node to the lowest utilized node. There is a configurable limit on the number of segments that can be moved from one node to another each time the Coordinator runs. Segments to be moved are selected at random and only moved if the resulting utilization calculation indicates the percentage difference between the highest and lowest servers has decreased. +To ensure an even distribution of segments across Historical processes in the cluster, the Coordinator process will find the total size of all segments being served by every Historical process each time the Coordinator runs. For every Historical process tier in the cluster, the Coordinator process will determine the Historical process with the highest utilization and the Historical process with the lowest utilization. The percent difference in utilization between the two processes is computed, and if the result exceeds a certain threshold, a number of segments will be moved from the highest utilized process to the lowest utilized process. There is a configurable limit on the number of segments that can be moved from one process to another each time the Coordinator runs. Segments to be moved are selected at random and only moved if the resulting utilization calculation indicates the percentage difference between the highest and lowest servers has decreased. 
### Compacting Segments Each run, the Druid Coordinator compacts small segments abutting each other. This is useful when you have a lot of small -segments which may degrade the query performance as well as increasing the disk space usage. +segments which may degrade query performance as well as increase disk space usage. See [Segment Size Optimization](../operations/segment-optimization.html) for details. The Coordinator first finds the segments to compact together based on the [segment search policy](#segment-search-policy). Once some segments are found, it launches a [compaction task](../ingestion/tasks.html#compaction-task) to compact those segments. @@ -113,28 +113,20 @@ If it finds such segments, it simply skips them. ### The Coordinator Console -The Druid Coordinator exposes a web GUI for displaying cluster information and rule configuration. After the Coordinator starts, the console can be accessed at: - -``` -http://: -``` - - There exists a full cluster view (which shows only the realtime and Historical nodes), as well as views for individual Historical nodes, datasources and segments themselves. Segment information can be displayed in raw JSON form or as part of a sortable and filterable table. - -The Coordinator console also exposes an interface to creating and editing rules. All valid datasources configured in the segment database, along with a default datasource, are available for configuration. Rules of different types can be added, deleted or edited. +The Druid Coordinator exposes a web GUI for displaying cluster information and rule configuration. For more details, please see [coordinator console](../operations/web-consoles.html#coordinator-console). ### FAQ -1. **Do clients ever contact the Coordinator node?** +1. **Do clients ever contact the Coordinator process?** The Coordinator is not involved in a query. - Historical nodes never directly contact the Coordinator node. 
The Druid Coordinator tells the Historical nodes to load/drop data via Zookeeper, but the Historical nodes are completely unaware of the Coordinator. + Historical processes never directly contact the Coordinator process. The Druid Coordinator tells the Historical processes to load/drop data via Zookeeper, but the Historical processes are completely unaware of the Coordinator. - Brokers also never contact the Coordinator. Brokers base their understanding of the data topology on metadata exposed by the Historical nodes via ZK and are completely unaware of the Coordinator. + Brokers also never contact the Coordinator. Brokers base their understanding of the data topology on metadata exposed by the Historical processes via ZK and are completely unaware of the Coordinator. -2. **Does it matter if the Coordinator node starts up before or after other processes?** +2. **Does it matter if the Coordinator process starts up before or after other processes?** - No. If the Druid Coordinator is not started up, no new segments will be loaded in the cluster and outdated segments will not be dropped. However, the Coordinator node can be started up at any time, and after a configurable delay, will start running Coordinator tasks. + No. If the Druid Coordinator is not started up, no new segments will be loaded in the cluster and outdated segments will not be dropped. However, the Coordinator process can be started up at any time, and after a configurable delay, will start running Coordinator tasks. This also means that if you have a working cluster and all of your Coordinators die, the cluster will continue to function, it just won’t experience any changes to its data topology. 
diff --git a/docs/content/design/historical.md b/docs/content/design/historical.md index 5d4efb407b58..c44d3fbea38a 100644 --- a/docs/content/design/historical.md +++ b/docs/content/design/historical.md @@ -1,6 +1,6 @@ --- layout: doc_page -title: "Historical Node" +title: "Historical Process" --- -# Historical Node +# Historical Process ### Configuration -For Historical Node Configuration, see [Historical Configuration](../configuration/index.html#historical). +For Historical Process Configuration, see [Historical Configuration](../configuration/index.html#historical). ### HTTP Endpoints @@ -40,20 +40,20 @@ org.apache.druid.cli.Main server historical ### Loading and Serving Segments -Each Historical node maintains a constant connection to Zookeeper and watches a configurable set of Zookeeper paths for new segment information. Historical nodes do not communicate directly with each other or with the Coordinator nodes but instead rely on Zookeeper for coordination. +Each Historical process maintains a constant connection to Zookeeper and watches a configurable set of Zookeeper paths for new segment information. Historical processes do not communicate directly with each other or with the Coordinator processes but instead rely on Zookeeper for coordination. -The [Coordinator](../design/coordinator.html) node is responsible for assigning new segments to Historical nodes. Assignment is done by creating an ephemeral Zookeeper entry under a load queue path associated with a Historical node. For more information on how the Coordinator assigns segments to Historical nodes, please see [Coordinator](../design/coordinator.html). +The [Coordinator](../design/coordinator.html) process is responsible for assigning new segments to Historical processes. Assignment is done by creating an ephemeral Zookeeper entry under a load queue path associated with a Historical process. 
For more information on how the Coordinator assigns segments to Historical processes, please see [Coordinator](../design/coordinator.html). -When a Historical node notices a new load queue entry in its load queue path, it will first check a local disk directory (cache) for the information about segment. If no information about the segment exists in the cache, the Historical node will download metadata about the new segment to serve from Zookeeper. This metadata includes specifications about where the segment is located in deep storage and about how to decompress and process the segment. For more information about segment metadata and Druid segments in general, please see [Segments](../design/segments.html). Once a Historical node completes processing a segment, the segment is announced in Zookeeper under a served segments path associated with the node. At this point, the segment is available for querying. +When a Historical process notices a new load queue entry in its load queue path, it will first check a local disk directory (cache) for the information about the segment. If no information about the segment exists in the cache, the Historical process will download metadata about the new segment to serve from Zookeeper. This metadata includes specifications about where the segment is located in deep storage and about how to decompress and process the segment. For more information about segment metadata and Druid segments in general, please see [Segments](../design/segments.html). Once a Historical process completes processing a segment, the segment is announced in Zookeeper under a served segments path associated with the process. At this point, the segment is available for querying. ### Loading and Serving Segments From Cache -Recall that when a Historical node notices a new segment entry in its load queue path, the Historical node first checks a configurable cache directory on its local disk to see if the segment had been previously downloaded.
If a local cache entry already exists, the Historical node will directly read the segment binary files from disk and load the segment. +Recall that when a Historical process notices a new segment entry in its load queue path, the Historical process first checks a configurable cache directory on its local disk to see if the segment had been previously downloaded. If a local cache entry already exists, the Historical process will directly read the segment binary files from disk and load the segment. -The segment cache is also leveraged when a Historical node is first started. On startup, a Historical node will search through its cache directory and immediately load and serve all segments that are found. This feature allows Historical nodes to be queried as soon they come online. +The segment cache is also leveraged when a Historical process is first started. On startup, a Historical process will search through its cache directory and immediately load and serve all segments that are found. This feature allows Historical processes to be queried as soon as they come online. ### Querying Segments -Please see [Querying](../querying/querying.html) for more information on querying Historical nodes. +Please see [Querying](../querying/querying.html) for more information on querying Historical processes. A Historical can be configured to log and report metrics for every query it services. diff --git a/docs/content/design/indexing-service.md b/docs/content/design/indexing-service.md index 8a3d5a6ea43b..19d788b7daa7 100644 --- a/docs/content/design/indexing-service.md +++ b/docs/content/design/indexing-service.md @@ -29,7 +29,7 @@ The indexing service is a highly-available, distributed service that runs indexi Indexing [tasks](../ingestion/tasks.html) create (and sometimes destroy) Druid [segments](../design/segments.html). The indexing service has a master/slave like architecture.
The indexing service is composed of three main components: a [Peon](../design/peons.html) component that can run a single task, a [Middle Manager](../design/middlemanager.html) component that manages Peons, and an [Overlord](../design/overlord.html) component that manages task distribution to MiddleManagers. -Overlords and MiddleManagers may run on the same node or across multiple nodes while MiddleManagers and Peons always run on the same node. +Overlords and MiddleManagers may run on the same process or across multiple processes while MiddleManagers and Peons always run on the same process. Tasks are managed using API endpoints on the Overlord service. Please see [Overlord Task API](../operations/api-reference.html#overlord-tasks) for more information. diff --git a/docs/content/design/middlemanager.md b/docs/content/design/middlemanager.md index fa5b19142196..60d1d0ee46b4 100644 --- a/docs/content/design/middlemanager.md +++ b/docs/content/design/middlemanager.md @@ -1,6 +1,6 @@ --- layout: doc_page -title: "MiddleManager Node" +title: "MiddleManager Process" --- -# MiddleManager Node +# MiddleManager Process ### Configuration -For Middlemanager Node Configuration, see [Indexing Service Configuration](../configuration/index.html#middlemanager-and-peons). +For Middlemanager Process Configuration, see [Indexing Service Configuration](../configuration/index.html#middlemanager-and-peons). ### HTTP Endpoints @@ -34,7 +34,7 @@ For a list of API endpoints supported by the MiddleManager, please see the [API ### Overview -The MiddleManager node is a worker node that executes submitted tasks. Middle Managers forward tasks to Peons that run in separate JVMs. +The MiddleManager process is a worker process that executes submitted tasks. Middle Managers forward tasks to Peons that run in separate JVMs. The reason we have separate JVMs for tasks is for resource and log isolation. 
Each [Peon](../design/peons.html) is capable of running only one task at a time, however, a MiddleManager may have multiple Peons. ### Running diff --git a/docs/content/design/overlord.md b/docs/content/design/overlord.md index fb5cf5662b8a..40f3c440215d 100644 --- a/docs/content/design/overlord.md +++ b/docs/content/design/overlord.md @@ -1,6 +1,6 @@ --- layout: doc_page -title: "Overlord Node" +title: "Overlord Process" --- -# Overlord Node +# Overlord Process ### Configuration -For Overlord Node Configuration, see [Overlord Configuration](../configuration/index.html#overlord). +For Overlord Process Configuration, see [Overlord Configuration](../configuration/index.html#overlord). ### HTTP Endpoints @@ -34,22 +34,18 @@ For a list of API endpoints supported by the Overlord, please see the [API refer ### Overview -The Overlord node is responsible for accepting tasks, coordinating task distribution, creating locks around tasks, and returning statuses to callers. Overlord can be configured to run in one of two modes - local or remote (local being default). +The Overlord process is responsible for accepting tasks, coordinating task distribution, creating locks around tasks, and returning statuses to callers. Overlord can be configured to run in one of two modes - local or remote (local being default). In local mode Overlord is also responsible for creating Peons for executing tasks. When running the Overlord in local mode, all MiddleManager and Peon configurations must be provided as well. Local mode is typically used for simple workflows. In remote mode, the Overlord and MiddleManager are run in separate processes and you can run each on a different server. This mode is recommended if you intend to use the indexing service as the single endpoint for all Druid indexing. ### Overlord Console -The Overlord console can be used to view pending tasks, running tasks, available workers, and recent worker creation and termination. 
The console can be accessed at: - -``` -http://:/console.html -``` +The Overlord provides a UI for managing tasks and workers. For more details, please see [overlord console](../operations/web-consoles.html#overlord-console). ### Blacklisted Workers -If the workers fail tasks above a threshold, the Overlord will blacklist these workers. No more than 20% of the nodes can be blacklisted. Blacklisted nodes will be periodically whitelisted. +If a MiddleManager has task failures above a threshold, the Overlord will blacklist these MiddleManagers. No more than 20% of the MiddleManagers can be blacklisted. Blacklisted MiddleManagers will be periodically whitelisted. The following vairables can be used to set the threshold and blacklist timeouts. @@ -62,6 +58,6 @@ druid.indexer.runner.maxPercentageBlacklistWorkers ### Autoscaling -The Autoscaling mechanisms currently in place are tightly coupled with our deployment infrastructure but the framework should be in place for other implementations. We are highly open to new implementations or extensions of the existing mechanisms. In our own deployments, MiddleManager nodes are Amazon AWS EC2 nodes and they are provisioned to register themselves in a [galaxy](https://github.com/ning/galaxy) environment. +The Autoscaling mechanisms currently in place are tightly coupled with our deployment infrastructure but the framework should be in place for other implementations. We are highly open to new implementations or extensions of the existing mechanisms. In our own deployments, MiddleManager processes are Amazon AWS EC2 nodes and they are provisioned to register themselves in a [galaxy](https://github.com/ning/galaxy) environment. -If autoscaling is enabled, new MiddleManagers may be added when a task has been in pending state for too long. Middle managers may be terminated if they have not run any tasks for a period of time. 
+If autoscaling is enabled, new MiddleManagers may be added when a task has been in pending state for too long. MiddleManagers may be terminated if they have not run any tasks for a period of time. diff --git a/docs/content/design/processes.md b/docs/content/design/processes.md index 87730ecfce69..3cab6eca94b7 100644 --- a/docs/content/design/processes.md +++ b/docs/content/design/processes.md @@ -78,6 +78,8 @@ caller. End users typically query Brokers rather than querying Historicals or Mi Overlords, and Coordinators. They are optional since you can also simply contact the Druid Brokers, Overlords, and Coordinators directly. +The Router also runs the [Druid Console](../operations/management-uis.html#druid-console), a management UI for datasources, segments, tasks, data processes (Historicals and MiddleManagers), and coordinator dynamic configuration. The user can also run SQL and native Druid queries within the console. + ### Data server A Data server executes ingestion jobs and stores queryable data. @@ -124,6 +126,6 @@ Please see [Coordinator Configuration: Operation](../configuration/index.html#co ### Historicals and MiddleManagers -With higher levels of ingestion or query load, it can make sense to deploy the Historical and MiddleManager processes on separate nodes to to avoid CPU and memory contention. +With higher levels of ingestion or query load, it can make sense to deploy the Historical and MiddleManager processes on separate hosts to avoid CPU and memory contention. The Historical also benefits from having free memory for memory mapped segments, which can be another reason to deploy the Historical and MiddleManager processes separately.
\ No newline at end of file diff --git a/docs/content/design/realtime.md b/docs/content/design/realtime.md index c058bbfd4a63..f3b618a91563 100644 --- a/docs/content/design/realtime.md +++ b/docs/content/design/realtime.md @@ -1,6 +1,6 @@ --- layout: doc_page -title: "Real-time Node" +title: "Real-time Process" --- -# Real-time Node +# Real-time Process
-NOTE: Realtime nodes are deprecated. Please use the Kafka Indexing Service for stream pull use cases instead. +NOTE: Realtime processes are deprecated. Please use the Kafka Indexing Service for stream pull use cases instead.
-For Real-time Node Configuration, see [Realtime Configuration](../configuration/realtime.html). +For Real-time Process Configuration, see [Realtime Configuration](../configuration/realtime.html). For Real-time Ingestion, see [Realtime Ingestion](../ingestion/stream-ingestion.html). -Realtime nodes provide a realtime index. Data indexed via these nodes is immediately available for querying. Realtime nodes will periodically build segments representing the data they’ve collected over some span of time and transfer these segments off to [Historical](../design/historical.html) nodes. They use ZooKeeper to monitor the transfer and the metadata storage to store metadata about the transferred segment. Once transfered, segments are forgotten by the Realtime nodes. +Realtime processes provide a realtime index. Data indexed via these processes is immediately available for querying. Realtime processes will periodically build segments representing the data they’ve collected over some span of time and transfer these segments off to [Historical](../design/historical.html) processes. They use ZooKeeper to monitor the transfer and the metadata storage to store metadata about the transferred segment. Once transferred, segments are forgotten by the Realtime processes. ### Running @@ -71,10 +71,10 @@ Given those expectations, adding a firehose is straightforward and completely en HTTP Endpoints -------------- -The real-time node exposes several HTTP endpoints for interactions. +The real-time process exposes several HTTP endpoints for interactions. ### GET * `/status` -Returns the Druid version, loaded extensions, memory used, total memory and other useful information about the node. +Returns the Druid version, loaded extensions, memory used, total memory and other useful information about the process. 
diff --git a/docs/content/development/experimental.md b/docs/content/development/experimental.md index 70abd6dff38c..e09d26b68428 100644 --- a/docs/content/development/experimental.md +++ b/docs/content/development/experimental.md @@ -36,4 +36,4 @@ To enable experimental features, include their artifacts in the configuration ru druid.extensions.loadList=["druid-histogram"] ``` -The configuration files for all the indexer and query nodes need to be updated with this. +The configuration files for all the Druid processes need to be updated with this. diff --git a/docs/content/development/extensions-contrib/kafka-simple.md b/docs/content/development/extensions-contrib/kafka-simple.md index 5268b307a43b..998d12b53554 100644 --- a/docs/content/development/extensions-contrib/kafka-simple.md +++ b/docs/content/development/extensions-contrib/kafka-simple.md @@ -28,7 +28,7 @@ To use this extension, make sure to [include](../../operations/including-extensi ## Firehose -This is an experimental firehose to ingest data from kafka using kafka simple consumer api. Currently, this firehose would only work inside standalone realtime nodes. +This is an experimental firehose to ingest data from kafka using kafka simple consumer api. Currently, this firehose would only work inside standalone realtime processes. The configuration for KafkaSimpleConsumerFirehose is similar to the Kafka Eight Firehose , except `firehose` should be replaced with `firehoseV2` like this: ```json diff --git a/docs/content/development/extensions-contrib/materialized-view.md b/docs/content/development/extensions-contrib/materialized-view.md index 65fa689503e9..96d8ffb02180 100644 --- a/docs/content/development/extensions-contrib/materialized-view.md +++ b/docs/content/development/extensions-contrib/materialized-view.md @@ -131,4 +131,4 @@ There are 2 parts in a view query: |queryType |The query type. This should always be view |yes| |query |The real query of this `view` query. 
The real query must be [groupBy](../../querying/groupbyquery.html), [topN](../../querying/topnquery.html), or [timeseries](../../querying/timeseriesquery.html) type.|yes| -**Note that Materialized View is currently designated as experimental. Please make sure the time of all nodes are the same and increase monotonically. Otherwise, some unexpected errors may happen on query results.** +**Note that Materialized View is currently designated as experimental. Please make sure the times of all processes are the same and increase monotonically. Otherwise, some unexpected errors may happen on query results.** diff --git a/docs/content/development/extensions-contrib/opentsdb-emitter.md b/docs/content/development/extensions-contrib/opentsdb-emitter.md index 780436bfe139..dc49dff49d90 100644 --- a/docs/content/development/extensions-contrib/opentsdb-emitter.md +++ b/docs/content/development/extensions-contrib/opentsdb-emitter.md @@ -42,7 +42,7 @@ All the configuration parameters for the opentsdb emitter are under `druid.emitt |`druid.emitter.opentsdb.readTimeout`|`Jersey client` read timeout(in milliseconds).|no|2000| |`druid.emitter.opentsdb.flushThreshold`|Queue flushing threshold.(Events will be sent as one batch)|no|100| |`druid.emitter.opentsdb.maxQueueSize`|Maximum size of the queue used to buffer events.|no|1000| -|`druid.emitter.opentsdb.consumeDelay`|Queue consuming delay(in milliseconds). Actually, we use `ScheduledExecutorService` to schedule consuming events, so this `consumeDelay` means the delay between the termination of one execution and the commencement of the next. If your druid nodes produce metric events fast, then you should decrease this `consumeDelay` or increase the `maxQueueSize`.|no|10000| +|`druid.emitter.opentsdb.consumeDelay`|Queue consuming delay(in milliseconds). 
Actually, we use `ScheduledExecutorService` to schedule consuming events, so this `consumeDelay` means the delay between the termination of one execution and the commencement of the next. If your druid processes produce metric events fast, then you should decrease this `consumeDelay` or increase the `maxQueueSize`.|no|10000| |`druid.emitter.opentsdb.metricMapPath`|JSON file defining the desired metrics and dimensions for every Druid metric|no|./src/main/resources/defaultMetrics.json| ### Druid to OpenTSDB Event Converter diff --git a/docs/content/development/extensions-contrib/redis-cache.md b/docs/content/development/extensions-contrib/redis-cache.md index d7a006f0f47f..e32ea991c54b 100644 --- a/docs/content/development/extensions-contrib/redis-cache.md +++ b/docs/content/development/extensions-contrib/redis-cache.md @@ -29,9 +29,9 @@ A cache implementation for Druid based on [Redis](https://github.com/antirez/red # Configuration Below are the configuration options known to this module. -Note that just adding these properties does not enable the cache. You still need to add the `druid..cache.useCache` and `druid..cache.populateCache` properties for the nodes you want to enable the cache on as described in the [cache configuration docs](../../configuration/index.html#cache-configuration). +Note that just adding these properties does not enable the cache. You still need to add the `druid..cache.useCache` and `druid..cache.populateCache` properties for the processes you want to enable the cache on as described in the [cache configuration docs](../../configuration/index.html#cache-configuration). -A possible configuration would be to keep the properties below in your `common.runtime.properties` file (present on all nodes) and then add `druid..cache.useCache` and `druid..cache.populateCache` in the `runtime.properties` file of the node types you want to enable caching on. 
+A possible configuration would be to keep the properties below in your `common.runtime.properties` file (present on all processes) and then add `druid..cache.useCache` and `druid..cache.populateCache` in the `runtime.properties` file of the process types you want to enable caching on. |`common.runtime.properties`|Description|Default|Required| diff --git a/docs/content/development/extensions-contrib/thrift.md b/docs/content/development/extensions-contrib/thrift.md index bd9a378b5467..40ca31b372be 100644 --- a/docs/content/development/extensions-contrib/thrift.md +++ b/docs/content/development/extensions-contrib/thrift.md @@ -30,6 +30,10 @@ This extension enables Druid to ingest thrift compact data online (`ByteBuffer`) You may want to use another version of thrift, change the dependency in pom and compile yourself. +## LZO Support + +If you plan to read LZO-compressed Thrift files, you will need to download version 0.4.19 of the [hadoop-lzo JAR](https://mvnrepository.com/artifact/com.hadoop.gplcompression/hadoop-lzo/0.4.19) and place it in your `extensions/druid-thrift-extensions` directory. + ## Thrift Parser diff --git a/docs/content/development/extensions-core/approximate-histograms.md b/docs/content/development/extensions-core/approximate-histograms.md index ea9e17b32435..b60ff1310e90 100644 --- a/docs/content/development/extensions-core/approximate-histograms.md +++ b/docs/content/development/extensions-core/approximate-histograms.md @@ -28,7 +28,11 @@ Make sure to [include](../../operations/including-extensions.html) `druid-histog The `druid-histogram` extension provides an approximate histogram aggregator and a fixed buckets histogram aggregator. -## Approximate Histogram aggregator +## Approximate Histogram aggregator (Deprecated) + +
+The Approximate Histogram aggregator is deprecated. Please use DataSketches Quantiles instead which provides a superior distribution-independent algorithm with formal error guarantees. +
This aggregator is based on [http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf) diff --git a/docs/content/development/extensions-core/druid-basic-security.md b/docs/content/development/extensions-core/druid-basic-security.md index 890b2738d3a7..442cad659f97 100644 --- a/docs/content/development/extensions-core/druid-basic-security.md +++ b/docs/content/development/extensions-core/druid-basic-security.md @@ -43,7 +43,7 @@ These configuration properties should be added to the common runtime properties ### Properties |Property|Description|Default|required| |--------|-----------|-------|--------| -|`druid.auth.basic.common.pollingPeriod`|Defines in milliseconds how often nodes should poll the Coordinator for the current authenticator/authorizer database state.|60000|No| +|`druid.auth.basic.common.pollingPeriod`|Defines in milliseconds how often processes should poll the Coordinator for the current authenticator/authorizer database state.|60000|No| |`druid.auth.basic.common.maxRandomDelay`|Defines in milliseconds the amount of random delay to add to the pollingPeriod, to spread polling requests across time.|6000|No| |`druid.auth.basic.common.maxSyncRetries`|Determines how many times a service will retry if the authentication/authorization database state sync with the Coordinator fails.|10|No| |`druid.auth.basic.common.cacheDirectory`|If defined, snapshots of the basic Authenticator and Authorizer database caches will be stored on disk in this directory. 
If this property is defined, when a service is starting, it will attempt to initialize its caches from these on-disk snapshots, if the service is unable to initialize its state by communicating with the Coordinator.|null|No| @@ -74,8 +74,8 @@ The configuration examples in the rest of this document will use "MyBasicAuthent |Property|Description|Default|required| |--------|-----------|-------|--------| |`druid.auth.authenticator.MyBasicAuthenticator.initialAdminPassword`|Initial [Password Provider](../../operations/password-provider.html) for the automatically created default admin user. If no password is specified, the default admin user will not be created. If the default admin user already exists, setting this property will not affect its password.|null|No| -|`druid.auth.authenticator.MyBasicAuthenticator.initialInternalClientPassword`|Initial [Password Provider](../../operations/password-provider.html) for the default internal system user, used for internal node communication. If no password is specified, the default internal system user will not be created. If the default internal system user already exists, setting this property will not affect its password.|null|No| -|`druid.auth.authenticator.MyBasicAuthenticator.enableCacheNotifications`|If true, the Coordinator will notify Druid nodes whenever a configuration change to this Authenticator occurs, allowing them to immediately update their state without waiting for polling.|true|No| +|`druid.auth.authenticator.MyBasicAuthenticator.initialInternalClientPassword`|Initial [Password Provider](../../operations/password-provider.html) for the default internal system user, used for internal process communication. If no password is specified, the default internal system user will not be created. 
If the default internal system user already exists, setting this property will not affect its password.|null|No| +|`druid.auth.authenticator.MyBasicAuthenticator.enableCacheNotifications`|If true, the Coordinator will notify Druid processes whenever a configuration change to this Authenticator occurs, allowing them to immediately update their state without waiting for polling.|true|No| |`druid.auth.authenticator.MyBasicAuthenticator.cacheNotificationTimeout`|The timeout in milliseconds for the cache notifications.|5000|No| |`druid.auth.authenticator.MyBasicAuthenticator.credentialIterations`|Number of iterations to use for password hashing.|10000|No| |`druid.auth.authenticator.MyBasicAuthenticator.authorizerName`|Authorizer that requests should be directed to|N/A|Yes| @@ -116,7 +116,7 @@ druid.auth.authorizer.. #### Properties |Property|Description|Default|required| |--------|-----------|-------|--------| -|`druid.auth.authorizer.MyBasicAuthorizer.enableCacheNotifications`|If true, the Coordinator will notify Druid nodes whenever a configuration change to this Authorizer occurs, allowing them to immediately update their state without waiting for polling.|true|No| +|`druid.auth.authorizer.MyBasicAuthorizer.enableCacheNotifications`|If true, the Coordinator will notify Druid processes whenever a configuration change to this Authorizer occurs, allowing them to immediately update their state without waiting for polling.|true|No| |`druid.auth.authorizer.MyBasicAuthorizer.cacheNotificationTimeout`|The timeout in milliseconds for the cache notifications.|5000|No| ## Usage @@ -260,7 +260,7 @@ There are two possible resource names for the "CONFIG" resource type, "CONFIG" a "CONFIG" resource name covers the following endpoints: -|Endpoint|Node Type| +|Endpoint|Process Type| |--------|---------| |`/druid/coordinator/v1/config`|coordinator| |`/druid/indexer/v1/worker`|overlord| @@ -270,7 +270,7 @@ There are two possible resource names for the "CONFIG" resource type, "CONFIG" a 
"security" resource name covers the following endpoint: -|Endpoint|Node Type| +|Endpoint|Process Type| |--------|---------| |`/druid-ext/basic-security/authentication`|coordinator| |`/druid-ext/basic-security/authorization`|coordinator| @@ -280,7 +280,7 @@ There is only one possible resource name for the "STATE" config resource type, " "STATE" resource name covers the following endpoints: -|Endpoint|Node Type| +|Endpoint|Process Type| |--------|---------| |`/druid/coordinator/v1`|coordinator| |`/druid/coordinator/v1/rules`|coordinator| @@ -312,10 +312,10 @@ GET requires READ permission, while POST and DELETE require WRITE permission. ## Configuration Propagation -To prevent excessive load on the Coordinator, the Authenticator and Authorizer user/role database state is cached on each Druid node. +To prevent excessive load on the Coordinator, the Authenticator and Authorizer user/role database state is cached on each Druid process. -Each node will periodically poll the Coordinator for the latest database state, controlled by the `druid.auth.basic.common.pollingPeriod` and `druid.auth.basic.common.maxRandomDelay` properties. +Each process will periodically poll the Coordinator for the latest database state, controlled by the `druid.auth.basic.common.pollingPeriod` and `druid.auth.basic.common.maxRandomDelay` properties. -When a configuration update occurs, the Coordinator can optionally notify each node with the updated database state. This behavior is controlled by the `enableCacheNotifications` and `cacheNotificationTimeout` properties on Authenticators and Authorizers. +When a configuration update occurs, the Coordinator can optionally notify each process with the updated database state. This behavior is controlled by the `enableCacheNotifications` and `cacheNotificationTimeout` properties on Authenticators and Authorizers. -Note that because of the caching, changes made to the user/role database may not be immediately reflected at each Druid node. 
+Note that because of the caching, changes made to the user/role database may not be immediately reflected at each Druid process. diff --git a/docs/content/development/extensions-core/druid-kerberos.md b/docs/content/development/extensions-core/druid-kerberos.md index c97004a00eb7..649dc2015508 100644 --- a/docs/content/development/extensions-core/druid-kerberos.md +++ b/docs/content/development/extensions-core/druid-kerberos.md @@ -24,7 +24,7 @@ title: "Kerberos" # Kerberos -Druid Extension to enable Authentication for Druid Nodes using Kerberos. +Druid Extension to enable Authentication for Druid Processes using Kerberos. This extension adds an Authenticator which is used to protect HTTP Endpoints using the simple and protected GSSAPI negotiation mechanism [SPNEGO](https://en.wikipedia.org/wiki/SPNEGO). Make sure to [include](../../operations/including-extensions.html) `druid-kerberos` as an extension. @@ -51,14 +51,14 @@ The configuration examples in the rest of this document will use "kerberos" as t ### Properties |Property|Possible Values|Description|Default|required| |--------|---------------|-----------|-------|--------| -|`druid.auth.authenticator.kerberos.serverPrincipal`|`HTTP/_HOST@EXAMPLE.COM`| SPNego service principal used by druid nodes|empty|Yes| -|`druid.auth.authenticator.kerberos.serverKeytab`|`/etc/security/keytabs/spnego.service.keytab`|SPNego service keytab used by druid nodes|empty|Yes| +|`druid.auth.authenticator.kerberos.serverPrincipal`|`HTTP/_HOST@EXAMPLE.COM`| SPNego service principal used by druid processes|empty|Yes| +|`druid.auth.authenticator.kerberos.serverKeytab`|`/etc/security/keytabs/spnego.service.keytab`|SPNego service keytab used by druid processes|empty|Yes| |`druid.auth.authenticator.kerberos.authToLocal`|`RULE:[1:$1@$0](druid@EXAMPLE.COM)s/.*/druid DEFAULT`|It allows you to set a general rule for mapping principal names to local user names. 
It will be used if there is not an explicit mapping for the principal name that is being translated.|DEFAULT|No| |`druid.auth.authenticator.kerberos.excludedPaths`|`['/status','/health']`| Array of HTTP paths which which does NOT need to be authenticated.|None|No| |`druid.auth.authenticator.kerberos.cookieSignatureSecret`|`secretString`| Secret used to sign authentication cookies. It is advisable to explicitly set it, if you have multiple druid ndoes running on same machine with different ports as the Cookie Specification does not guarantee isolation by port.||No| |`druid.auth.authenticator.kerberos.authorizerName`|Depends on available authorizers|Authorizer that requests should be directed to|Empty|Yes| -As a note, it is required that the SPNego principal in use by the druid nodes must start with HTTP (This specified by [RFC-4559](https://tools.ietf.org/html/rfc4559)) and must be of the form "HTTP/_HOST@REALM". +As a note, it is required that the SPNego principal in use by the druid processes must start with HTTP (This specified by [RFC-4559](https://tools.ietf.org/html/rfc4559)) and must be of the form "HTTP/_HOST@REALM". The special string _HOST will be replaced automatically with the value of config `druid.host` ### Auth to Local Syntax @@ -74,14 +74,14 @@ In such cases, max request header size that druid can handle can be increased by ## Configuring Kerberos Escalated Client -Druid internal nodes communicate with each other using an escalated http Client. A Kerberos enabled escalated HTTP Client can be configured by following properties - +Druid internal processes communicate with each other using an escalated http Client. 
A Kerberos enabled escalated HTTP Client can be configured by following properties - |Property|Example Values|Description|Default|required| |--------|---------------|-----------|-------|--------| -|`druid.escalator.type`|`kerberos`| Type of Escalator client used for internal node communication.|n/a|Yes| -|`druid.escalator.internalClientPrincipal`|`druid@EXAMPLE.COM`| Principal user name, used for internal node communication|n/a|Yes| -|`druid.escalator.internalClientKeytab`|`/etc/security/keytabs/druid.keytab`|Path to keytab file used for internal node communication|n/a|Yes| +|`druid.escalator.type`|`kerberos`| Type of Escalator client used for internal process communication.|n/a|Yes| +|`druid.escalator.internalClientPrincipal`|`druid@EXAMPLE.COM`| Principal user name, used for internal process communication|n/a|Yes| +|`druid.escalator.internalClientKeytab`|`/etc/security/keytabs/druid.keytab`|Path to keytab file used for internal process communication|n/a|Yes| |`druid.escalator.authorizerName`|`MyBasicAuthorizer`|Authorizer that requests should be directed to.|n/a|Yes| ## Accessing Druid HTTP end points when kerberos security is enabled diff --git a/docs/content/development/extensions-core/kafka-ingestion.md b/docs/content/development/extensions-core/kafka-ingestion.md index b24e8748538d..2816ab062322 100644 --- a/docs/content/development/extensions-core/kafka-ingestion.md +++ b/docs/content/development/extensions-core/kafka-ingestion.md @@ -192,7 +192,7 @@ For Roaring bitmaps: |`topic`|String|The Kafka topic to read from. This must be a specific topic as topic patterns are not supported.|yes| |`consumerProperties`|Map|A map of properties to be passed to the Kafka consumer. This must contain a property `bootstrap.servers` with a list of Kafka brokers in the form: `:,:,...`. 
For SSL connections, the `keystore`, `truststore` and `key` passwords can be provided as a [Password Provider](../../operations/password-provider.html) or String password.|yes| |`pollTimeout`|Long|The length of time to wait for the kafka consumer to poll records, in milliseconds|no (default == 100)| -|`replicas`|Integer|The number of replica sets, where 1 means a single set of tasks (no replication). Replica tasks will always be assigned to different workers to provide resiliency against node failure.|no (default == 1)| +|`replicas`|Integer|The number of replica sets, where 1 means a single set of tasks (no replication). Replica tasks will always be assigned to different workers to provide resiliency against process failure.|no (default == 1)| |`taskCount`|Integer|The maximum number of *reading* tasks in a *replica set*. This means that the maximum number of reading tasks will be `taskCount * replicas` and the total number of tasks (*reading* + *publishing*) will be higher than this. See 'Capacity Planning' below for more details. The number of reading tasks will be less than `taskCount` if `taskCount > {numKafkaPartitions}`.|no (default == 1)| |`taskDuration`|ISO8601 Period|The length of time before tasks stop reading and begin publishing their segment.|no (default == PT1H)| |`startDelay`|ISO8601 Period|The period to wait before the supervisor starts managing tasks.|no (default == PT5S)| @@ -279,7 +279,7 @@ in data loss (assuming the tasks run before Kafka purges those offsets). A running task will normally be in one of two states: *reading* or *publishing*. A task will remain in reading state for `taskDuration`, at which point it will transition to publishing state. 
A task will remain in publishing state for as long -as it takes to generate segments, push segments to deep storage, and have them be loaded and served by a Historical node +as it takes to generate segments, push segments to deep storage, and have them be loaded and served by a Historical process (or until `completionTimeout` elapses). The number of reading tasks is controlled by `replicas` and `taskCount`. In general, there will be `replicas * taskCount` @@ -336,7 +336,7 @@ for this segment granularity is created for further events. Kafka Indexing Task means that all the segments created by a task will not be held up till the task duration is over. As soon as maxRowsPerSegment, maxTotalRows or intermediateHandoffPeriod limit is hit, all the segments held by the task at that point in time will be handed-off and new set of segments will be created for further events. This means that the task can run for longer durations of time -without accumulating old segments locally on Middle Manager nodes and it is encouraged to do so. +without accumulating old segments locally on Middle Manager processes and it is encouraged to do so. Kafka Indexing Service may still produce some small segments. Lets say the task duration is 4 hours, segment granularity is set to an HOUR and Supervisor was started at 9:10 then after 4 hours at 13:10, new set of tasks will be started and diff --git a/docs/content/development/extensions-core/kinesis-ingestion.md b/docs/content/development/extensions-core/kinesis-ingestion.md index d0c29dee9ab4..47041709c37e 100644 --- a/docs/content/development/extensions-core/kinesis-ingestion.md +++ b/docs/content/development/extensions-core/kinesis-ingestion.md @@ -192,7 +192,7 @@ For Roaring bitmaps: |-----|----|-----------|--------| |`stream`|String|The Kinesis stream to read.|yes| |`endpoint`|String|The AWS Kinesis stream endpoint for a region. 
You can find a list of endpoints [here](http://docs.aws.amazon.com/general/latest/gr/rande.html#ak_region).|no (default == kinesis.us-east-1.amazonaws.com)| -|`replicas`|Integer|The number of replica sets, where 1 means a single set of tasks (no replication). Replica tasks will always be assigned to different workers to provide resiliency against node failure.|no (default == 1)| +|`replicas`|Integer|The number of replica sets, where 1 means a single set of tasks (no replication). Replica tasks will always be assigned to different workers to provide resiliency against process failure.|no (default == 1)| |`taskCount`|Integer|The maximum number of *reading* tasks in a *replica set*. This means that the maximum number of reading tasks will be `taskCount * replicas` and the total number of tasks (*reading* + *publishing*) will be higher than this. See 'Capacity Planning' below for more details. The number of reading tasks will be less than `taskCount` if `taskCount > {numKinesisshards}`.|no (default == 1)| |`taskDuration`|ISO8601 Period|The length of time before tasks stop reading and begin publishing their segment.|no (default == PT1H)| |`startDelay`|ISO8601 Period|The period to wait before the supervisor starts managing tasks.|no (default == PT5S)| @@ -282,7 +282,7 @@ in data loss (assuming the tasks run before Kinesis purges those sequence number A running task will normally be in one of two states: *reading* or *publishing*. A task will remain in reading state for `taskDuration`, at which point it will transition to publishing state. A task will remain in publishing state for as long -as it takes to generate segments, push segments to deep storage, and have them be loaded and served by a Historical node +as it takes to generate segments, push segments to deep storage, and have them be loaded and served by a Historical process (or until `completionTimeout` elapses). The number of reading tasks is controlled by `replicas` and `taskCount`. 
In general, there will be `replicas * taskCount` @@ -339,7 +339,7 @@ for this segment granularity is created for further events. Kinesis Indexing Tas means that all the segments created by a task will not be held up till the task duration is over. As soon as maxRowsPerSegment, maxTotalRows or intermediateHandoffPeriod limit is hit, all the segments held by the task at that point in time will be handed-off and new set of segments will be created for further events. This means that the task can run for longer durations of time -without accumulating old segments locally on Middle Manager nodes and it is encouraged to do so. +without accumulating old segments locally on Middle Manager processes and it is encouraged to do so. Kinesis Indexing Service may still produce some small segments. Lets say the task duration is 4 hours, segment granularity is set to an HOUR and Supervisor was started at 9:10 then after 4 hours at 13:10, new set of tasks will be started and diff --git a/docs/content/development/extensions-core/lookups-cached-global.md b/docs/content/development/extensions-core/lookups-cached-global.md index 31e6f2f45c9e..eed608494644 100644 --- a/docs/content/development/extensions-core/lookups-cached-global.md +++ b/docs/content/development/extensions-core/lookups-cached-global.md @@ -38,9 +38,9 @@ Static configuration is no longer supported. Lookups can be configured through Globally cached lookups are appropriate for lookups which are not possible to pass at query time due to their size, or are not desired to be passed at query time because the data is to reside in and be handled by the Druid servers, -and are small enough to reasonably populate on a node. This usually means tens to tens of thousands of entries per lookup. +and are small enough to reasonably populate in-memory. This usually means tens to tens of thousands of entries per lookup. 
-Globally cached lookups all draw from the same cache pool, allowing each node to have a fixed cache pool that can be used by cached lookups. +Globally cached lookups all draw from the same cache pool, allowing each process to have a fixed cache pool that can be used by cached lookups. Globally cached lookups can be specified as part of the [cluster wide config for lookups](../../querying/lookups.html) as a type of `cachedNamespace` @@ -93,9 +93,9 @@ The parameters are as follows |`firstCacheTimeout`|How long to wait (in ms) for the first run of the cache to populate. 0 indicates to not wait|No|`0` (do not wait)| |`injective`|If the underlying map is [injective](../../querying/lookups.html#query-execution) (keys and values are unique) then optimizations can occur internally by setting this to `true`|No|`false`| -If `firstCacheTimeout` is set to a non-zero value, it should be less than `druid.manager.lookups.hostUpdateTimeout`. If `firstCacheTimeout` is NOT set, then management is essentially asynchronous and does not know if a lookup succeeded or failed in starting. In such a case logs from the lookup nodes should be monitored for repeated failures. +If `firstCacheTimeout` is set to a non-zero value, it should be less than `druid.manager.lookups.hostUpdateTimeout`. If `firstCacheTimeout` is NOT set, then management is essentially asynchronous and does not know if a lookup succeeded or failed in starting. In such a case logs from the processes using lookups should be monitored for repeated failures. 
-Proper functionality of globally cached lookups requires the following extension to be loaded on the Broker, Peon, and Historical nodes: +Proper functionality of globally cached lookups requires the following extension to be loaded on the Broker, Peon, and Historical processes: `druid-lookups-cached-global` ## Example configuration @@ -160,7 +160,7 @@ Where the Coordinator endpoint `/druid/coordinator/v1/lookups/realtime_customer2 ## Cache Settings -Lookups are cached locally on Historical nodes. The following are settings used by the nodes which service queries when +Lookups are cached locally on Historical processes. The following are settings used by the processes which service queries when setting namespaces (Broker, Peon, Historical) |Property|Description|Default| diff --git a/docs/content/development/extensions-core/simple-client-sslcontext.md b/docs/content/development/extensions-core/simple-client-sslcontext.md index 6fc0dfd36753..107739eddc61 100644 --- a/docs/content/development/extensions-core/simple-client-sslcontext.md +++ b/docs/content/development/extensions-core/simple-client-sslcontext.md @@ -25,7 +25,7 @@ title: "Simple SSLContext Provider Module" # Simple SSLContext Provider Module This module contains a simple implementation of [SSLContext](http://docs.oracle.com/javase/8/docs/api/javax/net/ssl/SSLContext.html) -that will be injected to be used with HttpClient that Druid nodes use internally to communicate with each other. To learn more about +that will be injected to be used with HttpClient that Druid processes use internally to communicate with each other. To learn more about Java's SSL support, please refer to [this](http://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html) guide. 
# Configuration diff --git a/docs/content/development/extensions.md b/docs/content/development/extensions.md index a5b8a5b3f57a..91f6adda8103 100644 --- a/docs/content/development/extensions.md +++ b/docs/content/development/extensions.md @@ -47,12 +47,12 @@ Core extensions are maintained by Druid committers. |druid-caffeine-cache|A local cache implementation backed by Caffeine.|[link](../development/extensions-core/caffeine-cache.html)| |druid-datasketches|Support for approximate counts and set operations with [DataSketches](http://datasketches.github.io/).|[link](../development/extensions-core/datasketches-extension.html)| |druid-hdfs-storage|HDFS deep storage.|[link](../development/extensions-core/hdfs.html)| -|druid-histogram|Approximate histograms and quantiles aggregator.|[link](../development/extensions-core/approximate-histograms.html)| +|druid-histogram|Approximate histograms and quantiles aggregator. Deprecated, please use the [DataSketches quantiles aggregator](../development/extensions-core/datasketches-quantiles.html) from the `druid-datasketches` extension instead.|[link](../development/extensions-core/approximate-histograms.html)| |druid-kafka-eight|Kafka ingest firehose (high level consumer) for realtime nodes.|[link](../development/extensions-core/kafka-eight-firehose.html)| |druid-kafka-extraction-namespace|Kafka-based namespaced lookup. 
Requires namespace lookup extension.|[link](../development/extensions-core/kafka-extraction-namespace.html)| |druid-kafka-indexing-service|Supervised exactly-once Kafka ingestion for the indexing service.|[link](../development/extensions-core/kafka-ingestion.html)| |druid-kinesis-indexing-service|Supervised exactly-once Kinesis ingestion for the indexing service.|[link](../development/extensions-core/kinesis-ingestion.html)| -|druid-kerberos|Kerberos authentication for druid nodes.|[link](../development/extensions-core/druid-kerberos.html)| +|druid-kerberos|Kerberos authentication for druid processes.|[link](../development/extensions-core/druid-kerberos.html)| |druid-lookups-cached-global|A module for [lookups](../querying/lookups.html) providing a jvm-global eager caching for lookups. It provides JDBC and URI implementations for fetching lookup data.|[link](../development/extensions-core/lookups-cached-global.html)| |druid-lookups-cached-single| Per lookup caching module to support the use cases where a lookup need to be isolated from the global pool of lookups |[link](../development/extensions-core/druid-lookups.html)| |druid-parquet-extensions|Support for data in Apache Parquet data format. Requires druid-avro-extensions to be loaded.|[link](../development/extensions-core/parquet.html)| @@ -61,7 +61,7 @@ Core extensions are maintained by Druid committers. 
|druid-stats|Statistics related module including variance and standard deviation.|[link](../development/extensions-core/stats.html)| |mysql-metadata-storage|MySQL metadata store.|[link](../development/extensions-core/mysql.html)| |postgresql-metadata-storage|PostgreSQL metadata store.|[link](../development/extensions-core/postgresql.html)| -|simple-client-sslcontext|Simple SSLContext provider module to be used by internal HttpClient talking to other nodes over HTTPS.|[link](../development/extensions-core/simple-client-sslcontext.html)| +|simple-client-sslcontext|Simple SSLContext provider module to be used by Druid's internal HttpClient when talking to other Druid processes over HTTPS.|[link](../development/extensions-core/simple-client-sslcontext.html)| # Community Extensions diff --git a/docs/content/development/javascript.md b/docs/content/development/javascript.md index 1bfbff632d15..087d626348cc 100644 --- a/docs/content/development/javascript.md +++ b/docs/content/development/javascript.md @@ -59,7 +59,7 @@ unpredictable results if global variables are used. ## Performance Simple JavaScript functions typically have a slight performance penalty to native speed. More complex JavaScript -functions can have steeper performance penalties. Druid compiles JavaScript functions once per node per query. +functions can have steeper performance penalties. Druid compiles JavaScript functions once on each data process per query. You may need to pay special attention to garbage collection when making heavy use of JavaScript functions, especially garbage collection of the compiled classes themselves. 
Be sure to use a garbage collector configuration that supports diff --git a/docs/content/development/modules.md b/docs/content/development/modules.md index c8d6ba980b49..ef9d168f61df 100644 --- a/docs/content/development/modules.md +++ b/docs/content/development/modules.md @@ -110,7 +110,7 @@ In addition to DataSegmentPusher and DataSegmentPuller, you can also bind: It's recommended to use batch ingestion tasks to validate your implementation. The segment will be automatically rolled up to the Historical process after ~20 seconds. -In this way, you can validate both push (at realtime node) and pull (at Historical node) segments. +In this way, you can validate both push (at realtime process) and pull (at Historical process) segments. * DataSegmentPusher @@ -118,9 +118,9 @@ Wherever your data storage (cloud storage service, distributed file system, etc. * DataSegmentPuller -After ~20 secs your ingestion task ends, you should be able to see your Historical node trying to load the new segment. +~20 secs after your ingestion task ends, you should be able to see your Historical process trying to load the new segment. -The following example was retrieved from a Historical node configured to use Azure for deep storage: +The following example was retrieved from a Historical process configured to use Azure for deep storage: ``` 2015-04-14T02:42:33,450 INFO [ZkCoordinator-0] org.apache.druid.server.coordination.ZkCoordinator - New request[LOAD: dde_2015-01-02T00:00:00.000Z_2015-01-03T00:00:00 diff --git a/docs/content/development/overview.md b/docs/content/development/overview.md index 1e0d6f5c354e..25579c3d008b 100644 --- a/docs/content/development/overview.md +++ b/docs/content/development/overview.md @@ -48,7 +48,7 @@ the query logic is to start from `QueryResource.java`. ## Coordination -Most of the coordination logic for Historical nodes is on the Druid Coordinator. The starting point here is `DruidCoordinator.java`.
+Most of the coordination logic for Historical processes is on the Druid Coordinator. The starting point here is `DruidCoordinator.java`. Most of the coordination logic for (real-time) ingestion is in the Druid indexing service. The starting point here is `OverlordResource.java`. ## Real-time Ingestion diff --git a/docs/content/development/router.md b/docs/content/development/router.md index 018fba10c791..0aa69c3b76ac 100644 --- a/docs/content/development/router.md +++ b/docs/content/development/router.md @@ -1,6 +1,6 @@ --- layout: doc_page -title: "Router Node" +title: "Router Process" --- -# Router Node +# Router Process + +The Router process can be used to route queries to different Broker processes. By default, the broker routes queries based on how [Rules](../operations/rule-configuration.html) are set up. For example, if 1 month of recent data is loaded into a `hot` cluster, queries that fall within the recent month can be routed to a dedicated set of brokers. Queries outside this range are routed to another set of brokers. This set up provides query isolation such that queries for more important data are not impacted by queries for less important data. + +For query routing purposes, you should only ever need the Router process if you have a Druid cluster well into the terabyte range. + +In addition to query routing, the Router also runs the [Druid Console](../operations/management-uis.html#druid-console), a management UI for datasources, segments, tasks, data processes (Historicals and MiddleManagers), and coordinator dynamic configuration. The user can also run SQL and native Druid queries within the console. -You should only ever need the Router node if you have a Druid cluster well into the terabyte range. The Router node can be used to route queries to different Broker nodes. By default, the broker routes queries based on how [Rules](../operations/rule-configuration.html) are set up. 
For example, if 1 month of recent data is loaded into a `hot` cluster, queries that fall within the recent month can be routed to a dedicated set of brokers. Queries outside this range are routed to another set of brokers. This set up provides query isolation such that queries for more important data are not impacted by queries for less important data. Running ------- @@ -36,7 +41,7 @@ org.apache.druid.cli.Main server router Example Production Configuration -------------------------------- -In this example, we have two tiers in our production cluster: `hot` and `_default_tier`. Queries for the `hot` tier are routed through the `broker-hot` set of Brokers, and queries for the `_default_tier` are routed through the `broker-cold` set of Brokers. If any exceptions or network problems occur, queries are routed to the `broker-cold` set of brokers. In our example, we are running with a c3.2xlarge EC2 node. We assume a `common.runtime.properties` already exists. +In this example, we have two tiers in our production cluster: `hot` and `_default_tier`. Queries for the `hot` tier are routed through the `broker-hot` set of Brokers, and queries for the `_default_tier` are routed through the `broker-cold` set of Brokers. If any exceptions or network problems occur, queries are routed to the `broker-cold` set of brokers. In our example, we are running with a c3.2xlarge EC2 instance. We assume a `common.runtime.properties` already exists. JVM settings: @@ -176,13 +181,13 @@ This is a non-default implementation that is provided for experimentation purpos HTTP Endpoints -------------- -The Router node exposes several HTTP endpoints for interactions. +The Router process exposes several HTTP endpoints for interactions. ### GET * `/status` -Returns the Druid version, loaded extensions, memory used, total memory and other useful information about the node. +Returns the Druid version, loaded extensions, memory used, total memory and other useful information about the process. 
* `/druid/v2/datasources` @@ -203,7 +208,7 @@ Returns the metrics of the datasource. Router as Management Proxy -------------------------- -The Router can be configured to forward requests to the active Coordinator or Overlord node. This may be useful for +The Router can be configured to forward requests to the active Coordinator or Overlord process. This may be useful for setting up a highly available cluster in situations where the HTTP redirect mechanism of the inactive -> active Coordinator/Overlord does not function correctly (servers are behind a load balancer, the hostname used in the redirect is only resolvable internally, etc.). @@ -224,10 +229,10 @@ determined from the original request path based on Druid API path conventions. F that using the management proxy does not require modifying the API request other than issuing the request to the Router instead of the Coordinator or Overlord. Most Druid API requests can be routed implicitly. -Explicit routes are those where the request to the Router contains a path prefix indicating which node the request +Explicit routes are those where the request to the Router contains a path prefix indicating which process the request should be routed to. For the Coordinator this prefix is `/proxy/coordinator` and for the Overlord it is `/proxy/overlord`. This is required for API calls with an ambiguous destination. For example, the `/status` API is present on all Druid -nodes, so explicit routing needs to be used to indicate the proxy destination. +processes, so explicit routing needs to be used to indicate the proxy destination. This is summarized in the table below: diff --git a/docs/content/ingestion/faq.md b/docs/content/ingestion/faq.md index e9584e8b133e..b2c304b5e9fc 100644 --- a/docs/content/ingestion/faq.md +++ b/docs/content/ingestion/faq.md @@ -56,9 +56,9 @@ Other common reasons that hand-off fails are as follows: 1) Druid is unable to write to the metadata storage. Make sure your configurations are correct. 
-2) Historical nodes are out of capacity and cannot download any more segments. You'll see exceptions in the Coordinator logs if this occurs and the Coordinator console will show the Historicals are near capacity. +2) Historical processes are out of capacity and cannot download any more segments. You'll see exceptions in the Coordinator logs if this occurs and the Coordinator console will show the Historicals are near capacity. -3) Segments are corrupt and cannot be downloaded. You'll see exceptions in your Historical nodes if this occurs. +3) Segments are corrupt and cannot be downloaded. You'll see exceptions in your Historical processes if this occurs. 4) Deep storage is improperly configured. Make sure that your segment actually exists in deep storage and that the Coordinator logs have no errors. @@ -66,9 +66,9 @@ Other common reasons that hand-off fails are as follows: Make sure to include the `druid-hdfs-storage` and all the hadoop configuration, dependencies (that can be obtained by running command `hadoop classpath` on a machine where hadoop has been setup) in the classpath. And, provide necessary HDFS settings as described in [Deep Storage](../dependencies/deep-storage.html) . -## I don't see my Druid segments on my Historical nodes +## I don't see my Druid segments on my Historical processes -You can check the Coordinator console located at `:`. Make sure that your segments have actually loaded on [Historical nodes](../design/historical.html). If your segments are not present, check the Coordinator logs for messages about capacity of replication errors. One reason that segments are not downloaded is because Historical nodes have maxSizes that are too small, making them incapable of downloading more data. You can change that with (for example): +You can check the Coordinator console located at `:`. Make sure that your segments have actually loaded on [Historical processes](../design/historical.html). 
If your segments are not present, check the Coordinator logs for messages about capacity or replication errors. One reason that segments are not downloaded is because Historical processes have maxSizes that are too small, making them incapable of downloading more data. You can change that with (for example): ``` -Ddruid.segmentCache.locations=[{"path":"/tmp/druid/storageLocation","maxSize":"500000000000"}] @@ -99,7 +99,7 @@ See [Update Existing Data](../ingestion/update-existing-data.html) for more deta ## Real-time ingestion seems to be stuck -There are a few ways this can occur. Druid will throttle ingestion to prevent out of memory problems if the intermediate persists are taking too long or if hand-off is taking too long. If your node logs indicate certain columns are taking a very long time to build (for example, if your segment granularity is hourly, but creating a single column takes 30 minutes), you should re-evaluate your configuration or scale up your real-time ingestion. +There are a few ways this can occur. Druid will throttle ingestion to prevent out of memory problems if the intermediate persists are taking too long or if hand-off is taking too long. If your process logs indicate certain columns are taking a very long time to build (for example, if your segment granularity is hourly, but creating a single column takes 30 minutes), you should re-evaluate your configuration or scale up your real-time ingestion.
## More information diff --git a/docs/content/ingestion/hadoop-vs-native-batch.md b/docs/content/ingestion/hadoop-vs-native-batch.md new file mode 100644 index 000000000000..89a8e022a9ad --- /dev/null +++ b/docs/content/ingestion/hadoop-vs-native-batch.md @@ -0,0 +1,43 @@ +--- +layout: doc_page +title: "Hadoop-based Batch Ingestion VS Native Batch Ingestion" +--- + + + +# Comparison of Batch Ingestion Methods + +Druid basically supports three types of batch ingestion: Hadoop-based +batch ingestion, native parallel batch ingestion, and native local batch +ingestion. The below table shows what features are supported by each +ingestion method. + + +| |Hadoop-based ingestion|Native parallel ingestion|Native local ingestion| +|---|----------------------|-------------------------|----------------------| +| Parallel indexing | Always parallel | Parallel if firehose is splittable | Always sequential | +| Supported indexing modes | Replacing mode | Both appending and replacing modes | Both appending and replacing modes | +| External dependency | Hadoop (it internally submits Hadoop jobs) | No dependency | No dependency | +| Supported [rollup modes](http://druid.io/docs/latest/ingestion/index.html#roll-up-modes) | Perfect rollup | Best-effort rollup | Both perfect and best-effort rollup | +| Supported partitioning methods | [Both Hash-based and range partitioning](http://druid.io/docs/latest/ingestion/hadoop.html#partitioning-specification) | N/A | Hash-based partitioning (when `forceGuaranteedRollup` = true) | +| Supported input locations | All locations accessible via HDFS client or Druid dataSource | All implemented [firehoses](./firehose.html) | All implemented [firehoses](./firehose.html) | +| Supported file formats | All implemented Hadoop InputFormats | Currently text file formats (CSV, TSV, JSON) by default. 
Additional formats can be added through a [custom extension](../development/modules.html) implementing [`FiniteFirehoseFactory`](https://github.com/apache/incubator-druid/blob/master/core/src/main/java/org/apache/druid/data/input/FiniteFirehoseFactory.java) | Currently text file formats (CSV, TSV, JSON) by default. Additional formats can be added through a [custom extension](../development/modules.html) implementing [`FiniteFirehoseFactory`](https://github.com/apache/incubator-druid/blob/master/core/src/main/java/org/apache/druid/data/input/FiniteFirehoseFactory.java) | +| Saving parse exceptions in ingestion report | Currently not supported | Currently not supported | Supported | +| Custom segment version | Supported, but this is NOT recommended | N/A | N/A | diff --git a/docs/content/ingestion/hadoop.md b/docs/content/ingestion/hadoop.md index 4f8174c40a95..c824fd0809ca 100644 --- a/docs/content/ingestion/hadoop.md +++ b/docs/content/ingestion/hadoop.md @@ -25,7 +25,9 @@ title: "Hadoop-based Batch Ingestion" # Hadoop-based Batch Ingestion Hadoop-based batch ingestion in Druid is supported via a Hadoop-ingestion task. These tasks can be posted to a running -instance of a Druid [Overlord](../design/overlord.html). +instance of a Druid [Overlord](../design/overlord.html). + +Please check [Hadoop-based Batch Ingestion VS Native Batch Ingestion](./hadoop-vs-native-batch.html) for differences between native batch ingestion and Hadoop-based ingestion. ## Command Line Hadoop Indexer diff --git a/docs/content/ingestion/index.md b/docs/content/ingestion/index.md index db1edfa0172e..a9b12beca075 100644 --- a/docs/content/ingestion/index.md +++ b/docs/content/ingestion/index.md @@ -162,13 +162,13 @@ to load that segment and instructs that Historical to do so. ## Ingestion methods In most ingestion methods, this work is done by Druid -MiddleManager nodes.
One exception is Hadoop-based ingestion, where this work is instead done using a Hadoop MapReduce -job on YARN (although MiddleManager nodes are still involved in starting and monitoring the Hadoop jobs). +MiddleManager processes. One exception is Hadoop-based ingestion, where this work is instead done using a Hadoop MapReduce +job on YARN (although MiddleManager processes are still involved in starting and monitoring the Hadoop jobs). -Once segments have been generated and stored in [deep storage](../dependencies/deep-storage.html), they will be loaded by Druid Historical nodes. Some Druid -ingestion methods additionally support _real-time queries_, meaning you can query in-flight data on MiddleManager nodes +Once segments have been generated and stored in [deep storage](../dependencies/deep-storage.html), they will be loaded by Druid Historical processes. Some Druid +ingestion methods additionally support _real-time queries_, meaning you can query in-flight data on MiddleManager processes before it is finished being converted and written to deep storage. In general, a small amount of data will be in-flight -on MiddleManager nodes relative to the larger amount of historical data being served from Historical nodes. +on MiddleManager processes relative to the larger amount of historical data being served from Historical processes. See the [Design](../design/index.html) page for more details on how Druid stores and manages your data. @@ -178,7 +178,7 @@ the best one for your situation. 
|Method|How it works|Can append and overwrite?|Can handle late data?|Exactly-once ingestion?|Real-time queries?| |------|------------|-------------------------|---------------------|-----------------------|------------------| |[Native batch](native_tasks.html)|Druid loads data directly from S3, HTTP, NFS, or other networked storage.|Append or overwrite|Yes|Yes|No| -|[Hadoop](hadoop.html)|Druid launches Hadoop Map/Reduce jobs to load data files.|Append or overwrite|Yes|Yes|No| +|[Hadoop](hadoop.html)|Druid launches Hadoop Map/Reduce jobs to load data files.|Overwrite|Yes|Yes|No| |[Kafka indexing service](../development/extensions-core/kafka-ingestion.html)|Druid reads directly from Kafka.|Append only|Yes|Yes|Yes| |[Tranquility](stream-push.html)|You use Tranquility, a client side library, to push individual records into Druid.|Append only|No - late data is dropped|No - may drop or duplicate data|Yes| @@ -191,7 +191,7 @@ a _time chunk_, and each time chunk contains one or more [segments](../design/se particular time chunk may be partitioned further using options that vary based on the ingestion method you have chosen. * With [Hadoop](hadoop.html) you can do hash- or range-based partitioning on one or more columns. - * With [Native batch](native_tasks.html) you can partition on a hash of all dimension columns. This is useful when + * With [Native batch](native_tasks.html) you can partition on a hash of dimension columns. This is useful when rollup is enabled, since it maximizes your space savings. * With [Kafka indexing](../development/extensions-core/kafka-ingestion.html), partitioning is based on Kafka partitions, and is not configurable through Druid. You can configure it on the Kafka side by using the partitioning @@ -291,9 +291,9 @@ For compaction documentation, please see [tasks](../ingestion/tasks.html). Druid supports retention rules, which are used to define intervals of time where data should be preserved, and intervals where data should be discarded. 
-Druid also supports separating Historical nodes into tiers, and the retention rules can be configured to assign data for specific intervals to specific tiers. +Druid also supports separating Historical processes into tiers, and the retention rules can be configured to assign data for specific intervals to specific tiers. -These features are useful for performance/cost management; a common use case is separating Historical nodes into a "hot" tier and a "cold" tier. +These features are useful for performance/cost management; a common use case is separating Historical processes into a "hot" tier and a "cold" tier. For more information, please see [Load rules](../operations/rule-configuration.html). diff --git a/docs/content/ingestion/ingestion-spec.md b/docs/content/ingestion/ingestion-spec.md index 46e7334aeffa..b578b545dd09 100644 --- a/docs/content/ingestion/ingestion-spec.md +++ b/docs/content/ingestion/ingestion-spec.md @@ -286,7 +286,7 @@ This spec is used to generated segments with uniform intervals. | segmentGranularity | string | The granularity to create time chunks at. Multiple segments can be created per time chunk. For example, with 'DAY' `segmentGranularity`, the events of the same day fall into the same time chunk which can be optionally further partitioned into multiple segments based on other configurations and input size. See [Granularity](../querying/granularities.html) for supported granularities.| no (default == 'DAY') | | queryGranularity | string | The minimum granularity to be able to query results at and the granularity of the data inside the segment. E.g. a value of "minute" will mean that data is aggregated at minutely granularity. That is, if there are collisions in the tuple (minute(timestamp), dimensions), then it will aggregate values together using the aggregators instead of storing individual rows. A granularity of 'NONE' means millisecond granularity. 
See [Granularity](../querying/granularities.html) for supported granularities.| no (default == 'NONE') | | rollup | boolean | rollup or not | no (default == true) | -| intervals | string | A list of intervals for the raw data being ingested. Ignored for real-time ingestion. | no. If specified, batch ingestion tasks may skip determining partitions phase which results in faster ingestion. | +| intervals | JSON string array | A list of intervals for the raw data being ingested. Ignored for real-time ingestion. | no. If specified, Hadoop and native non-parallel batch ingestion tasks may skip determining partitions phase which results in faster ingestion; native parallel ingestion tasks can request all their locks up-front instead of one by one. Batch ingestion will throw away any data not in the specified intervals. | ### Arbitrary Granularity Spec @@ -296,7 +296,7 @@ This spec is used to generate segments with arbitrary intervals (it tries to cre |-------|------|-------------|----------| | queryGranularity | string | The minimum granularity to be able to query results at and the granularity of the data inside the segment. E.g. a value of "minute" will mean that data is aggregated at minutely granularity. That is, if there are collisions in the tuple (minute(timestamp), dimensions), then it will aggregate values together using the aggregators instead of storing individual rows. A granularity of 'NONE' means millisecond granularity. See [Granularity](../querying/granularities.html) for supported granularities.| no (default == 'NONE') | | rollup | boolean | rollup or not | no (default == true) | -| intervals | string | A list of intervals for the raw data being ingested. Ignored for real-time ingestion. | no. If specified, batch ingestion tasks may skip determining partitions phase which results in faster ingestion. | +| intervals | JSON string array | A list of intervals for the raw data being ingested. Ignored for real-time ingestion. | no.
If specified, Hadoop and native non-parallel batch ingestion tasks may skip determining partitions phase which results in faster ingestion; native parallel ingestion tasks can request all their locks up-front instead of one by one. Batch ingestion will throw away any data not in the specified intervals. | # Transform Spec diff --git a/docs/content/ingestion/locking-and-priority.md b/docs/content/ingestion/locking-and-priority.md index 09424661d6ff..6dbe013a33da 100644 --- a/docs/content/ingestion/locking-and-priority.md +++ b/docs/content/ingestion/locking-and-priority.md @@ -26,7 +26,7 @@ title: "Task Locking & Priority" ## Locking -Once an Overlord node accepts a task, the task acquires locks for the data source and intervals specified in the task. +Once an Overlord process accepts a task, the task acquires locks for the data source and intervals specified in the task. There are two lock types, i.e., _shared lock_ and _exclusive lock_. diff --git a/docs/content/ingestion/native_tasks.md b/docs/content/ingestion/native_tasks.md index e5b2e7d28710..4ecaccf53ac3 100644 --- a/docs/content/ingestion/native_tasks.md +++ b/docs/content/ingestion/native_tasks.md @@ -25,9 +25,14 @@ title: "Native Index Tasks" # Native Index Tasks Druid currently has two types of native batch indexing tasks, `index_parallel` which runs tasks -in parallel on multiple MiddleManager nodes, and `index` which will run a single indexing task locally on a single +in parallel on multiple MiddleManager processes, and `index` which will run a single indexing task locally on a single MiddleManager. +Please check [Hadoop-based Batch Ingestion VS Native Batch Ingestion](./hadoop-vs-native-batch.html) for differences between native batch ingestion and Hadoop-based ingestion. + +To run either kind of native batch indexing task, write an ingestion spec as specified below.
Then POST it to the +[`/druid/indexer/v1/task` endpoint on the Overlord](../operations/api-reference.html#tasks), or use the `post-index-task` script included with Druid. + Parallel Index Task -------------------------------- @@ -49,7 +54,17 @@ which specifies a split and submits worker tasks using those specs. As a result, the implementation of splittable firehoses. Please note that multiple tasks can be created for the same worker task spec if one of them fails. -Since this task doesn't shuffle intermediate data, it isn't available for [perfect rollup](../ingestion/index.html#roll-up-modes). +You may want to consider the below points: +- Since this task doesn't shuffle intermediate data, it isn't available for [perfect rollup](../ingestion/index.html#roll-up-modes). +- The number of tasks for parallel ingestion is decided by `maxNumSubTasks` in the tuningConfig. + Since the supervisor task creates up to `maxNumSubTasks` worker tasks regardless of the available task slots, + it may affect other ingestion performance. As a result, it's important to set `maxNumSubTasks` properly. + See the below [Capacity Planning](#capacity-planning) section for more details. +- By default, batch ingestion replaces all data in any segment that it writes to. If you'd like to add to the segment + instead, set the appendToExisting flag in ioConfig. Note that it only replaces data in segments where it actively adds + data: if there are segments in your granularitySpec's intervals that have no data written by this task, they will be + left alone. + An example ingestion spec is: @@ -117,6 +132,10 @@ An example ingestion spec is: "baseDir": "examples/indexing/", "filter": "wikipedia_index_data*" } + }, + "tuningConfig": { + "type": "index_parallel", + "maxNumSubTasks": 2 } } } @@ -137,6 +156,14 @@ This field is required.
See [Ingestion Spec DataSchema](../ingestion/ingestion-spec.html#dataschema) +If you specify `intervals` explicitly in your dataSchema's granularitySpec, batch ingestion will lock the full intervals +specified when it starts up, and you will learn quickly if the specified interval overlaps with locks held by other +tasks (eg, Kafka ingestion). Otherwise, batch ingestion will lock each interval as it is discovered, so you may only +learn that the task overlaps with a higher-priority task later in ingestion. If you specify `intervals` explicitly, any +rows outside the specified intervals will be thrown away. We recommend setting `intervals` explicitly if you know the +time range of the data so that locking failure happens faster, and so that you don't accidentally replace data outside +that range if there's some stray data with unexpected timestamps. + #### IOConfig |property|description|default|required?| @@ -163,7 +190,7 @@ The tuningConfig is optional and default parameters will be used if no tuningCon |reportParseExceptions|If true, exceptions encountered during parsing will be thrown and will halt ingestion; if false, unparseable rows and fields will be skipped.|false|no| |pushTimeout|Milliseconds to wait for pushing segments. It must be >= 0, where 0 means to wait forever.|0|no| |segmentWriteOutMediumFactory|Segment write-out medium to use when creating segments. See [SegmentWriteOutMediumFactory](#segmentWriteOutMediumFactory).|Not specified, the value from `druid.peon.defaultSegmentWriteOutMediumFactory.type` is used|no| -|maxNumSubTasks|Maximum number of tasks which can be run at the same time.|Integer.MAX_VALUE|no| +|maxNumSubTasks|Maximum number of tasks which can be run at the same time. The supervisor task would spawn worker tasks up to `maxNumSubTasks` regardless of the available task slots. If this value is set to 1, the supervisor task processes data ingestion on its own instead of spawning worker tasks. 
If this value is set too large, too many worker tasks can be created which might block other ingestion. Check [Capacity Planning](#capacity-planning) for more details.|1|no| |maxRetry|Maximum number of retries on task failures.|3|no| |taskStatusCheckPeriodMs|Polling period in milleseconds to check running task statuses.|1000|no| |chatHandlerTimeout|Timeout for reporting the pushed segments in worker tasks.|PT10S|no| @@ -354,7 +381,7 @@ An example of the result is "reportParseExceptions": false, "pushTimeout": 0, "segmentWriteOutMediumFactory": null, - "maxNumSubTasks": 2147483647, + "maxNumSubTasks": 4, "maxRetry": 3, "taskStatusCheckPeriodMs": 1000, "chatHandlerTimeout": "PT10S", @@ -390,6 +417,27 @@ An example of the result is Returns the task attempt history of the worker task spec of the given id, or HTTP 404 Not Found error if the supervisor task is running in the sequential mode. +### Capacity Planning + +The supervisor task can create up to `maxNumSubTasks` worker tasks no matter how many task slots are currently available. +As a result, total number of tasks which can be run at the same time is `(maxNumSubTasks + 1)` (including the supervisor task). +Please note that this can be even larger than total number of task slots (sum of the capacity of all workers). +If `maxNumSubTasks` is larger than `n (available task slots)`, then +`maxNumSubTasks` tasks are created by the supervisor task, but only `n` tasks would be started. +Others will wait in the pending state until any running task is finished. + +If you are using the Parallel Index Task with stream ingestion together, +we recommend limiting the max capacity for batch ingestion to prevent +stream ingestion from being blocked by batch ingestion. Suppose you have +`t` Parallel Index Tasks to run at the same time, but want to limit +the max number of tasks for batch ingestion to `b`. Then, (sum of `maxNumSubTasks` +of all Parallel Index Tasks + `t` (for supervisor tasks)) must be smaller than `b`.
+ +If you have some tasks of a higher priority than others, you may set their +`maxNumSubTasks` to a higher value than lower priority tasks. +This may help the higher priority tasks to finish earlier than lower priority tasks +by assigning more task slots to them. + Local Index Task ---------------- @@ -461,6 +509,11 @@ The Local Index Task is designed to be used for smaller data sets. The task exec } ``` +By default, batch ingestion replaces all data in any segment that it writes to. If you'd like to add to the segment +instead, set the appendToExisting flag in ioConfig. Note that it only replaces data in segments where it actively adds +data: if there are segments in your granularitySpec's intervals that have no data written by this task, they will be +left alone. + #### Task Properties |property|description|required?| @@ -476,6 +529,12 @@ This field is required. See [Ingestion Spec DataSchema](../ingestion/ingestion-spec.html#dataschema) +If you do not specify `intervals` explicitly in your dataSchema's granularitySpec, the Local Index Task will do an extra +pass over the data to determine the range to lock when it starts up. If you specify `intervals` explicitly, any rows +outside the specified intervals will be thrown away. We recommend setting `intervals` explicitly if you know the time +range of the data because it allows the task to skip the extra pass, and so that you don't accidentally replace data outside +that range if there's some stray data with unexpected timestamps. + #### IOConfig |property|description|default|required?| @@ -500,7 +559,7 @@ The tuningConfig is optional and default parameters will be used if no tuningCon |indexSpec|defines segment storage format options to be used at indexing time, see [IndexSpec](#indexspec)|null|no| |maxPendingPersists|Maximum number of persists that can be pending but not started. If this limit would be exceeded by a new intermediate persist, ingestion will block until the currently-running persist finishes. 
Maximum heap memory usage for indexing scales with maxRowsInMemory * (2 + maxPendingPersists).|0 (meaning one persist can be running concurrently with ingestion, and none can be queued up)|no| |forceExtendableShardSpecs|Forces use of extendable shardSpecs. Experimental feature intended for use with the [Kafka indexing service extension](../development/extensions-core/kafka-ingestion.html).|false|no| -|forceGuaranteedRollup|Forces guaranteeing the [perfect rollup](../ingestion/index.html#roll-up-modes). The perfect rollup optimizes the total size of generated segments and querying time while indexing time will be increased. This flag cannot be used with either `appendToExisting` of IOConfig or `forceExtendableShardSpecs`. For more details, see the below __Segment pushing modes__ section.|false|no| +|forceGuaranteedRollup|Forces guaranteeing the [perfect rollup](../ingestion/index.html#roll-up-modes). The perfect rollup optimizes the total size of generated segments and querying time while indexing time will be increased. If this is set to true, the index task will read the entire input data twice: one for finding the optimal number of partitions per time chunk and one for generating segments. Note that the result segments would be hash-partitioned. You can set `forceExtendableShardSpecs` if you plan to append more data to the same time range in the future. This flag cannot be used with `appendToExisting` of IOConfig. For more details, see the below __Segment pushing modes__ section.|false|no| |reportParseExceptions|DEPRECATED. If true, exceptions encountered during parsing will be thrown and will halt ingestion; if false, unparseable rows and fields will be skipped. Setting `reportParseExceptions` to true will override existing configurations for `maxParseExceptions` and `maxSavedParseExceptions`, setting `maxParseExceptions` to 0 and limiting `maxSavedParseExceptions` to no more than 1.|false|no| |pushTimeout|Milliseconds to wait for pushing segments. 
It must be >= 0, where 0 means to wait forever.|0|no| |segmentWriteOutMediumFactory|Segment write-out medium to use when creating segments. See [SegmentWriteOutMediumFactory](#segmentWriteOutMediumFactory).|Not specified, the value from `druid.peon.defaultSegmentWriteOutMediumFactory.type` is used|no| @@ -548,12 +607,12 @@ the Index task supports two segment pushing modes, i.e., _bulk pushing mode_ and [perfect rollup and best-effort rollup](../ingestion/index.html#roll-up-modes), respectively. In the bulk pushing mode, every segment is pushed at the very end of the index task. Until then, created segments -are stored in the memory and local storage of the node running the index task. As a result, this mode might cause a +are stored in the memory and local storage of the process running the index task. As a result, this mode might cause a problem due to limited storage capacity, and is not recommended to use in production. On the contrary, in the incremental pushing mode, segments are incrementally pushed, that is they can be pushed in the middle of the index task. More precisely, the index task collects data and stores created segments in the memory -and disks of the node running that task until the total number of collected rows exceeds `maxTotalRows`. Once it exceeds, +and disks of the process running that task until the total number of collected rows exceeds `maxTotalRows`. Once it exceeds, the index task immediately pushes all segments created until that moment, cleans all pushed segments up, and continues to ingest remaining data. diff --git a/docs/content/ingestion/stream-pull.md b/docs/content/ingestion/stream-pull.md index 0ec8b7fd48bb..ea1aff7543b6 100644 --- a/docs/content/ingestion/stream-pull.md +++ b/docs/content/ingestion/stream-pull.md @@ -23,7 +23,7 @@ title: "Stream Pull Ingestion" -->
-NOTE: Realtime nodes are deprecated. Please use the Kafka Indexing Service for stream pull use cases instead. +NOTE: Realtime processes are deprecated. Please use the Kafka Indexing Service for stream pull use cases instead.
# Stream Pull Ingestion @@ -32,20 +32,20 @@ If you have an external service that you want to pull data from, you have two op option is to set up a "copying" service that reads from the data source and writes to Druid using the [stream push method](stream-push.html). -Another option is *stream pull*. With this approach, a Druid Realtime Node ingests data from a +Another option is *stream pull*. With this approach, a Druid Realtime Process ingests data from a [Firehose](../ingestion/firehose.html) connected to the data you want to -read. The Druid quickstart and tutorials do not include information about how to set up standalone realtime nodes, but -they can be used in place for Tranquility server and the indexing service. Please note that Realtime nodes have different properties and roles than the indexing service. +read. The Druid quickstart and tutorials do not include information about how to set up standalone realtime processes, but +they can be used in place for Tranquility server and the indexing service. Please note that Realtime processes have different properties and roles than the indexing service. -## Realtime Node Ingestion +## Realtime Process Ingestion -Much of the configuration governing Realtime nodes and the ingestion of data is set in the Realtime spec file, discussed on this page. +Much of the configuration governing Realtime processes and the ingestion of data is set in the Realtime spec file, discussed on this page. -For general Real-time Node information, see [here](../design/realtime.html). +For general Real-time Process information, see [here](../design/realtime.html). -For Real-time Node Configuration, see [Realtime Configuration](../configuration/realtime.html). +For Real-time Process Configuration, see [Realtime Configuration](../configuration/realtime.html). -For writing your own plugins to the real-time node, see [Firehose](../ingestion/firehose.html). 
+For writing your own plugins to the real-time process, see [Firehose](../ingestion/firehose.html). ## Realtime "specFile" @@ -127,7 +127,7 @@ The property `druid.realtime.specFile` has the path of a file (absolute or relat ] ``` -This is a JSON Array so you can give more than one realtime stream to a given node. The number you can put in the same process depends on the exact configuration. In general, it is best to think of each realtime stream handler as requiring 2-threads: 1 thread for data consumption and aggregation, 1 thread for incremental persists and other background tasks. +This is a JSON Array so you can give more than one realtime stream to a given process. The number you can put in the same process depends on the exact configuration. In general, it is best to think of each realtime stream handler as requiring 2-threads: 1 thread for data consumption and aggregation, 1 thread for incremental persists and other background tasks. There are three parts to a realtime stream specification, `dataSchema`, `IOConfig`, and `tuningConfig` which we will go into here. @@ -172,7 +172,7 @@ The tuningConfig is optional and default parameters will be used if no tuningCon |versioningPolicy|Object|How to version segments.|no (default == based on segment start time)| |rejectionPolicy|Object|Controls how data sets the data acceptance policy for creating and handing off segments. More on this below.|no (default == 'serverTime')| |maxPendingPersists|Integer|Maximum number of persists that can be pending, but not started. If this limit would be exceeded by a new intermediate persist, ingestion will block until the currently-running persist finishes. Maximum heap memory usage for indexing scales with maxRowsInMemory * (2 + maxPendingPersists).|no (default == 0; meaning one persist can be running concurrently with ingestion, and none can be queued up)| -|shardSpec|Object|This describes the shard that is represented by this server. 
This must be specified properly in order to have multiple realtime nodes indexing the same data stream in a [sharded fashion](#sharding).|no (default == 'NoneShardSpec')| +|shardSpec|Object|This describes the shard that is represented by this server. This must be specified properly in order to have multiple realtime processes indexing the same data stream in a [sharded fashion](#sharding).|no (default == 'NoneShardSpec')| |persistThreadPriority|int|If `-XX:+UseThreadPriorities` is properly enabled, this will set the thread priority of the persisting thread to `Thread.NORM_PRIORITY` plus this value within the bounds of `Thread.MIN_PRIORITY` and `Thread.MAX_PRIORITY`. A value of 0 indicates to not change the thread priority.|no (default == 0; inherit and do not override)| |mergeThreadPriority|int|If `-XX:+UseThreadPriorities` is properly enabled, this will set the thread priority of the merging thread to `Thread.NORM_PRIORITY` plus this value within the bounds of `Thread.MIN_PRIORITY` and `Thread.MAX_PRIORITY`. A value of 0 indicates to not change the thread priority.|no (default == 0; inherit and do not override)| |reportParseExceptions|Boolean|If true, exceptions encountered during parsing will be thrown and will halt ingestion. If false, unparseable rows and fields will be skipped. If an entire row is skipped, the "unparseable" counter will be incremented. If some fields in a row were parseable and some were not, the parseable fields will be indexed and the "unparseable" counter will not be incremented.|no (default == false)| @@ -233,7 +233,7 @@ In small-data scenarios, sharding is unnecessary and can be set to none (the def "shardSpec": {"type": "none"} ``` -However, in scenarios with multiple realtime nodes, `none` is less useful as it cannot help with scaling data volume (see below). Note that for the batch indexing service, no explicit configuration is required; sharding is provided automatically. 
+However, in scenarios with multiple realtime processes, `none` is less useful as it cannot help with scaling data volume (see below). Note that for the batch indexing service, no explicit configuration is required; sharding is provided automatically. Druid uses sharding based on the `shardSpec` setting you configure. The recommended choices, `linear` and `numbered`, are discussed below; other types have been useful for internal Druid development but are not appropriate for production setups. @@ -243,7 +243,7 @@ Keep in mind, that sharding configuration has nothing to do with configured fire This strategy provides following advantages: -* There is no need to update the fileSpec configurations of existing nodes when adding new nodes. +* There is no need to update the fileSpec configurations of existing processes when adding new processes. * All unique shards are queried, regardless of whether the partition numbering is sequential or not (it allows querying of partitions 0 and 2, even if partition 1 is missing). Configure `linear` under `schema`: @@ -273,9 +273,9 @@ Configure `numbered` under `schema`: ##### Scale and Redundancy -The `shardSpec` configuration can be used to create redundancy by having the same `partitionNum` values on different nodes. +The `shardSpec` configuration can be used to create redundancy by having the same `partitionNum` values on different processes. -For example, if RealTimeNode1 has: +For example, if RealTimeProcess1 has: ```json "shardSpec": { @@ -284,7 +284,7 @@ For example, if RealTimeNode1 has: } ``` -and RealTimeNode2 has: +and RealTimeProcess2 has: ```json "shardSpec": { @@ -293,9 +293,9 @@ and RealTimeNode2 has: } ``` -then two realtime nodes can store segments with the same datasource, version, time interval, and partition number. Brokers that query for data in such segments will assume that they hold the same data, and the query will target only one of the segments. 
+then two realtime processes can store segments with the same datasource, version, time interval, and partition number. Brokers that query for data in such segments will assume that they hold the same data, and the query will target only one of the segments. -`shardSpec` can also help achieve scale. For this, add nodes with a different `partionNum`. Continuing with the example, if RealTimeNode3 has: +`shardSpec` can also help achieve scale. For this, add processes with a different `partionNum`. Continuing with the example, if RealTimeProcess3 has: ```json "shardSpec": { @@ -304,7 +304,7 @@ then two realtime nodes can store segments with the same datasource, version, ti } ``` -then it can store segments with the same datasource, time interval, and version as in the first two nodes, but with a different partition number. Brokers that query for data in such segments will assume that a segment from RealTimeNode3 holds *different* data, and the query will target it along with a segment from the first two nodes. +then it can store segments with the same datasource, time interval, and version as in the first two processes, but with a different partition number. Brokers that query for data in such segments will assume that a segment from RealTimeProcess3 holds *different* data, and the query will target it along with a segment from the first two processes. You can use type `numbered` similarly. Note that type `none` is essentially type `linear` with all shards having a fixed `partitionNum` of 0. @@ -327,45 +327,45 @@ The normal, expected use cases have the following overall constraints: `intermed ### Kafka -Standalone realtime nodes use the Kafka high level consumer, which imposes a few restrictions. +Standalone realtime processes use the Kafka high level consumer, which imposes a few restrictions. -Druid replicates segment such that logically equivalent data segments are concurrently hosted on N nodes. If N–1 nodes go down, -the data will still be available for querying. 
On real-time nodes, this process depends on maintaining logically equivalent -data segments on each of the N nodes, which is not possible with standard Kafka consumer groups if your Kafka topic requires more than one consumer +Druid replicates segment such that logically equivalent data segments are concurrently hosted on N processes. If N–1 processes go down, +the data will still be available for querying. On real-time processes, this process depends on maintaining logically equivalent +data segments on each of the N processes, which is not possible with standard Kafka consumer groups if your Kafka topic requires more than one consumer (because consumers in different consumer groups will split up the data differently). -For example, let's say your topic is split across Kafka partitions 1, 2, & 3 and you have 2 real-time nodes with linear shard specs 1 & 2. -Both of the real-time nodes are in the same consumer group. Real-time node 1 may consume data from partitions 1 & 3, and real-time node 2 may consume data from partition 2. +For example, let's say your topic is split across Kafka partitions 1, 2, & 3 and you have 2 real-time processes with linear shard specs 1 & 2. +Both of the real-time processes are in the same consumer group. Real-time process 1 may consume data from partitions 1 & 3, and real-time process 2 may consume data from partition 2. Querying for your data through the Broker will yield correct results. -The problem arises if you want to replicate your data by creating real-time nodes 3 & 4. These new real-time nodes also +The problem arises if you want to replicate your data by creating real-time processes 3 & 4. These new real-time processes also have linear shard specs 1 & 2, and they will consume data from Kafka using a different consumer group. In this case, -real-time node 3 may consume data from partitions 1 & 2, and real-time node 4 may consume data from partition 2. 
-From Druid's perspective, the segments hosted by real-time nodes 1 and 3 are the same, and the data hosted by real-time nodes +real-time process 3 may consume data from partitions 1 & 2, and real-time process 4 may consume data from partition 2. +From Druid's perspective, the segments hosted by real-time processes 1 and 3 are the same, and the data hosted by real-time processes 2 and 4 are the same, although they are reading from different Kafka partitions. Querying for the data will yield inconsistent results. Is this always a problem? No. If your data is small enough to fit on a single Kafka partition, you can replicate without issues. -Otherwise, you can run real-time nodes without replication. +Otherwise, you can run real-time processes without replication. Please note that druid will skip over event that failed its checksum and it is corrupt. ### Locking -Using stream pull ingestion with Realtime nodes together batch ingestion may introduce data override issues. For example, if you +Using stream pull ingestion with Realtime processes together with batch ingestion may introduce data override issues. For example, if you are generating hourly segments for the current day, and run a daily batch job for the current day's data, the segments created by the batch job will have a more recent version than most of the segments generated by realtime ingestion. If your batch job is indexing data that isn't yet complete for the day, the daily segment created by the batch job can override recent segments created by -realtime nodes. A portion of data will appear to be lost in this case. +realtime processes. A portion of data will appear to be lost in this case. ### Schema changes -Standalone realtime nodes require stopping a node to update a schema, and starting it up again for the schema to take effect. +Standalone realtime processes require stopping a process to update a schema, and starting it up again for the schema to take effect.
This can be difficult to manage at scale, especially with multiple partitions. ### Log management -Each standalone realtime node has its own set of logs. Diagnosing errors across many partitions across many servers may be +Each standalone realtime process has its own set of logs. Diagnosing errors across many partitions across many servers may be difficult to manage and track at scale. ## Deployment Notes diff --git a/docs/content/ingestion/stream-push.md b/docs/content/ingestion/stream-push.md index 9552499f7ec7..708ee541574e 100644 --- a/docs/content/ingestion/stream-push.md +++ b/docs/content/ingestion/stream-push.md @@ -125,7 +125,7 @@ The windowPeriod is the slack time permitted for events. For example, a windowPe than ten minutes in the future, will be dropped. These are important configurations because they influence how long tasks will be alive for, and how -long data stays in the realtime system before being handed off to the Historical nodes. For example, +long data stays in the realtime system before being handed off to the Historical processes. For example, if your configuration has segmentGranularity "hour" and windowPeriod ten minutes, tasks will stay around listening for events for an hour and ten minutes. For this reason, to prevent excessive buildup of tasks, it is recommended that your windowPeriod be less than your segmentGranularity. diff --git a/docs/content/ingestion/tasks.md b/docs/content/ingestion/tasks.md index 41f7b52444b1..4653d6ba2ed7 100644 --- a/docs/content/ingestion/tasks.md +++ b/docs/content/ingestion/tasks.md @@ -41,6 +41,10 @@ See [batch ingestion](../ingestion/hadoop.html). Druid provides a native index task which doesn't need any dependencies on other systems. See [native index tasks](./native_tasks.html) for more details. +
+Please check [Hadoop-based Batch Ingestion VS Native Batch Ingestion](./hadoop-vs-native-batch.html) for differences between native batch ingestion and Hadoop-based ingestion. +
+ ### Kafka Indexing Tasks Kafka Indexing tasks are automatically created by a Kafka Supervisor and are responsible for pulling data from Kafka streams. These tasks are not meant to be created/submitted directly by users. See [Kafka Indexing Service](../development/extensions-core/kafka-ingestion.html) for more details. diff --git a/docs/content/operations/api-reference.md b/docs/content/operations/api-reference.md index 7a0b43421fc9..5ad0e6c00285 100644 --- a/docs/content/operations/api-reference.md +++ b/docs/content/operations/api-reference.md @@ -40,15 +40,15 @@ This page documents all of the API endpoints for each Druid service type. ## Common -The following endpoints are supported by all nodes. +The following endpoints are supported by all processes. -### Node information +### Process information #### GET * `/status` -Returns the Druid version, loaded extensions, memory used, total memory and other useful information about the node. +Returns the Druid version, loaded extensions, memory used, total memory and other useful information about the process. * `/status/health` @@ -56,7 +56,7 @@ An endpoint that always returns a boolean "true" value with a 200 OK response, u * `/status/properties` -Returns the current configuration properties of the node. +Returns the current configuration properties of the process. ## Master Server @@ -74,17 +74,15 @@ Returns the current leader Coordinator of the cluster. * `/druid/coordinator/v1/isLeader` -Returns true if the Coordinator receiving the request is the current leader. - -#### Segment Loading - -##### GET - Returns a JSON object with field "leader", either true or false, indicating if this server is the current leader Coordinator of the cluster. In addition, returns HTTP 200 if the server is the current leader and HTTP 404 if not. This is suitable for use as a load balancer status check if you only want the active leader to be considered in-service at the load balancer. 
+#### Segment Loading + +##### GET + * `/druid/coordinator/v1/loadstatus` Returns the percentage of segments actually loaded in the cluster versus segments that should be loaded in the cluster. @@ -99,15 +97,15 @@ Returns the number of segments left to load in each tier until segments that sho * `/druid/coordinator/v1/loadqueue` -Returns the ids of segments to load and drop for each Historical node. +Returns the ids of segments to load and drop for each Historical process. * `/druid/coordinator/v1/loadqueue?simple` -Returns the number of segments to load and drop, as well as the total segment load and drop size in bytes for each Historical node. +Returns the number of segments to load and drop, as well as the total segment load and drop size in bytes for each Historical process. * `/druid/coordinator/v1/loadqueue?full` -Returns the serialized JSON of segments to load and drop for each Historical node. +Returns the serialized JSON of segments to load and drop for each Historical process. #### Metadata store information @@ -145,14 +143,17 @@ Returns full segment metadata for a specific segment as stored in the metadata s * `/druid/coordinator/v1/metadata/datasources/{dataSourceName}/segments` -Returns a list of all segments, overlapping with any of given intervals, for a datasource as stored in the metadata store. Request body is array of string intervals like [interval1, interval2,...] for example ["2012-01-01T00:00:00.000/2012-01-03T00:00:00.000", "2012-01-05T00:00:00.000/2012-01-07T00:00:00.000"] +Returns a list of all segments, overlapping with any of given intervals, for a datasource as stored in the metadata store. Request body is array of string ISO 8601 intervals like [interval1, interval2,...]
for example ["2012-01-01T00:00:00.000/2012-01-03T00:00:00.000", "2012-01-05T00:00:00.000/2012-01-07T00:00:00.000"] * `/druid/coordinator/v1/metadata/datasources/{dataSourceName}/segments?full` -Returns a list of all segments, overlapping with any of given intervals, for a datasource with the full segment metadata as stored in the metadata store. Request body is array of string intervals like [interval1, interval2,...] for example ["2012-01-01T00:00:00.000/2012-01-03T00:00:00.000", "2012-01-05T00:00:00.000/2012-01-07T00:00:00.000"] +Returns a list of all segments, overlapping with any of given intervals, for a datasource with the full segment metadata as stored in the metadata store. Request body is array of string ISO 8601 intervals like [interval1, interval2,...] for example ["2012-01-01T00:00:00.000/2012-01-03T00:00:00.000", "2012-01-05T00:00:00.000/2012-01-07T00:00:00.000"] #### Datasources +Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/` +(e.g., 2016-06-27_2016-06-28). + ##### GET * `/druid/coordinator/v1/datasources` @@ -189,7 +190,7 @@ Returns a map of an interval to a map of segment metadata to a set of server nam * `/druid/coordinator/v1/datasources/{dataSourceName}/intervals/{interval}` -Returns a set of segment ids for an ISO8601 interval. Note that {interval} parameters are delimited by a `_` instead of a `/` (e.g., 2016-06-27_2016-06-28). +Returns a set of segment ids for an interval. * `/druid/coordinator/v1/datasources/{dataSourceName}/intervals/{interval}?simple` @@ -236,18 +237,19 @@ Enables a segment of a datasource. Disables a datasource. * `/druid/coordinator/v1/datasources/{dataSourceName}/intervals/{interval}` -* `@Deprecated. /druid/coordinator/v1/datasources/{dataSourceName}?kill=true&interval={myISO8601Interval}` +* `@Deprecated. /druid/coordinator/v1/datasources/{dataSourceName}?kill=true&interval={myInterval}` Runs a [Kill task](../ingestion/tasks.html) for a given interval and datasource. 
-Note that {interval} parameters are delimited by a `_` instead of a `/` (e.g., 2016-06-27_2016-06-28). - * `/druid/coordinator/v1/datasources/{dataSourceName}/segments/{segmentId}` Disables a segment. #### Retention Rules +Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/` +(e.g., 2016-06-27_2016-06-28). + ##### GET * `/druid/coordinator/v1/rules` @@ -294,9 +296,10 @@ Optional Header Parameters for auditing the config change can also be specified. #### Intervals -##### GET +Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/` +(e.g., 2016-06-27_2016-06-28). -Note that {interval} parameters are delimited by a `_` instead of a `/` (e.g., 2016-06-27_2016-06-28). +##### GET * `/druid/coordinator/v1/intervals` @@ -338,7 +341,9 @@ will be set for them. * `/druid/coordinator/v1/config/compaction` -Creates or updates the compaction config for a dataSource. See [Compaction Configuration](../configuration/index.html#compaction-dynamic-configuration) for configuration details. +Creates or updates the compaction config for a dataSource. +See [Compaction Configuration](../configuration/index.html#compaction-dynamic-configuration) for configuration details. + ##### DELETE @@ -353,18 +358,18 @@ Removes the compaction config for a dataSource. * `/druid/coordinator/v1/servers` Returns a list of servers URLs using the format `{hostname}:{port}`. Note that -nodes that run with different types will appear multiple times with different +processes that run with different types will appear multiple times with different ports. 
* `/druid/coordinator/v1/servers?simple` Returns a list of server data objects in which each object has the following keys: -- `host`: host URL include (`{hostname}:{port}`) -- `type`: node type (`indexer-executor`, `historical`) -- `currSize`: storage size currently used -- `maxSize`: maximum storage size -- `priority` -- `tier` +* `host`: host URL include (`{hostname}:{port}`) +* `type`: process type (`indexer-executor`, `historical`) +* `currSize`: storage size currently used +* `maxSize`: maximum storage size +* `priority` +* `tier` ### Overlord @@ -384,8 +389,44 @@ only want the active leader to be considered in-service at the load balancer. #### Tasks +Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/` +(e.g., 2016-06-27_2016-06-28). + ##### GET +* `/druid/indexer/v1/tasks` + +Retrieve list of tasks. Accepts query string parameters `state`, `datasource`, `createdTimeInterval`, `max`, and `type`. + +|Query Parameter |Description | +|---|---| +|`state`|filter list of tasks by task state, valid options are `running`, `complete`, `waiting`, and `pending`.| +| `datasource`| return tasks filtered by Druid datasource.| +| `createdTimeInterval`| return tasks created within the specified interval. | +| `max`| maximum number of `"complete"` tasks to return. Only applies when `state` is set to `"complete"`.| +| `type`| filter tasks by task type. See [task documentation](../ingestion/tasks.html) for more details.| + + +* `/druid/indexer/v1/completeTasks` + +Retrieve list of complete tasks. Equivalent to `/druid/indexer/v1/tasks?state=complete`. + +* `/druid/indexer/v1/runningTasks` + +Retrieve list of running tasks. Equivalent to `/druid/indexer/v1/tasks?state=running`. + +* `/druid/indexer/v1/waitingTasks` + +Retrieve list of waiting tasks. Equivalent to `/druid/indexer/v1/tasks?state=waiting`. + +* `/druid/indexer/v1/pendingTasks` + +Retrieve list of pending tasks. Equivalent to `/druid/indexer/v1/tasks?state=pending`. 
+
+* `/druid/indexer/v1/task/{taskId}`
+
+Retrieve the 'payload' of a task.
+
 * `/druid/indexer/v1/task/{taskId}/status`
 
 Retrieve the status of a task.
 
@@ -408,14 +449,27 @@ Retrieve a [task completion report](../ingestion/reports.html) for a task. Only
 
 Endpoint for submitting tasks and supervisor specs to the Overlord. Returns the taskId of the submitted task.
 
-* `druid/indexer/v1/task/{taskId}/shutdown`
+* `/druid/indexer/v1/task/{taskId}/shutdown`
 
 Shuts down a task.
 
-* `druid/indexer/v1/datasources/{dataSource}/shutdownAllTasks`
+* `/druid/indexer/v1/datasources/{dataSource}/shutdownAllTasks`
 
 Shuts down all tasks for a dataSource.
 
+* `/druid/indexer/v1/taskStatus`
+
+Retrieve list of task status objects for list of task id strings in request body.
+
+##### DELETE
+
+* `/druid/indexer/v1/pendingSegments/{dataSource}`
+
+Manually clean up pending segments table in metadata storage for `datasource`. Returns a JSON object response with
+`numDeleted` and count of rows deleted from the pending segments table. This API is used by the
+`druid.coordinator.kill.pendingSegments.on` [coordinator setting](../configuration/index.html#coordinator-operation)
+which automates this operation to be performed periodically.
+
 #### Supervisors
 
 ##### GET
 
@@ -492,13 +546,94 @@ This API is deprecated and will be removed in future releases. Please use the
 equivalent 'terminate' instead.
 
+#### Dynamic Configuration
+See [Overlord Dynamic Configuration](../configuration/index.html#overlord-dynamic-configuration) for details.
+
+Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`
+(e.g., 2016-06-27_2016-06-28).
+
+##### GET
+
+* `/druid/indexer/v1/worker`
+
+Retrieves current overlord dynamic configuration.
+
+* `/druid/indexer/v1/worker/history?interval={interval}&counter={count}`
+
+Retrieves history of changes to overlord dynamic configuration.
Accepts `interval` and `count` query string parameters
+to filter by interval and limit the number of results respectively.
+
+* `/druid/indexer/v1/scaling`
+
+Retrieves overlord scaling events if auto-scaling runners are in use.
+
+##### POST
+
+* `/druid/indexer/v1/worker`
+
+Update overlord dynamic worker configuration.
+
 ## Data Server
 
-This section documents the API endpoints for the processes that reside on Data servers (MiddleManagers/Peons and Historicals) in the suggested [three-server configuration](../design/processes.html#server-types).
+This section documents the API endpoints for the processes that reside on Data servers (MiddleManagers/Peons and Historicals)
+in the suggested [three-server configuration](../design/processes.html#server-types).
 
 ### MiddleManager
 
-The MiddleManager does not have any API endpoints beyond the [common endpoints](#common).
+##### GET
+
+* `/druid/worker/v1/enabled`
+
+Check whether a MiddleManager is in an enabled or disabled state. Returns JSON object keyed by the combined `druid.host`
+and `druid.port` with the boolean state as the value.
+
+```json
+{"localhost:8091":true}
+```
+
+* `/druid/worker/v1/tasks`
+
+Retrieve a list of active tasks being run on MiddleManager. Returns JSON list of taskid strings. Normal usage should
+prefer to use the `/druid/indexer/v1/tasks` [Overlord API](#overlord) or one of its task state specific variants instead.
+
+```json
+["index_wikiticker_2019-02-11T02:20:15.316Z"]
+```
+
+* `/druid/worker/v1/task/{taskid}/log`
+
+Retrieve task log output stream by task id. Normal usage should prefer to use the `/druid/indexer/v1/task/{taskId}/log`
+[Overlord API](#overlord) instead.
+
+##### POST
+
+* `/druid/worker/v1/disable`
+
+'Disable' a MiddleManager, causing it to stop accepting new tasks but complete all existing tasks.
Returns JSON object +keyed by the combined `druid.host` and `druid.port`: + +```json +{"localhost:8091":"disabled"} +``` + +* `/druid/worker/v1/enable` + +'Enable' a MiddleManager, allowing it to accept new tasks again if it was previously disabled. Returns JSON object +keyed by the combined `druid.host` and `druid.port`: + +```json +{"localhost:8091":"enabled"} +``` + +* `/druid/worker/v1/task/{taskid}/shutdown` + +Shutdown a running task by `taskid`. Normal usage should prefer to use the `/druid/indexer/v1/task/{taskId}/shutdown` +[Overlord API](#overlord) instead. Returns JSON: + +```json +{"task":"index_kafka_wikiticker_f7011f8ffba384b_fpeclode"} +``` + ### Peon @@ -521,7 +656,7 @@ Retrieve an unparseable events report from a Peon. See [task reports](../ingesti * `/druid/historical/v1/loadstatus` Returns JSON of the form `{"cacheInitialized":}`, where value is either `true` or `false` indicating if all -segments in the local cache have been loaded. This can be used to know when a Historical node is ready +segments in the local cache have been loaded. This can be used to know when a Historical process is ready to be queried after a restart. * `/druid/historical/v1/readiness` @@ -538,6 +673,9 @@ This section documents the API endpoints for the processes that reside on Query #### Datasource Information +Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/` +(e.g., 2016-06-27_2016-06-28). + ##### GET * `/druid/v2/datasources` @@ -548,7 +686,7 @@ Returns a list of queryable datasources. Returns the dimensions and metrics of the datasource. Optionally, you can provide request parameter "full" to get list of served intervals with dimensions and metrics being served for those intervals. You can also provide request param "interval" explicitly to refer to a particular interval. -If no interval is specified, a default interval spanning a configurable period before the current time will be used. 
The duration of this interval is specified in ISO8601 format via: +If no interval is specified, a default interval spanning a configurable period before the current time will be used. The default duration of this interval is specified in ISO 8601 duration format via: druid.query.segmentMetadata.defaultHistory @@ -557,7 +695,7 @@ druid.query.segmentMetadata.defaultHistory Returns the dimensions of the datasource.
-This API is deprecated and will be removed in future releases. Please use [SegmentMetadataQuery](../querying/segmentmetadataquery.html) instead +This API is deprecated and will be removed in future releases. Please use SegmentMetadataQuery instead which provides more comprehensive information and supports all dataSource types including streaming dataSources. It's also encouraged to use [INFORMATION_SCHEMA tables](../querying/sql.html#retrieving-metadata) if you're using SQL.
@@ -567,12 +705,12 @@ if you're using SQL. Returns the metrics of the datasource.
-This API is deprecated and will be removed in future releases. Please use [SegmentMetadataQuery](../querying/segmentmetadataquery.html) instead +This API is deprecated and will be removed in future releases. Please use SegmentMetadataQuery instead which provides more comprehensive information and supports all dataSource types including streaming dataSources. It's also encouraged to use [INFORMATION_SCHEMA tables](../querying/sql.html#retrieving-metadata) if you're using SQL.
-* `/druid/v2/datasources/{dataSourceName}/candidates?intervals={comma-separated-intervals-in-ISO8601-format}&numCandidates={numCandidates}` +* `/druid/v2/datasources/{dataSourceName}/candidates?intervals={comma-separated-intervals}&numCandidates={numCandidates}` Returns segment information lists including server locations for the given datasource and intervals. If "numCandidates" is not specified, it will return all servers for each interval. diff --git a/docs/content/operations/management-uis.md b/docs/content/operations/management-uis.md new file mode 100644 index 000000000000..8029fd6835d1 --- /dev/null +++ b/docs/content/operations/management-uis.md @@ -0,0 +1,78 @@ +--- +layout: doc_page +title: "Management UIs" +--- + + + +# Management UIs + +## Druid Console + +Druid provides a console for managing datasources, segments, tasks, data processes (Historicals and MiddleManagers), and coordinator dynamic configuration. The user can also run SQL and native Druid queries within the console. + +The Druid Console is hosted by the [Router](../development/router.html) process. We recommend running the Router process on your [Query server](../design/processes.html). + +In addition, the following cluster settings must be enabled: + +- the Router's [management proxy](../development/router.html#enabling-the-management-proxy) must be enabled. +- the Broker processes in the cluster must have [Druid SQL](../querying/sql.html) enabled. + +After enabling Druid SQL on the Brokers and deploying a Router with the managment proxy enabled, the Druid console can be accessed at: + +``` +http://: +``` + +The Druid Console contains all of the functionality provided by the older consoles described below, which are still available if needed. The legacy consoles may be replaced by the Druid Console in the future. + +## Legacy Consoles + +These older consoles provide a subset of the functionality of the Druid Console. We recommend using the Druid Console if possible. 
+ +### Coordinator Consoles + +#### Version 2 + +The Druid Coordinator exposes a web console for displaying cluster information and rule configuration. After the Coordinator starts, the console can be accessed at: + +``` +http://: +``` + +There exists a full cluster view (which shows indexing tasks and Historical processes), as well as views for individual Historical processes, datasources and segments themselves. Segment information can be displayed in raw JSON form or as part of a sortable and filterable table. + +The Coordinator console also exposes an interface to creating and editing rules. All valid datasources configured in the segment database, along with a default datasource, are available for configuration. Rules of different types can be added, deleted or edited. + +#### Version 1 + +The oldest version of Druid's Coordinator console is still available for backwards compatibility at: + +``` +http://:/old-console +``` + +### Overlord Console + +The Overlord console can be used to view pending tasks, running tasks, available workers, and recent worker creation and termination. The console can be accessed at: + +``` +http://:/console.html +``` diff --git a/docs/content/operations/metrics.md b/docs/content/operations/metrics.md index e8e8958a974d..99934cb65107 100644 --- a/docs/content/operations/metrics.md +++ b/docs/content/operations/metrics.md @@ -51,10 +51,10 @@ Available Metrics |------|-----------|----------|------------| |`query/time`|Milliseconds taken to complete a query.|Common: dataSource, type, interval, hasFilters, duration, context, remoteAddress, id. Aggregation Queries: numMetrics, numComplexMetrics. GroupBy: numDimensions. TopN: threshold, dimension.|< 1s| |`query/bytes`|number of bytes returned in query response.|Common: dataSource, type, interval, hasFilters, duration, context, remoteAddress, id. Aggregation Queries: numMetrics, numComplexMetrics. GroupBy: numDimensions. 
TopN: threshold, dimension.| | -|`query/node/time`|Milliseconds taken to query individual historical/realtime nodes.|id, status, server.|< 1s| -|`query/node/bytes`|number of bytes returned from querying individual historical/realtime nodes.|id, status, server.| | -|`query/node/ttfb`|Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime nodes.|id, status, server.|< 1s| -|`query/node/backpressure`|Milliseconds that the channel to this node has spent suspended due to backpressure.|id, status, server.| | +|`query/node/time`|Milliseconds taken to query individual historical/realtime processes.|id, status, server.|< 1s| +|`query/node/bytes`|number of bytes returned from querying individual historical/realtime processes.|id, status, server.| | +|`query/node/ttfb`|Time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes.|id, status, server.|< 1s| +|`query/node/backpressure`|Milliseconds that the channel to this process has spent suspended due to backpressure.|id, status, server.| | |`query/intervalChunk/time`|Only emitted if interval chunking is enabled. Milliseconds required to query an interval chunk. This metric is deprecated and will be removed in the future because interval chunking is deprecated. See [Query Context](../querying/query-context.html).|id, status, chunkInterval (if interval chunking is enabled).|< 1s| |`query/count`|number of total queries|This metric is only available if the QueryCountStatsMonitor module is included.|| |`query/success/count`|number of queries successfully processed|This metric is only available if the QueryCountStatsMonitor module is included.|| @@ -69,7 +69,7 @@ Available Metrics |`query/segment/time`|Milliseconds taken to query individual segment. 
Includes time to page in the segment from disk.|id, status, segment.|several hundred milliseconds| |`query/wait/time`|Milliseconds spent waiting for a segment to be scanned.|id, segment.|< several hundred milliseconds| |`segment/scan/pending`|Number of segments in queue waiting to be scanned.||Close to 0| -|`query/segmentAndCache/time`|Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical node).|id, segment.|several hundred milliseconds| +|`query/segmentAndCache/time`|Milliseconds taken to query individual segment or hit the cache (if it is enabled on the Historical process).|id, segment.|several hundred milliseconds| |`query/cpu/time`|Microseconds of CPU time taken to complete a query|Common: dataSource, type, interval, hasFilters, duration, context, remoteAddress, id. Aggregation Queries: numMetrics, numComplexMetrics. GroupBy: numDimensions. TopN: threshold, dimension.|Varies| |`query/count`|number of total queries|This metric is only available if the QueryCountStatsMonitor module is included.|| |`query/success/count`|number of queries successfully processed|This metric is only available if the QueryCountStatsMonitor module is included.|| @@ -145,9 +145,9 @@ These metrics are applicable for the Kafka Indexing Service. |`ingest/kafka/maxLag`|Max lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. Minimum emission period for this metric is a minute.|dataSource.|Greater than 0, should not be a very high number | |`ingest/kafka/avgLag`|Average lag between the offsets consumed by the Kafka indexing tasks and latest offsets in Kafka brokers across all partitions. 
Minimum emission period for this metric is a minute.|dataSource.|Greater than 0, should not be a very high number | -## Ingestion Metrics (Realtime node) +## Ingestion Metrics (Realtime process) -These metrics are only available if the RealtimeMetricsMonitor is included in the monitors list for the Realtime node. These metrics are deltas for each emission period. +These metrics are only available if the RealtimeMetricsMonitor is included in the monitors list for the Realtime process. These metrics are deltas for each emission period. |Metric|Description|Dimensions|Normal Value| |------|-----------|----------|------------| diff --git a/docs/content/operations/other-hadoop.md b/docs/content/operations/other-hadoop.md index 74a86965f250..0c73d573b09f 100644 --- a/docs/content/operations/other-hadoop.md +++ b/docs/content/operations/other-hadoop.md @@ -37,7 +37,7 @@ For best results, use these tips when configuring Druid to interact with your fa ## Tip #1: Place Hadoop XMLs on Druid classpath Place your Hadoop configuration XMLs (core-site.xml, hdfs-site.xml, yarn-site.xml, mapred-site.xml) on the classpath -of your Druid nodes. You can do this by copying them into `conf/druid/_common/core-site.xml`, +of your Druid processes. You can do this by copying them into `conf/druid/_common/core-site.xml`, `conf/druid/_common/hdfs-site.xml`, and so on. This allows Druid to find your Hadoop cluster and properly submit jobs. ## Tip #2: Classloader modification on Hadoop (Map/Reduce jobs only) diff --git a/docs/content/operations/performance-faq.md b/docs/content/operations/performance-faq.md index 672ca9d3a40c..856700128727 100644 --- a/docs/content/operations/performance-faq.md +++ b/docs/content/operations/performance-faq.md @@ -26,19 +26,19 @@ title: "Performance FAQ" ## I can't match your benchmarked results -Improper configuration is by far the largest problem we see people trying to deploy Druid. 
The example configurations listed in the tutorials are designed for a small volume of data where all nodes are on a single machine. The configs are extremely poor for actual production use. +Improper configuration is by far the largest problem we see people trying to deploy Druid. The example configurations listed in the tutorials are designed for a small volume of data where all processes are on a single machine. The configs are extremely poor for actual production use. ## What should I set my JVM heap? -The size of the JVM heap really depends on the type of Druid node you are running. Below are a few considerations. +The size of the JVM heap really depends on the type of Druid process you are running. Below are a few considerations. -[Broker nodes](../design/broker.html) uses the JVM heap mainly to merge results from Historicals and real-times. Brokers also use off-heap memory and processing threads for groupBy queries. We recommend 20G-30G of heap here. +[Broker processes](../design/broker.html) uses the JVM heap mainly to merge results from Historicals and real-times. Brokers also use off-heap memory and processing threads for groupBy queries. We recommend 20G-30G of heap here. -[Historical nodes](../design/historical.html) use off-heap memory to store intermediate results, and by default, all segments are memory mapped before they can be queried. Typically, the more memory is available on a Historical node, the more segments can be served without the possibility of data being paged on to disk. On Historicals, the JVM heap is used for [GroupBy queries](../querying/groupbyquery.html), some data structures used for intermediate computation, and general processing. One way to calculate how much space there is for segments is: memory_for_segments = total_memory - heap - direct_memory - jvm_overhead. Note that total_memory here refers to the memory available to the cgroup (if running on Linux), which for default cases is going to be all the system memory. 
+[Historical processes](../design/historical.html) use off-heap memory to store intermediate results, and by default, all segments are memory mapped before they can be queried. Typically, the more memory is available on a Historical process, the more segments can be served without the possibility of data being paged on to disk. On Historicals, the JVM heap is used for [GroupBy queries](../querying/groupbyquery.html), some data structures used for intermediate computation, and general processing. One way to calculate how much space there is for segments is: memory_for_segments = total_memory - heap - direct_memory - jvm_overhead. Note that total_memory here refers to the memory available to the cgroup (if running on Linux), which for default cases is going to be all the system memory. We recommend 250mb * (processing.numThreads) for the heap. -[Coordinator nodes](../design/coordinator.html) do not require off-heap memory and the heap is used for loading information about all segments to determine what segments need to be loaded, dropped, moved, or replicated. +[Coordinator processes](../design/coordinator.html) do not require off-heap memory and the heap is used for loading information about all segments to determine what segments need to be loaded, dropped, moved, or replicated. ## How much direct memory does Druid use? Any Druid process that handles queries (Brokers, Peons, and Historicals) uses two kinds of direct memory buffers with configurable size: processing buffers and merge buffers. @@ -57,10 +57,10 @@ The `+1` is a fuzzy parameter meant to account for the decompression and diction Operators can ensure at least this amount of direct memory is available by providing `-XX:MaxDirectMemorySize=` at the command line. ## What is the intermediate computation buffer? -The intermediate computation buffer specifies a buffer size for the storage of intermediate results. 
The computation engine in both the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed. The default size is 1073741824 bytes (1GB). +The intermediate computation buffer specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime processes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed. The default size is 1073741824 bytes (1GB). ## What is server maxSize? -Server maxSize sets the maximum cumulative segment size (in bytes) that a node can hold. Changing this parameter will affect performance by controlling the memory/disk ratio on a node. Setting this parameter to a value greater than the total memory capacity on a node and may cause disk paging to occur. This paging time introduces a query latency delay. +Server maxSize sets the maximum cumulative segment size (in bytes) that a process can hold. Changing this parameter will affect performance by controlling the memory/disk ratio on a process. Setting this parameter to a value greater than the total memory capacity on a process and may cause disk paging to occur. This paging time introduces a query latency delay. ## My logs are really chatty, can I set them to asynchronously write? 
Yes, using a `log4j2.xml` similar to the following causes some of the more chatty classes to write asynchronously: diff --git a/docs/content/operations/recommendations.md b/docs/content/operations/recommendations.md index bb0acd30974b..03a4fb2c0bef 100644 --- a/docs/content/operations/recommendations.md +++ b/docs/content/operations/recommendations.md @@ -60,20 +60,20 @@ Please note that above flags are general guidelines only. Be cautious and feel f Additionally, for large jvm heaps, here are a few Garbage Collection efficiency guidelines that have been known to help in some cases. - Mount /tmp on tmpfs ( See http://www.evanjones.ca/jvm-mmap-pause.html ) -- On Disk-IO intensive nodes (e.g. Historical and MiddleManager), GC and Druid logs should be written to a different disk than where data is written. +- On Disk-IO intensive processes (e.g. Historical and MiddleManager), GC and Druid logs should be written to a different disk than where data is written. - Disable Transparent Huge Pages ( See https://blogs.oracle.com/linux/performance-issues-with-transparent-huge-pages-thp ) - Try disabling biased locking by using `-XX:-UseBiasedLocking` jvm flag. ( See https://dzone.com/articles/logging-stop-world-pauses-jvm ) # Use UTC Timezone -We recommend using UTC timezone for all your events and across on your nodes, not just for Druid, but for all data infrastructure. This can greatly mitigate potential query problems with inconsistent timezones. To query in a non-UTC timezone see [query granularities](../querying/granularities.html#period-granularities) +We recommend using UTC timezone for all your events and across your hosts, not just for Druid, but for all data infrastructure. This can greatly mitigate potential query problems with inconsistent timezones. 
To query in a non-UTC timezone see [query granularities](../querying/granularities.html#period-granularities) # SSDs -SSDs are highly recommended for Historical and real-time nodes if you are not running a cluster that is entirely in memory. SSDs can greatly mitigate the time required to page data in and out of memory. +SSDs are highly recommended for Historical and real-time processes if you are not running a cluster that is entirely in memory. SSDs can greatly mitigate the time required to page data in and out of memory. # JBOD vs RAID -Historical nodes store large number of segments on Disk and support specifying multiple paths for storing those. Typically, hosts have multiple disks configured with RAID which makes them look like a single disk to OS. RAID might have overheads specially if its not hardware controller based but software based. So, Historicals might get improved disk throughput with JBOD. +Historical processes store large number of segments on Disk and support specifying multiple paths for storing those. Typically, hosts have multiple disks configured with RAID which makes them look like a single disk to OS. RAID might have overheads specially if its not hardware controller based but software based. So, Historicals might get improved disk throughput with JBOD. # Use Timeseries and TopN Queries Instead of GroupBy Where Possible diff --git a/docs/content/operations/rolling-updates.md b/docs/content/operations/rolling-updates.md index 9b908037ed35..a7672f52feba 100644 --- a/docs/content/operations/rolling-updates.md +++ b/docs/content/operations/rolling-updates.md @@ -24,7 +24,7 @@ title: "Rolling Updates" # Rolling Updates -For rolling Druid cluster updates with no downtime, we recommend updating Druid nodes in the +For rolling Druid cluster updates with no downtime, we recommend updating Druid processes in the following order: 1. Historical @@ -38,15 +38,15 @@ following order: ## Historical -Historical nodes can be updated one at a time. 
Each Historical node has a startup time to memory map +Historical processes can be updated one at a time. Each Historical process has a startup time to memory map all the segments it was serving before the update. The startup time typically takes a few seconds to -a few minutes, depending on the hardware of the node. As long as each Historical node is updated -with a sufficient delay (greater than the time required to start a single node), you can rolling +a few minutes, depending on the hardware of the host. As long as each Historical process is updated +with a sufficient delay (greater than the time required to start a single process), you can rolling update the entire Historical cluster. ## Overlord -Overlord nodes can be updated one at a time in a rolling fashion. +Overlord processes can be updated one at a time in a rolling fashion. ## Middle Managers @@ -80,23 +80,23 @@ to `/druid/worker/v1/enable`. ### Autoscaling-based replacement -If autoscaling is enabled on your Overlord, then Overlord nodes can launch new Middle Manager nodes +If autoscaling is enabled on your Overlord, then Overlord processes can launch new Middle Manager processes en masse and then gracefully terminate old ones as their tasks finish. This process is configured by -setting `druid.indexer.runner.minWorkerVersion=#{VERSION}`. Each time you update your Overlord node, +setting `druid.indexer.runner.minWorkerVersion=#{VERSION}`. Each time you update your Overlord process, the `VERSION` value should be increased, which will trigger a mass launch of new Middle Managers. The config `druid.indexer.autoscale.workerVersion=#{VERSION}` also needs to be set. ## Standalone Real-time -Standalone real-time nodes can be updated one at a time in a rolling fashion. +Standalone real-time processes can be updated one at a time in a rolling fashion. ## Broker -Broker nodes can be updated one at a time in a rolling fashion. 
There needs to be some delay between -updating each node as Brokers must load the entire state of the cluster before they return valid +Broker processes can be updated one at a time in a rolling fashion. There needs to be some delay between +updating each process as Brokers must load the entire state of the cluster before they return valid results. ## Coordinator -Coordinator nodes can be updated one at a time in a rolling fashion. +Coordinator processes can be updated one at a time in a rolling fashion. diff --git a/docs/content/operations/rule-configuration.md b/docs/content/operations/rule-configuration.md index 4d67789986bc..4847b16b99d6 100644 --- a/docs/content/operations/rule-configuration.md +++ b/docs/content/operations/rule-configuration.md @@ -24,14 +24,14 @@ title: "Retaining or Automatically Dropping Data" # Retaining or Automatically Dropping Data -Coordinator nodes use rules to determine what data should be loaded to or dropped from the cluster. Rules are used for data retention and query execution, and are set on the Coordinator console (http://coordinator_ip:port). +Coordinator processes use rules to determine what data should be loaded to or dropped from the cluster. Rules are used for data retention and query execution, and are set on the Coordinator console (http://coordinator_ip:port). -There are three types of rules, i.e., load rules, drop rules, and broadcast rules. Load rules indicate how segments should be assigned to different historical node tiers and how many replicas of a segment should exist in each tier. -Drop rules indicate when segments should be dropped entirely from the cluster. Finally, broadcast rules indicate how segments of different data sources should be co-located in historical nodes. +There are three types of rules, i.e., load rules, drop rules, and broadcast rules. Load rules indicate how segments should be assigned to different historical process tiers and how many replicas of a segment should exist in each tier. 
+Drop rules indicate when segments should be dropped entirely from the cluster. Finally, broadcast rules indicate how segments of different data sources should be co-located in Historical processes. The Coordinator loads a set of rules from the metadata storage. Rules may be specific to a certain datasource and/or a default set of rules can be configured. Rules are read in order and hence the ordering of rules is important. The Coordinator will cycle through all available segments and match each segment with the first rule that applies. Each segment may only match a single rule. -Note: It is recommended that the Coordinator console is used to configure rules. However, the Coordinator node does have HTTP endpoints to programmatically configure rules. +Note: It is recommended that the Coordinator console is used to configure rules. However, the Coordinator process does have HTTP endpoints to programmatically configure rules. When a rule is updated, the change may not be reflected until the next time the Coordinator runs. This will be fixed in the near future. @@ -173,7 +173,7 @@ The interval of a segment will be compared against the specified period. The per Broadcast Rules --------------- -Broadcast rules indicate how segments of different data sources should be co-located in Historical nodes. +Broadcast rules indicate how segments of different data sources should be co-located in Historical processes. Once a broadcast rule is configured for a data source, all segments of the data source are broadcasted to the servers holding _any segments_ of the co-located data sources. ### Forever Broadcast Rule @@ -188,7 +188,7 @@ Forever broadcast rules are of the form: ``` * `type` - this should always be "broadcastForever" -* `colocatedDataSources` - A JSON List containing data source names to be co-located. `null` and empty list means broadcasting to every node in the cluster. +* `colocatedDataSources` - A JSON List containing data source names to be co-located. 
`null` and empty list means broadcasting to every process in the cluster. ### Interval Broadcast Rule @@ -203,7 +203,7 @@ Interval broadcast rules are of the form: ``` * `type` - this should always be "broadcastByInterval" -* `colocatedDataSources` - A JSON List containing data source names to be co-located. `null` and empty list means broadcasting to every node in the cluster. +* `colocatedDataSources` - A JSON List containing data source names to be co-located. `null` and empty list means broadcasting to every process in the cluster. * `interval` - A JSON Object representing ISO-8601 Periods. Only the segments of the interval will be broadcasted. ### Period Broadcast Rule @@ -220,7 +220,7 @@ Period broadcast rules are of the form: ``` * `type` - this should always be "broadcastByPeriod" -* `colocatedDataSources` - A JSON List containing data source names to be co-located. `null` and empty list means broadcasting to every node in the cluster. +* `colocatedDataSources` - A JSON List containing data source names to be co-located. `null` and empty list means broadcasting to every process in the cluster. * `period` - A JSON Object representing ISO-8601 Periods * `includeFuture` - A JSON Boolean indicating whether the load period should include the future. This property is optional, Default is true. diff --git a/docs/content/operations/segment-optimization.md b/docs/content/operations/segment-optimization.md index 2fea3ff3d9cd..179b418b934e 100644 --- a/docs/content/operations/segment-optimization.md +++ b/docs/content/operations/segment-optimization.md @@ -1,6 +1,6 @@ --- layout: doc_page -title: "Segment size optimization" +title: "Segment Size Optimization" --- -# Segment size optimization +# Segment Size Optimization In Druid, it's important to optimize the segment size because 1. Druid stores data in segments. 
If you're using the [best-effort roll-up](../design/index.html#roll-up-modes) mode, increasing the segment size might introduce further aggregation which reduces the dataSource size. - 2. When a query is submitted, that query is distributed to all Historicals and realtimes - which hold the input segments of the query. Each node has a processing threads pool and use one thread per segment to - process it. If the segment size is too large, data might not be well distributed over the - whole cluster, thereby decreasing the degree of parallelism. If the segment size is too small, - each processing thread processes too small data. This might reduce the processing speed of other queries as well as - the input query itself because the processing threads are shared for executing all queries. + 2. When a query is submitted, that query is distributed to all Historicals and realtime tasks + which hold the input segments of the query. Each process and task picks a thread from its own processing thread pool + to process a single segment. If segment sizes are too large, data might not be well distributed between data + servers, decreasing the degree of parallelism possible during query processing. + At the other extreme where segment sizes are too small, the scheduling + overhead of processing a larger number of segments per query can reduce + performance, as the threads that process each segment compete for the fixed + slots of the processing pool. It would be best if you can optimize the segment size at ingestion time, but sometimes it's not easy -especially for the streaming ingestion because the amount of data ingested might vary over time. In this case, -you can roughly set the segment size at ingestion time and optimize it later. You have two options: +especially when it comes to stream ingestion because the amount of data ingested might vary over time. In this case, +you can create segments with a sub-optimzed size first and optimize them later. 
+
+You may need to consider the following to optimize your segments.
+
+ - Number of rows per segment: it's generally recommended for each segment to have around 5 million rows.
+ This setting is usually _more_ important than the below "segment byte size".
+ This is because Druid uses a single thread to process each segment,
+ and thus this setting can directly control how many rows each thread processes,
+ which in turn means how well the query execution is parallelized.
+ - Segment byte size: it's recommended to set this to 300MB ~ 700MB. If this value
+ doesn't match the "number of rows per segment", please consider optimizing
+ the number of rows per segment rather than this value.
+
+
+The above recommendation works in general, but the optimal setting can
+vary based on your workload. For example, if most of your queries
+are heavy and take a long time to process each row, you may want to make
+segments smaller so that the query processing can be parallelized further.
+If you still see performance issues after optimizing segment size,
+you may need to find the optimal settings for your workload.
+
+ +There might be several ways to check if the compaction is necessary. One way +is using the [System Schema](../querying/sql.html#system-schema). The +system schema provides several tables about the current system status including the `segments` table. +By running the below query, you can get the average number of rows and average size for published segments. + +```sql +SELECT + "start", + "end", + version, + COUNT(*) AS num_segments, + AVG("num_rows") AS avg_num_rows, + SUM("num_rows") AS total_num_rows, + AVG("size") AS avg_size, + SUM("size") AS total_size +FROM + sys.segments A +WHERE + datasource = 'your_dataSource' AND + is_published = 1 +GROUP BY 1, 2, 3 +ORDER BY 1, 2, 3 DESC; +``` + +Please note that the query result might include overshadowed segments. +In this case, you may want to see only rows of the max version per interval (pair of `start` and `end`). + +Once you find your segments need compaction, you can consider the below two options: - Turning on the [automatic compaction of Coordinators](../design/coordinator.html#compacting-segments). The Coordinator periodically submits [compaction tasks](../ingestion/tasks.html#compaction-task) to re-index small segments. + To enable the automatic compaction, you need to configure it for each dataSource via Coordinator's dynamic configuration. + See [Compaction Configuration API](../operations/api-reference.html#compaction-configuration) + and [Compaction Configuration](../configuration/index.html#compaction-dynamic-configuration) for details. - Running periodic Hadoop batch ingestion jobs and using a `dataSource` inputSpec to read from the segments generated by the Kafka indexing tasks. This might be helpful if you want to compact a lot of segments in parallel. Details on how to do this can be found under ['Updating Existing Data'](../ingestion/update-existing-data.html). 
diff --git a/docs/content/operations/tls-support.md b/docs/content/operations/tls-support.md index 8563b0b26e80..5585e623f41c 100644 --- a/docs/content/operations/tls-support.md +++ b/docs/content/operations/tls-support.md @@ -32,7 +32,7 @@ title: "TLS Support" |`druid.enableTlsPort`|Enable/Disable HTTPS connector.|`false`| Although not recommended but both HTTP and HTTPS connectors can be enabled at a time and respective ports are configurable using `druid.plaintextPort` -and `druid.tlsPort` properties on each node. Please see `Configuration` section of individual nodes to check the valid and default values for these ports. +and `druid.tlsPort` properties on each process. Please see `Configuration` section of individual processes to check the valid and default values for these ports. # Jetty Server TLS Configuration @@ -75,7 +75,7 @@ The following table contains non-mandatory advanced configuration options, use c # Druid's internal communication over TLS -Whenever possible Druid nodes will use HTTPS to talk to each other. To enable this communication Druid's HttpClient needs to +Whenever possible Druid processes will use HTTPS to talk to each other. To enable this communication Druid's HttpClient needs to be configured with a proper [SSLContext](http://docs.oracle.com/javase/8/docs/api/javax/net/ssl/SSLContext.html) that is able to validate the Server Certificates, otherwise communication will fail. @@ -87,7 +87,7 @@ If this extension does not satisfy the requirements then please follow the exten to create your own extension. # Upgrading Clients that interact with Overlord or Coordinator -When Druid Coordinator/Overlord have both HTTP and HTTPS enabled and Client sends request to non-leader node, then Client is always redirected to the HTTPS endpoint on leader node. +When Druid Coordinator/Overlord have both HTTP and HTTPS enabled and Client sends request to non-leader process, then Client is always redirected to the HTTPS endpoint on leader process. 
So, Clients should be first upgraded to be able to handle redirect to HTTPS. Then Druid Overlord/Coordinator should be upgraded and configured to run both HTTP and HTTPS ports. Then Client configuration should be changed to refer to Druid Coordinator/Overlord via the HTTPS endpoint and then HTTP port on Druid Coordinator/Overlord should be disabled. # Custom TLS certificate checks diff --git a/docs/content/querying/aggregations.md b/docs/content/querying/aggregations.md index c2d2c72d1da3..a9a819be02e0 100644 --- a/docs/content/querying/aggregations.md +++ b/docs/content/querying/aggregations.md @@ -277,10 +277,16 @@ The [DataSketches Theta Sketch](../development/extensions-core/datasketches-thet The [DataSketches HLL Sketch](../development/extensions-core/datasketches-hll.html) extension-provided aggregator gives distinct count estimates using the HyperLogLog algorithm. The HLL Sketch is faster and requires less storage than the Theta Sketch, but does not support intersection or difference operations. -#### Cardinality/HyperUnique +#### Cardinality/HyperUnique (Deprecated) + +
+The Cardinality and HyperUnique aggregators are deprecated. Please use DataSketches HLL Sketch instead. +
The [Cardinality and HyperUnique](../hll-old.html) aggregators are older aggregator implementations available by default in Druid that also provide distinct count estimates using the HyperLogLog algorithm. The newer [DataSketches HLL Sketch](../development/extensions-core/datasketches-hll.html) extension-provided aggregator has superior accuracy and performance and is recommended instead.

+The DataSketches team has published a [comparison study](https://datasketches.github.io/docs/HLL/HllSketchVsDruidHyperLogLogCollector.html) between Druid's original HLL algorithm and the DataSketches HLL algorithm. Based on the demonstrated advantages of the DataSketches implementation, we have deprecated Druid's original HLL aggregator.
+
Please note that DataSketches HLL aggregators and `hyperUnique` aggregators are not mutually compatible.

### Histograms and quantiles

@@ -289,10 +295,31 @@ Please note that DataSketches HLL aggregators and `hyperUnique` aggregators are

The [DataSketches Quantiles Sketch](../development/extensions-core/datasketches-quantiles.html) extension-provided aggregator provides quantile estimates and histogram approximations using the numeric quantiles DoublesSketch from the [datasketches](http://datasketches.github.io/) library.

-#### Approximate Histogram
+We recommend this aggregator in general for quantiles/histogram use cases, as it provides formal error bounds and has distribution-independent accuracy.
+
+#### Fixed Buckets Histogram
+
+Druid also provides a [simple histogram implementation](../development/extensions-core/approxiate-histograms.html#fixed-buckets-histogram) that uses a fixed range and fixed number of buckets with support for quantile estimation, backed by an array of bucket count values.
+
+The fixed buckets histogram can perform well when the distribution of the input data allows a small number of buckets to be used.
+
+We do not recommend the fixed buckets histogram for general use, as its usefulness is extremely data dependent. 
However, it is made available for users that have already identified use cases where a fixed buckets histogram is suitable. + +#### Approximate Histogram (Deprecated) The [Approximate Histogram](../development/extensions-core/approxiate-histograms.html) extension-provided aggregator also provides quantile estimates and histogram approximations, based on [http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf). +The algorithm used by this deprecated aggregator is highly distribution-dependent and its output is subject to serious distortions when the input does not fit within the algorithm's limitations. + +A [study published by the DataSketches team](https://datasketches.github.io/docs/Quantiles/DruidApproxHistogramStudy.html) demonstrates some of the known failure modes of this algorithm: +- The algorithm's quantile calculations can fail to provide results for a large range of rank values (all ranks less than 0.89 in the example used in the study), returning all zeroes instead. +- The algorithm can completely fail to record spikes in the tail ends of the distribution +- In general, the histogram produced by the algorithm can deviate significantly from the true histogram, with no bounds on the errors. + +It is not possible to determine a priori how well this aggregator will behave for a given input stream, nor does the aggregator provide any indication that serious distortions are present in the output. + +For these reasons, we have deprecated this aggregator and do not recommend its use. + ## Miscellaneous Aggregations ### Filtered Aggregator diff --git a/docs/content/querying/groupbyquery.md b/docs/content/querying/groupbyquery.md index 6ba1402c20a6..c2aa9aa7d608 100644 --- a/docs/content/querying/groupbyquery.md +++ b/docs/content/querying/groupbyquery.md @@ -230,18 +230,18 @@ GroupBy queries can be executed using two different strategies. The default stra the query context. 
If neither the context field nor the property is set, the "v2" strategy will be used. - "v2", the default, is designed to offer better performance and memory management. This strategy generates -per-segment results using a fully off-heap map. Data nodes merge the per-segment results using a fully off-heap +per-segment results using a fully off-heap map. Data processes merge the per-segment results using a fully off-heap concurrent facts map combined with an on-heap string dictionary. This may optionally involve spilling to disk. Data -nodes return sorted results to the Broker, which merges result streams using an N-way merge. The broker materializes +processes return sorted results to the Broker, which merges result streams using an N-way merge. The broker materializes the results if necessary (e.g. if the query sorts on columns other than its dimensions). Otherwise, it streams results back as they are merged. -- "v1", a legacy engine, generates per-segment results on data nodes (Historical, realtime, MiddleManager) using a map which -is partially on-heap (dimension keys and the map itself) and partially off-heap (the aggregated values). Data nodes then +- "v1", a legacy engine, generates per-segment results on data processes (Historical, realtime, MiddleManager) using a map which +is partially on-heap (dimension keys and the map itself) and partially off-heap (the aggregated values). Data processes then merge the per-segment results using Druid's indexing mechanism. This merging is multi-threaded by default, but can optionally be single-threaded. The Broker merges the final result set using Druid's indexing mechanism again. The broker merging is always single-threaded. Because the Broker merges results using the indexing mechanism, it must materialize -the full result set before returning any results. On both the data nodes and the Broker, the merging index is fully +the full result set before returning any results. 
On both the data processes and the Broker, the merging index is fully
on-heap by default, but it can optionally store aggregated values off-heap.

#### Differences between v1 and v2

@@ -257,8 +257,8 @@ that can complete successfully in one engine may exceed resource limits and fail

- groupBy v1 imposes no limit on the number of concurrently running queries, whereas groupBy v2 controls memory usage by
using a finite-sized merge buffer pool. By default, the number of merge buffers is 1/4 the number of processing threads.
You can adjust this as necessary to balance concurrency and memory usage.
-- groupBy v1 supports caching on either the Broker or Historical nodes, whereas groupBy v2 only supports caching on
-Historical nodes.
+- groupBy v1 supports caching on either the Broker or Historical processes, whereas groupBy v2 only supports caching on
+Historical processes.
- groupBy v1 supports using [chunkPeriod](query-context.html) to parallelize merging on the Broker, whereas groupBy v2
ignores chunkPeriod.
- groupBy v2 supports both array-based aggregation and hash-based aggregation. The array-based aggregation is used only
@@ -334,7 +334,7 @@ data is actually spilled (see [Memory tuning and resource limits](#memory-tuning

Once parallel combine is enabled, the groupBy v2 engine can create a combining tree for merging sorted aggregates. Each
intermediate node of the tree is a thread merging aggregates from the child nodes. The leaf node threads read and merge
-aggregates from hash tables including spilled ones. Usually, leaf nodes are slower than intermediate nodes because they
+aggregates from hash tables including spilled ones. Usually, leaf nodes are slower than intermediate nodes because they
need to read data from disk. As a result, less threads are used for intermediate nodes by default. You can change the
degree of intermediate nodes. See `intermediateCombineDegree` in [Advanced groupBy v2 configurations](#groupby-v2-configurations).

@@ -364,7 +364,7 @@ strategy perform the outer query on the Broker in a single-threaded fashion. #### Configurations -This section describes the configurations for groupBy queries. You can set the runtime properties in the `runtime.properties` file on Broker, Historical, and MiddleManager nodes. You can set the query context parameters through the [query context](query-context.html). +This section describes the configurations for groupBy queries. You can set the runtime properties in the `runtime.properties` file on Broker, Historical, and MiddleManager processes. You can set the query context parameters through the [query context](query-context.html). ##### Configurations for groupBy v2 @@ -424,7 +424,7 @@ Supported query contexts: |`intermediateCombineDegree`|Overrides the value of `druid.query.groupBy.intermediateCombineDegree`|None| |`numParallelCombineThreads`|Overrides the value of `druid.query.groupBy.numParallelCombineThreads`|None| |`sortByDimsFirst`|Sort the results first by dimension values and then by timestamp.|false| -|`forceLimitPushDown`|When all fields in the orderby are part of the grouping key, the Broker will push limit application down to the Historical nodes. When the sorting order uses fields that are not in the grouping key, applying this optimization can result in approximate results with unknown accuracy, so this optimization is disabled by default in that case. Enabling this context flag turns on limit push down for limit/orderbys that contain non-grouping key columns.|false| +|`forceLimitPushDown`|When all fields in the orderby are part of the grouping key, the Broker will push limit application down to the Historical processes. When the sorting order uses fields that are not in the grouping key, applying this optimization can result in approximate results with unknown accuracy, so this optimization is disabled by default in that case. 
Enabling this context flag turns on limit push down for limit/orderbys that contain non-grouping key columns.|false| ##### GroupBy v1 configurations diff --git a/docs/content/querying/lookups.md b/docs/content/querying/lookups.md index 7c7ad81df2fe..33cf767a75ec 100644 --- a/docs/content/querying/lookups.md +++ b/docs/content/querying/lookups.md @@ -118,8 +118,8 @@ These endpoints will return one of the following results: ## Configuration propagation behavior The configuration is propagated to the query serving processes (Broker / Router / Peon / Historical) by the Coordinator. -The query serving nodes have an internal API for managing lookups on the node and those are used by the Coordinator. -The Coordinator periodically checks if any of the nodes need to load/drop lookups and updates them appropriately. +The query serving processes have an internal API for managing lookups on the process and those are used by the Coordinator. +The Coordinator periodically checks if any of the processes need to load/drop lookups and updates them appropriately. # API for configuring lookups @@ -260,8 +260,11 @@ For example, a post to `/druid/coordinator/v1/lookups/config/realtime_customer1/ This will replace the `site_id_customer1` lookup in the `realtime_customer1` with the definition above. +## Get All Lookups +A `GET` to `/druid/coordinator/v1/lookups/config/all` will return all known lookup specs for all tiers. 
+ ## Get Lookup -A `GET` to a particular lookup extractor factory is accomplished via `/druid/coordinator/v1/lookups/{tier}/{id}` +A `GET` to a particular lookup extractor factory is accomplished via `/druid/coordinator/v1/lookups/config/{tier}/{id}` Using the prior example, a `GET` to `/druid/coordinator/v1/lookups/config/realtime_customer2/site_id_customer2` should return @@ -288,7 +291,7 @@ To discover a list of tiers currently active in the cluster **instead of** ones A `GET` to `/druid/coordinator/v1/lookups/config/{tier}` will return a list of known lookup names for that tier. # Additional API related to status of configured lookups -These end points can be used to get the propagation status of configured lookups to lookup nodes such as Historicals. +These end points can be used to get the propagation status of configured lookups to processes using lookups such as Historicals. ## List load status of all lookups `GET /druid/coordinator/v1/lookups/status` with optional query parameter `detailed`. @@ -299,25 +302,25 @@ These end points can be used to get the propagation status of configured lookups ## List load status of single lookup `GET /druid/coordinator/v1/lookups/status/{tier}/{lookup}` with optional query parameter `detailed`. -## List lookup state of all nodes +## List lookup state of all processes `GET /druid/coordinator/v1/lookups/nodeStatus` with optional query parameter `discover` to discover tiers from zookeeper or configured lookup tiers are listed. -## List lookup state of nodes in a tier +## List lookup state of processes in a tier `GET /druid/coordinator/v1/lookups/nodeStatus/{tier}` -## List lookup state of single node +## List lookup state of single process `GET /druid/coordinator/v1/lookups/nodeStatus/{tier}/{host:port}` # Internal API -The Peon, Router, Broker, and Historical nodes all have the ability to consume lookup configuration. 
-There is an internal API these nodes use to list/load/drop their lookups starting at `/druid/listen/v1/lookups`. +The Peon, Router, Broker, and Historical processes all have the ability to consume lookup configuration. +There is an internal API these processes use to list/load/drop their lookups starting at `/druid/listen/v1/lookups`. These follow the same convention for return values as the cluster wide dynamic configuration. Following endpoints can be used for debugging purposes but not otherwise. ## Get Lookups -A `GET` to the node at `/druid/listen/v1/lookups` will return a json map of all the lookups currently active on the node. +A `GET` to the process at `/druid/listen/v1/lookups` will return a json map of all the lookups currently active on the process. The return value will be a json map of the lookups to their extractor factories. ```json @@ -336,7 +339,7 @@ The return value will be a json map of the lookups to their extractor factories. ## Get Lookup -A `GET` to the node at `/druid/listen/v1/lookups/some_lookup_name` will return the LookupExtractorFactory for the lookup identified by `some_lookup_name`. +A `GET` to the process at `/druid/listen/v1/lookups/some_lookup_name` will return the LookupExtractorFactory for the lookup identified by `some_lookup_name`. The return value will be the json representation of the factory. ```json @@ -358,7 +361,7 @@ To configure a Broker / Router / Historical / Peon to announce itself as part of |Property | Description | Default | |---------|-------------|---------| -|`druid.lookup.lookupTier`| The tier for **lookups** for this node. This is independent of other tiers.|`__default`| +|`druid.lookup.lookupTier`| The tier for **lookups** for this process. This is independent of other tiers.|`__default`| |`druid.lookup.lookupTierIsDatasource`|For some things like indexing service tasks, the datasource is passed in the runtime properties of a task. 
This option fetches the tierName from the same value as the datasource for the task. It is suggested to only use this as Peon options for the indexing service, if at all. If true, `druid.lookup.lookupTier` MUST NOT be specified|`"false"`| To configure the behavior of the dynamic configuration manager, use the following properties on the Coordinator: @@ -366,18 +369,18 @@ To configure the behavior of the dynamic configuration manager, use the followin |Property|Description|Default| |--------|-----------|-------| |`druid.manager.lookups.hostTimeout`|Timeout (in ms) PER HOST for processing request|`2000`(2 seconds)| -|`druid.manager.lookups.allHostTimeout`|Timeout (in ms) to finish lookup management on all the nodes.|`900000`(15 mins)| +|`druid.manager.lookups.allHostTimeout`|Timeout (in ms) to finish lookup management on all the processes.|`900000`(15 mins)| |`druid.manager.lookups.period`|How long to pause between management cycles|`120000`(2 mins)| -|`druid.manager.lookups.threadPoolSize`|Number of service nodes that can be managed concurrently|`10`| +|`druid.manager.lookups.threadPoolSize`|Number of service processes that can be managed concurrently|`10`| ## Saving configuration across restarts -It is possible to save the configuration across restarts such that a node will not have to wait for Coordinator action to re-populate its lookups. To do this the following property is set: +It is possible to save the configuration across restarts such that a process will not have to wait for Coordinator action to re-populate its lookups. To do this the following property is set: |Property|Description|Default| |--------|-----------|-------| |`druid.lookup.snapshotWorkingDir`|Working path used to store snapshot of current lookup configuration, leaving this property null will disable snapshot/bootstrap utility|null| -|`druid.lookup.enableLookupSyncOnStartup`|Enable the lookup synchronization process with Coordinator on startup. 
The queryable nodes will fetch and load the lookups from the Coordinator instead of waiting for the Coordinator to load the lookups for them. Users may opt to disable this option if there are no lookups configured in the cluster.|true| +|`druid.lookup.enableLookupSyncOnStartup`|Enable the lookup synchronization process with Coordinator on startup. The queryable processes will fetch and load the lookups from the Coordinator instead of waiting for the Coordinator to load the lookups for them. Users may opt to disable this option if there are no lookups configured in the cluster.|true| |`druid.lookup.numLookupLoadingThreads`|Number of threads for loading the lookups in parallel on startup. This thread pool is destroyed once startup is done. It is not kept during the lifetime of the JVM|Available Processors / 2| |`druid.lookup.coordinatorFetchRetries`|How many times to retry to fetch the lookup bean list from Coordinator, during the sync on startup.|3| |`druid.lookup.lookupStartRetries`|How many times to retry to start each lookup, either during the sync on startup, or during the runtime.|3| @@ -434,7 +437,7 @@ ex: `GET /druid/v1/lookups/introspect/nato-phonetic/values` ``` ## Druid version 0.10.0 to 0.10.1 upgrade/downgrade -Overall druid cluster lookups configuration is persisted in metadata store and also individual lookup nodes optionally persist a snapshot of loaded lookups on disk. +Overall druid cluster lookups configuration is persisted in metadata store and also individual lookup processes optionally persist a snapshot of loaded lookups on disk. If upgrading from druid version 0.10.0 to 0.10.1, then migration for all persisted metadata is handled automatically. If downgrading from 0.10.1 to 0.9.0 then lookups updates done via Coordinator while 0.10.1 was running, would be lost. 
diff --git a/docs/content/querying/multitenancy.md b/docs/content/querying/multitenancy.md index ea7408128f2c..69c57a9953f8 100644 --- a/docs/content/querying/multitenancy.md +++ b/docs/content/querying/multitenancy.md @@ -71,7 +71,7 @@ You can use this in concert with single-dimension partitioning to repartition yo ## Customizing data distribution -Druid additionally supports multitenancy by providing configurable means of distributing data. Druid's Historical nodes +Druid additionally supports multitenancy by providing configurable means of distributing data. Druid's Historical processes can be configured into [tiers](../operations/rule-configuration.html), and [rules](../operations/rule-configuration.html) can be set that determines which segments go into which tiers. One use case of this is that recent data tends to be accessed more frequently than older data. Tiering enables more recent segments to be hosted on more powerful hardware for better performance. @@ -80,8 +80,8 @@ stored on this tier. ## Supporting high query concurrency -Druid's fundamental unit of computation is a [segment](../design/segments.html). Nodes scan segments in parallel and a -given node can scan `druid.processing.numThreads` concurrently. To +Druid's fundamental unit of computation is a [segment](../design/segments.html). Processes scan segments in parallel and a +given process can scan `druid.processing.numThreads` concurrently. To process more data in parallel and increase performance, more cores can be added to a cluster. Druid segments should be sized such that any computation over any given segment should complete in at most 500ms. @@ -94,6 +94,6 @@ that resources are constantly being yielded, and segments pertaining to differen Druid queries can optionally set a `priority` flag in the [query context](../querying/query-context.html). Queries known to be slow (download or reporting style queries) can be de-prioritized and more interactive queries can have higher priority. 
-Broker nodes can also be dedicated to a given tier. For example, one set of broker nodes can be dedicated to fast interactive queries, -and a second set of Broker nodes can be dedicated to slower reporting queries. Druid also provides a [Router](../development/router.html) -node that can route queries to different Brokers based on various query parameters (datasource, interval, etc.). +Broker processes can also be dedicated to a given tier. For example, one set of Broker processes can be dedicated to fast interactive queries, +and a second set of Broker processes can be dedicated to slower reporting queries. Druid also provides a [Router](../development/router.html) +process that can route queries to different Brokers based on various query parameters (datasource, interval, etc.). diff --git a/docs/content/querying/query-context.md b/docs/content/querying/query-context.md index 60fe1c715028..185ae2dbff3c 100644 --- a/docs/content/querying/query-context.md +++ b/docs/content/querying/query-context.md @@ -37,11 +37,11 @@ The query context is used for various query configuration parameters. The follow |populateResultLevelCache | `false` | Flag indicating whether to save the results of the query to the result level cache. Primarily used for debugging. When set to false, it disables saving the results of this query to the query cache. When set to true, Druid uses druid.broker.cache.populateCache to determine whether or not to save the results of this query to the query cache | |bySegment | `false` | Return "by segment" results. Primarily used for debugging, setting it to `true` returns results associated with the data segment they came from | |finalize | `true` | Flag indicating whether to "finalize" aggregation results. Primarily used for debugging. 
For instance, the `hyperUnique` aggregator will return the full HyperLogLog sketch instead of the estimated cardinality when this flag is set to `false` | -|chunkPeriod | `P0D` (off) | At the Broker node level, long interval queries (of any type) may be broken into shorter interval queries to parallelize merging more than normal. Broken up queries will use a larger share of cluster resources, but, if you use groupBy "v1, it may be able to complete faster as a result. Use ISO 8601 periods. For example, if this property is set to `P1M` (one month), then a query covering a year would be broken into 12 smaller queries. The broker uses its query processing executor service to initiate processing for query chunks, so make sure "druid.processing.numThreads" is configured appropriately on the broker. [groupBy queries](groupbyquery.html) do not support chunkPeriod by default, although they do if using the legacy "v1" engine. This context is deprecated since it's only useful for groupBy "v1", and will be removed in the future releases.| -|maxScatterGatherBytes| `druid.server.http.maxScatterGatherBytes` | Maximum number of bytes gathered from data nodes such as Historicals and realtime processes to execute a query. This parameter can be used to further reduce `maxScatterGatherBytes` limit at query time. See [Broker configuration](../configuration/index.html#broker) for more details.| +|chunkPeriod | `P0D` (off) | At the Broker process level, long interval queries (of any type) may be broken into shorter interval queries to parallelize merging more than normal. Broken up queries will use a larger share of cluster resources, but, if you use groupBy "v1, it may be able to complete faster as a result. Use ISO 8601 periods. For example, if this property is set to `P1M` (one month), then a query covering a year would be broken into 12 smaller queries. 
The broker uses its query processing executor service to initiate processing for query chunks, so make sure "druid.processing.numThreads" is configured appropriately on the broker. [groupBy queries](groupbyquery.html) do not support chunkPeriod by default, although they do if using the legacy "v1" engine. This context is deprecated since it's only useful for groupBy "v1", and will be removed in the future releases.| +|maxScatterGatherBytes| `druid.server.http.maxScatterGatherBytes` | Maximum number of bytes gathered from data processes such as Historicals and realtime processes to execute a query. This parameter can be used to further reduce `maxScatterGatherBytes` limit at query time. See [Broker configuration](../configuration/index.html#broker) for more details.| |maxQueuedBytes | `druid.broker.http.maxQueuedBytes` | Maximum number of bytes queued per query before exerting backpressure on the channel to the data server. Similar to `maxScatterGatherBytes`, except unlike that configuration, this one will trigger backpressure rather than query failure. Zero means disabled.| -|serializeDateTimeAsLong| `false` | If true, DateTime is serialized as long in the result returned by Broker and the data transportation between Broker and compute node| -|serializeDateTimeAsLongInner| `false` | If true, DateTime is serialized as long in the data transportation between Broker and compute node| +|serializeDateTimeAsLong| `false` | If true, DateTime is serialized as long in the result returned by Broker and the data transportation between Broker and compute process| +|serializeDateTimeAsLongInner| `false` | If true, DateTime is serialized as long in the data transportation between Broker and compute process| In addition, some query types offer context parameters specific to that query type. 
diff --git a/docs/content/querying/querying.md b/docs/content/querying/querying.md index c26e6011cb30..af2ee6c18025 100644 --- a/docs/content/querying/querying.md +++ b/docs/content/querying/querying.md @@ -24,11 +24,11 @@ title: "Querying" # Querying -Queries are made using an HTTP REST style request to queryable nodes ([Broker](../design/broker.html), +Queries are made using an HTTP REST style request to queryable processes ([Broker](../design/broker.html), [Historical](../design/historical.html). [Peons](../design/peons.html)) that are running stream ingestion tasks can also accept queries. The -query is expressed in JSON and each of these node types expose the same -REST query interface. For normal Druid operations, queries should be issued to the Broker nodes. Queries can be posted -to the queryable nodes like this - +query is expressed in JSON and each of these process types expose the same +REST query interface. For normal Druid operations, queries should be issued to the Broker processes. Queries can be posted +to the queryable processes like this - ```bash curl -X POST ':/druid/v2/?pretty' -H 'Content-Type:application/json' -H 'Accept:application/json' -d @ diff --git a/docs/content/querying/scan-query.md b/docs/content/querying/scan-query.md index cb4791f7ec06..462d14fde90b 100644 --- a/docs/content/querying/scan-query.md +++ b/docs/content/querying/scan-query.md @@ -178,7 +178,7 @@ The format of the result when resultFormat equals to `compactedList`: The biggest difference between select query and scan query is that, scan query doesn't retain all rows in memory before rows can be returned to client. It will cause memory pressure if too many rows required by select query. Scan query doesn't have this issue. -Scan query can return all rows without issuing another pagination query, which is extremely useful when query against Historical or realtime node directly. 
+Scan query can return all rows without issuing another pagination query, which is extremely useful when query against Historical or realtime process directly. ## Legacy mode @@ -191,6 +191,6 @@ you may have that is named "timestamp". - Timestamps are returned as ISO8601 time strings rather than integers (milliseconds since 1970-01-01 00:00:00 UTC). Legacy mode can be triggered either by passing `"legacy" : true` in your query JSON, or by setting -`druid.query.scan.legacy = true` on your Druid nodes. If you were previously using the scan-query contrib extension, +`druid.query.scan.legacy = true` on your Druid processes. If you were previously using the scan-query contrib extension, the best way to migrate is to activate legacy mode during a rolling upgrade, then switch it off after the upgrade is complete. diff --git a/docs/content/querying/searchquery.md b/docs/content/querying/searchquery.md index 3727a5d0169d..aaaf97e5d949 100644 --- a/docs/content/querying/searchquery.md +++ b/docs/content/querying/searchquery.md @@ -56,7 +56,7 @@ There are several main parts to a search query: |dataSource|A String or Object defining the data source to query, very similar to a table in a relational database. See [DataSource](../querying/datasource.html) for more information.|yes| |granularity|Defines the granularity of the query. See [Granularities](../querying/granularities.html).|yes| |filter|See [Filters](../querying/filters.html).|no| -|limit| Defines the maximum number per Historical node (parsed as int) of search results to return. |no (default to 1000)| +|limit| Defines the maximum number per Historical process (parsed as int) of search results to return. |no (default to 1000)| |intervals|A JSON Object representing ISO-8601 Intervals. This defines the time ranges to run the query over.|yes| |searchDimensions|The dimensions to run the search over. 
Excluding this means the search is run over all dimensions.|no| |query|See [SearchQuerySpec](../querying/searchqueryspec.html).|yes| diff --git a/docs/content/querying/sql.md b/docs/content/querying/sql.md index 3f274918569d..31e0109a9a71 100644 --- a/docs/content/querying/sql.md +++ b/docs/content/querying/sql.md @@ -31,7 +31,7 @@ subject to change. Druid SQL is a built-in SQL layer and an alternative to Druid's native JSON-based query language, and is powered by a parser and planner based on [Apache Calcite](https://calcite.apache.org/). Druid SQL translates SQL into native Druid -queries on the query Broker (the first node you query), which are then passed down to data nodes as native Druid +queries on the query Broker (the first process you query), which are then passed down to data processes as native Druid queries. Other than the (slight) overhead of translating SQL on the Broker, there isn't an additional performance penalty versus native queries. @@ -91,7 +91,7 @@ ordinal position (like `ORDER BY 2` to order by the second selected column). For can only order by the `__time` column. For aggregation queries, ORDER BY can order by any column. The LIMIT clause can be used to limit the number of rows returned. It can be used with any query type. It is pushed down -to data nodes for queries that run with the native TopN query type, but not the native GroupBy query type. Future +to data processes for queries that run with the native TopN query type, but not the native GroupBy query type. Future versions of Druid will support pushing down limits using the native GroupBy query type as well. If you notice that adding a limit doesn't change performance very much, then it's likely that Druid didn't push down the limit for your query. @@ -327,7 +327,7 @@ computed in memory. See the TopN documentation for more details. - [GroupBy](groupbyquery.html) is used for all other aggregations, including any nested aggregation queries. 
Druid's GroupBy is a traditional aggregation engine: it delivers exact results and rankings and supports a wide variety of features. GroupBy aggregates in memory if it can, but it may spill to disk if it doesn't have enough memory to complete -your query. Results are streamed back from data nodes through the Broker if you ORDER BY the same expressions in your +your query. Results are streamed back from data processes through the Broker if you ORDER BY the same expressions in your GROUP BY clause, or if you don't have an ORDER BY at all. If your query has an ORDER BY referencing expressions that don't appear in the GROUP BY clause (like aggregation functions) then the Broker will materialize a list of results in memory, up to a max of your LIMIT, if any. See the GroupBy documentation for details about tuning performance and memory @@ -345,7 +345,7 @@ of plan. For all native query types, filters on the `__time` column will be translated into top-level query "intervals" whenever possible, which allows Druid to use its global time index to quickly prune the set of data that must be scanned. In -addition, Druid will use indexes local to each data node to further speed up WHERE evaluation. This can typically be +addition, Druid will use indexes local to each data process to further speed up WHERE evaluation. This can typically be done for filters that involve boolean combinations of references to and functions of single columns, like `WHERE col1 = 'a' AND col2 = 'b'`, but not `WHERE col1 = col2`. @@ -476,7 +476,7 @@ so avoid those. Druid's JDBC server does not share connection state between Brokers. This means that if you're using JDBC and have multiple Druid Brokers, you should either connect to a specific Broker, or use a load balancer with sticky sessions -enabled. The Druid Router node provides connection stickiness when balancing JDBC requests, and can be used to achieve +enabled. 
The Druid Router process provides connection stickiness when balancing JDBC requests, and can be used to achieve the necessary stickiness even with a normal non-sticky load balancer. Please see the [Router](../development/router.html) documentation for more details. @@ -571,6 +571,8 @@ The "sys" schema provides visibility into Druid segments, servers and tasks. ### SEGMENTS table Segments table provides details on all Druid segments, whether they are published yet or not. +#### CAVEAT +Note that a segment can be served by more than one stream ingestion task or Historical process, in which case it would have multiple replicas. These replicas are weakly consistent with each other when served by multiple ingestion tasks, until a segment is eventually served by a Historical, at which point the segment is immutable. The Broker prefers to query a segment from a Historical over an ingestion task. But if a segment has multiple realtime replicas, e.g. Kafka index tasks, and one task is slower than the other, then the sys.segments query results can vary for the duration of the tasks because only one of the ingestion tasks is queried by the Broker and it is not guaranteed that the same task gets picked every time. The `num_rows` column of the segments table can have inconsistent values during this period. There is an open [issue](https://github.com/apache/incubator-druid/issues/5915) about this inconsistency with stream ingestion tasks. |Column|Notes| |------|-----| diff --git a/docs/content/querying/topnquery.md b/docs/content/querying/topnquery.md index db662a8db09c..d973f71447a2 100644 --- a/docs/content/querying/topnquery.md +++ b/docs/content/querying/topnquery.md @@ -26,7 +26,7 @@ title: "TopN queries" TopN queries return a sorted set of results for the values in a given dimension according to some criteria. 
Conceptually, they can be thought of as an approximate [GroupByQuery](../querying/groupbyquery.html) over a single dimension with an [Ordering](../querying/limitspec.html) spec. TopNs are much faster and resource efficient than GroupBys for this use case. These types of queries take a topN query object and return an array of JSON objects where each object represents a value asked for by the topN query. -TopNs are approximate in that each node will rank their top K results and only return those top K results to the Broker. K, by default in Druid, is `max(1000, threshold)`. In practice, this means that if you ask for the top 1000 items ordered, the correctness of the first ~900 items will be 100%, and the ordering of the results after that is not guaranteed. TopNs can be made more accurate by increasing the threshold. +TopNs are approximate in that each data process will rank their top K results and only return those top K results to the Broker. K, by default in Druid, is `max(1000, threshold)`. In practice, this means that if you ask for the top 1000 items ordered, the correctness of the first ~900 items will be 100%, and the ordering of the results after that is not guaranteed. TopNs can be made more accurate by increasing the threshold. 
A topN query object looks like: diff --git a/docs/content/toc.md b/docs/content/toc.md index 5d67c19a5708..e3a610ff8de4 100644 --- a/docs/content/toc.md +++ b/docs/content/toc.md @@ -129,6 +129,7 @@ layout: toc * [Updating the Cluster](/docs/VERSION/operations/rolling-updates.html) * [Different Hadoop Versions](/docs/VERSION/operations/other-hadoop.html) * [Performance FAQ](/docs/VERSION/operations/performance-faq.html) + * [Management UIs](/docs/VERSION/operations/management-uis.html) * [Dump Segment Tool](/docs/VERSION/operations/dump-segment.html) * [Insert Segment Tool](/docs/VERSION/operations/insert-segment-to-db.html) * [Pull Dependencies Tool](/docs/VERSION/operations/pull-deps.html) diff --git a/docs/content/tutorials/cluster.md b/docs/content/tutorials/cluster.md index a2bc31d585ae..e3b577b32df3 100644 --- a/docs/content/tutorials/cluster.md +++ b/docs/content/tutorials/cluster.md @@ -182,7 +182,7 @@ druid.indexer.logs.directory=/druid/indexing-logs Also, - Place your Hadoop configuration XMLs (core-site.xml, hdfs-site.xml, yarn-site.xml, -mapred-site.xml) on the classpath of your Druid nodes. You can do this by copying them into +mapred-site.xml) on the classpath of your Druid processes. You can do this by copying them into `conf/druid/_common/`. ## Configure Tranquility Server (optional) @@ -207,7 +207,7 @@ a path on HDFS that you'd like to use for temporary files required during the in `druid.indexer.task.hadoopWorkingPath=/tmp/druid-indexing` is a common choice. - Place your Hadoop configuration XMLs (core-site.xml, hdfs-site.xml, yarn-site.xml, -mapred-site.xml) on the classpath of your Druid nodes. You can do this by copying them into +mapred-site.xml) on the classpath of your Druid processes. You can do this by copying them into `conf/druid/_common/core-site.xml`, `conf/druid/_common/hdfs-site.xml`, and so on. Note that you don't need to use HDFS deep storage in order to load data from Hadoop. For example, if @@ -263,7 +263,7 @@ hardware. 
The most commonly adjusted configurations are: - `druid.processing.numThreads` - `druid.query.groupBy.maxIntermediateRows` - `druid.query.groupBy.maxResults` -- `druid.server.maxSize` and `druid.segmentCache.locations` on Historical Nodes +- `druid.server.maxSize` and `druid.segmentCache.locations` on Historical processes - `druid.worker.capacity` on MiddleManagers
diff --git a/docs/content/tutorials/img/tutorial-batch-01.png b/docs/content/tutorials/img/tutorial-batch-01.png index 6c2fdaa804d4..dc506ddfa5dc 100644 Binary files a/docs/content/tutorials/img/tutorial-batch-01.png and b/docs/content/tutorials/img/tutorial-batch-01.png differ diff --git a/docs/content/tutorials/img/tutorial-compaction-01.png b/docs/content/tutorials/img/tutorial-compaction-01.png index 5c609509851f..99b9e456cdd2 100644 Binary files a/docs/content/tutorials/img/tutorial-compaction-01.png and b/docs/content/tutorials/img/tutorial-compaction-01.png differ diff --git a/docs/content/tutorials/img/tutorial-compaction-02.png b/docs/content/tutorials/img/tutorial-compaction-02.png index 5467635b8b05..11c316ec04bb 100644 Binary files a/docs/content/tutorials/img/tutorial-compaction-02.png and b/docs/content/tutorials/img/tutorial-compaction-02.png differ diff --git a/docs/content/tutorials/img/tutorial-compaction-03.png b/docs/content/tutorials/img/tutorial-compaction-03.png new file mode 100644 index 000000000000..88fd9d6571bd Binary files /dev/null and b/docs/content/tutorials/img/tutorial-compaction-03.png differ diff --git a/docs/content/tutorials/img/tutorial-compaction-04.png b/docs/content/tutorials/img/tutorial-compaction-04.png new file mode 100644 index 000000000000..8df3699e3286 Binary files /dev/null and b/docs/content/tutorials/img/tutorial-compaction-04.png differ diff --git a/docs/content/tutorials/img/tutorial-compaction-05.png b/docs/content/tutorials/img/tutorial-compaction-05.png new file mode 100644 index 000000000000..07356df3655f Binary files /dev/null and b/docs/content/tutorials/img/tutorial-compaction-05.png differ diff --git a/docs/content/tutorials/img/tutorial-compaction-06.png b/docs/content/tutorials/img/tutorial-compaction-06.png new file mode 100644 index 000000000000..ec1525c7d56c Binary files /dev/null and b/docs/content/tutorials/img/tutorial-compaction-06.png differ diff --git 
a/docs/content/tutorials/img/tutorial-compaction-07.png b/docs/content/tutorials/img/tutorial-compaction-07.png new file mode 100644 index 000000000000..aa304580c877 Binary files /dev/null and b/docs/content/tutorials/img/tutorial-compaction-07.png differ diff --git a/docs/content/tutorials/img/tutorial-compaction-08.png b/docs/content/tutorials/img/tutorial-compaction-08.png new file mode 100644 index 000000000000..b9d89b24a84b Binary files /dev/null and b/docs/content/tutorials/img/tutorial-compaction-08.png differ diff --git a/docs/content/tutorials/img/tutorial-deletion-01.png b/docs/content/tutorials/img/tutorial-deletion-01.png index d209b8c753cc..cddcb16aac10 100644 Binary files a/docs/content/tutorials/img/tutorial-deletion-01.png and b/docs/content/tutorials/img/tutorial-deletion-01.png differ diff --git a/docs/content/tutorials/img/tutorial-deletion-02.png b/docs/content/tutorials/img/tutorial-deletion-02.png index 772a510a1a63..fdea20f00abd 100644 Binary files a/docs/content/tutorials/img/tutorial-deletion-02.png and b/docs/content/tutorials/img/tutorial-deletion-02.png differ diff --git a/docs/content/tutorials/img/tutorial-retention-01.png b/docs/content/tutorials/img/tutorial-retention-01.png index 0c9f40337867..64f666cee66c 100644 Binary files a/docs/content/tutorials/img/tutorial-retention-01.png and b/docs/content/tutorials/img/tutorial-retention-01.png differ diff --git a/docs/content/tutorials/img/tutorial-retention-02.png b/docs/content/tutorials/img/tutorial-retention-02.png index a317116b933c..2458d9d1cd1d 100644 Binary files a/docs/content/tutorials/img/tutorial-retention-02.png and b/docs/content/tutorials/img/tutorial-retention-02.png differ diff --git a/docs/content/tutorials/img/tutorial-retention-03.png b/docs/content/tutorials/img/tutorial-retention-03.png index ee4538c2d4fa..5cf2e8a52aab 100644 Binary files a/docs/content/tutorials/img/tutorial-retention-03.png and b/docs/content/tutorials/img/tutorial-retention-03.png differ diff 
--git a/docs/content/tutorials/img/tutorial-retention-04.png b/docs/content/tutorials/img/tutorial-retention-04.png new file mode 100644 index 000000000000..73f9f2214353 Binary files /dev/null and b/docs/content/tutorials/img/tutorial-retention-04.png differ diff --git a/docs/content/tutorials/img/tutorial-retention-05.png b/docs/content/tutorials/img/tutorial-retention-05.png new file mode 100644 index 000000000000..622718f46255 Binary files /dev/null and b/docs/content/tutorials/img/tutorial-retention-05.png differ diff --git a/docs/content/tutorials/img/tutorial-retention-06.png b/docs/content/tutorials/img/tutorial-retention-06.png new file mode 100644 index 000000000000..540551f7029d Binary files /dev/null and b/docs/content/tutorials/img/tutorial-retention-06.png differ diff --git a/docs/content/tutorials/index.md b/docs/content/tutorials/index.md index 7ba0cd291bfa..483d3e6eab64 100644 --- a/docs/content/tutorials/index.md +++ b/docs/content/tutorials/index.md @@ -96,13 +96,13 @@ This will bring up instances of Zookeeper and the Druid services, all running on ```bash bin/supervise -c quickstart/tutorial/conf/tutorial-cluster.conf -[Thu Jul 26 12:16:23 2018] Running command[zk], logging to[/stage/apache-druid-#{DRUIDVERSION}/var/sv/zk.log]: bin/run-zk quickstart/tutorial/conf -[Thu Jul 26 12:16:23 2018] Running command[coordinator], logging to[/stage/apache-druid-#{DRUIDVERSION}/var/sv/coordinator.log]: bin/run-druid coordinator quickstart/tutorial/conf -[Thu Jul 26 12:16:23 2018] Running command[broker], logging to[//stage/apache-druid-#{DRUIDVERSION}/var/sv/broker.log]: bin/run-druid broker quickstart/tutorial/conf -[Thu Jul 26 12:16:23 2018] Running command[historical], logging to[/stage/apache-druid-#{DRUIDVERSION}/var/sv/historical.log]: bin/run-druid historical quickstart/tutorial/conf -[Thu Jul 26 12:16:23 2018] Running command[overlord], logging to[/stage/apache-druid-#{DRUIDVERSION}/var/sv/overlord.log]: bin/run-druid overlord 
quickstart/tutorial/conf -[Thu Jul 26 12:16:23 2018] Running command[middleManager], logging to[/stage/apache-druid-#{DRUIDVERSION}/var/sv/middleManager.log]: bin/run-druid middleManager quickstart/tutorial/conf - +[Wed Feb 27 12:46:13 2019] Running command[zk], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/zk.log]: bin/run-zk quickstart/tutorial/conf +[Wed Feb 27 12:46:13 2019] Running command[coordinator], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/coordinator.log]: bin/run-druid coordinator quickstart/tutorial/conf +[Wed Feb 27 12:46:13 2019] Running command[broker], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/broker.log]: bin/run-druid broker quickstart/tutorial/conf +[Wed Feb 27 12:46:13 2019] Running command[router], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/router.log]: bin/run-druid router quickstart/tutorial/conf +[Wed Feb 27 12:46:13 2019] Running command[historical], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/historical.log]: bin/run-druid historical quickstart/tutorial/conf +[Wed Feb 27 12:46:13 2019] Running command[overlord], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/overlord.log]: bin/run-druid overlord quickstart/tutorial/conf +[Wed Feb 27 12:46:13 2019] Running command[middleManager], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/middleManager.log]: bin/run-druid middleManager quickstart/tutorial/conf ``` All persistent state such as the cluster metadata store and segments for the services will be kept in the `var` directory under the apache-druid-#{DRUIDVERSION} package root. Logs for the services are located at `var/sv`. 
diff --git a/docs/content/tutorials/tutorial-batch.md b/docs/content/tutorials/tutorial-batch.md index 41f62aa5a03c..a18d4341f9f2 100644 --- a/docs/content/tutorials/tutorial-batch.md +++ b/docs/content/tutorials/tutorial-batch.md @@ -163,16 +163,16 @@ Which will print the ID of the task if the submission was successful: {"task":"index_wikipedia_2018-06-09T21:30:32.802Z"} ``` -To view the status of the ingestion task, go to the Overlord console: -[http://localhost:8090/console.html](http://localhost:8090/console.html). You can refresh the console periodically, and after -the task is successful, you should see a "SUCCESS" status for the task. +To view the status of the ingestion task, go to the Druid Console: +[http://localhost:8888/](http://localhost:8888). You can refresh the console periodically, and after +the task is successful, you should see a "SUCCESS" status for the task under the [Tasks view](http://localhost:8888/unified-console.html#tasks). -After the ingestion task finishes, the data will be loaded by Historical nodes and available for +After the ingestion task finishes, the data will be loaded by Historical processes and available for querying within a minute or two. You can monitor the progress of loading the data in the -Coordinator console, by checking whether there is a datasource "wikipedia" with a blue circle -indicating "fully available": [http://localhost:8081/#/](http://localhost:8081/#/). +Datasources view, by checking whether there is a datasource "wikipedia" with a green circle +indicating "fully available": [http://localhost:8888/unified-console.html#datasources](http://localhost:8888/unified-console.html#datasources). 
-![Coordinator console](../tutorials/img/tutorial-batch-01.png "Wikipedia 100% loaded") +![Druid Console](../tutorials/img/tutorial-batch-01.png "Wikipedia 100% loaded") ## Further reading diff --git a/docs/content/tutorials/tutorial-compaction.md b/docs/content/tutorials/tutorial-compaction.md index 201ea0090fc0..e51b3c64df04 100644 --- a/docs/content/tutorials/tutorial-compaction.md +++ b/docs/content/tutorials/tutorial-compaction.md @@ -27,6 +27,7 @@ title: "Tutorial: Compacting segments" This tutorial demonstrates how to compact existing segments into fewer but larger segments. Because there is some per-segment memory and processing overhead, it can sometimes be beneficial to reduce the total number of segments. +Please check [Segment size optimization](../operations/segment-optimization.html) for details. For this tutorial, we'll assume you've already downloaded Druid as described in the [single-machine quickstart](index.html) and have it running on your local machine. @@ -35,7 +36,7 @@ It will also be helpful to have finished [Tutorial: Loading a file](../tutorials ## Load the initial data -For this tutorial, we'll be using the Wikipedia edits sample data, with an ingestion task spec that will create a separate segment for each hour in the input data. +For this tutorial, we'll be using the Wikipedia edits sample data, with an ingestion task spec that will create 1-3 segments per hour in the input data. The ingestion spec can be found at `quickstart/tutorial/compaction-init-index.json`. Let's submit that spec, which will create a datasource called `compaction-tutorial`: @@ -43,11 +44,20 @@ The ingestion spec can be found at `quickstart/tutorial/compaction-init-index.js bin/post-index-task --file quickstart/tutorial/compaction-init-index.json ``` -After the ingestion completes, go to http://localhost:8081/#/datasources/compaction-tutorial in a browser to view information about the new datasource in the Coordinator console. +
+Please note that `maxRowsPerSegment` in the ingestion spec is set to 1000. This is to generate multiple segments per hour and is _NOT_ recommended in production. +It's 5000000 by default and may need to be adjusted to optimize your segments. +
-There will be 24 segments for this datasource, one segment per hour in the input data: +After the ingestion completes, go to [http://localhost:8888/unified-console.html#datasources](http://localhost:8888/unified-console.html#datasources) in a browser to see the new datasource in the Druid Console. -![Original segments](../tutorials/img/tutorial-retention-01.png "Original segments") +![compaction-tutorial datasource](../tutorials/img/tutorial-compaction-01.png "compaction-tutorial datasource") + +Click the `51 segments` link next to "Fully Available" for the `compaction-tutorial` datasource to view information about the datasource's segments: + +There will be 51 segments for this datasource, 1-3 segments per hour in the input data: + +![Original segments](../tutorials/img/tutorial-compaction-02.png "Original segments") Running a COUNT(*) query on this datasource shows that there are 39,244 rows: @@ -63,9 +73,9 @@ Retrieved 1 row in 1.38s. ## Compact the data -Let's now combine these 24 segments into one segment. +Let's now compact these 51 small segments. -We have included a compaction task spec for this tutorial datasource at `quickstart/tutorial/compaction-final-index.json`: +We have included a compaction task spec for this tutorial datasource at `quickstart/tutorial/compaction-keep-granularity.json`: ```json { @@ -85,23 +95,25 @@ This will compact all segments for the interval `2015-09-12/2015-09-13` in the ` The parameters in the `tuningConfig` control how many segments will be present in the compacted set of segments. -In this tutorial example, only one compacted segment will be created, as the 39244 rows in the input is less than the 5000000 `maxRowsPerSegment`. +In this tutorial example, only one compacted segment will be created per hour, as each hour has less rows than the 5000000 `maxRowsPerSegment` (note that the total number of rows is 39244). 
Let's submit this task now: ```bash -bin/post-index-task --file quickstart/tutorial/compaction-final-index.json +bin/post-index-task --file quickstart/tutorial/compaction-keep-granularity.json ``` -After the task finishes, refresh the http://localhost:8081/#/datasources/compaction-tutorial page. +After the task finishes, refresh the [segments view](http://localhost:8888/unified-console.html#segments). -The original 24 segments will eventually be marked as "unused" by the Coordinator and removed, with the new compacted segment remaining. +The original 51 segments will eventually be marked as "unused" by the Coordinator and removed, with the new compacted segments remaining. -By default, the Druid Coordinator will not mark segments as unused until the Coordinator process has been up for at least 15 minutes, so you may see the old segment set and the new compacted set at the same time in the Coordinator, e.g.: +By default, the Druid Coordinator will not mark segments as unused until the Coordinator process has been up for at least 15 minutes, so you may see the old segment set and the new compacted set at the same time in the Druid Console, with 75 total segments: -![Compacted segments intermediate state](../tutorials/img/tutorial-compaction-01.png "Compacted segments intermediate state") +![Compacted segments intermediate state 1](../tutorials/img/tutorial-compaction-03.png "Compacted segments intermediate state 1") -The new compacted segment has a more recent version than the original segments, so even when both sets of segments are shown by the Coordinator, queries will only read from the new compacted segment. +![Compacted segments intermediate state 2](../tutorials/img/tutorial-compaction-04.png "Compacted segments intermediate state 2") + +The new compacted segments have a more recent version than the original segments, so even when both sets of segments are shown in the Druid Console, queries will only read from the new compacted segments. 
Let's try running a COUNT(*) on `compaction-tutorial` again, where the row count should still be 39,244: @@ -115,12 +127,50 @@ dsql> select count(*) from "compaction-tutorial"; Retrieved 1 row in 1.30s. ``` -After the Coordinator has been running for at least 15 minutes, the http://localhost:8081/#/datasources/compaction-tutorial page should show there is only 1 segment: +After the Coordinator has been running for at least 15 minutes, the [segments view](http://localhost:8888/unified-console.html#segments) should show there are 24 segments, one per hour: + +![Compacted segments hourly granularity 1](../tutorials/img/tutorial-compaction-05.png "Compacted segments hourly granularity 1") + +![Compacted segments hourly granularity 2](../tutorials/img/tutorial-compaction-06.png "Compacted segments hourly granularity 2") + +## Compact the data with new segment granularity + +The compaction task can also produce compacted segments with a granularity different from the granularity of the input segments. + +We have included a compaction task spec that will create DAY granularity segments at `quickstart/tutorial/compaction-day-granularity.json`: + +```json +{ + "type": "compact", + "dataSource": "compaction-tutorial", + "interval": "2015-09-12/2015-09-13", + "segmentGranularity": "DAY", + "tuningConfig" : { + "type" : "index", + "maxRowsPerSegment" : 5000000, + "maxRowsInMemory" : 25000, + "forceExtendableShardSpecs" : true + } +} +``` + +Note that `segmentGranularity` is set to `DAY` in this compaction task spec. + +Let's submit this task now: + +```bash +bin/post-index-task --file quickstart/tutorial/compaction-day-granularity.json +``` + +It will take a bit of time before the Coordinator marks the old input segments as unused, so you may see an intermediate state with 25 total segments. 
Eventually, there will only be one DAY granularity segment: + +![Compacted segments day granularity 1](../tutorials/img/tutorial-compaction-07.png "Compacted segments day granularity 1") + +![Compacted segments day granularity 2](../tutorials/img/tutorial-compaction-08.png "Compacted segments day granularity 2") -![Compacted segments final state](../tutorials/img/tutorial-compaction-02.png "Compacted segments final state") ## Further reading [Task documentation](../ingestion/tasks.html) -[Segment optimization](../operations/segment-optimization.html) +[Segment optimization](../operations/segment-optimization.html) \ No newline at end of file diff --git a/docs/content/tutorials/tutorial-delete-data.md b/docs/content/tutorials/tutorial-delete-data.md index 6eba8f05804b..bd80cd7a9336 100644 --- a/docs/content/tutorials/tutorial-delete-data.md +++ b/docs/content/tutorials/tutorial-delete-data.md @@ -41,7 +41,7 @@ Let's load this initial data: bin/post-index-task --file quickstart/tutorial/deletion-index.json ``` -When the load finishes, open http://localhost:8081/#/datasources/deletion-tutorial in a browser. +When the load finishes, open [http://localhost:8888/unified-console.html#datasources](http://localhost:8888/unified-console.html#datasources) in a browser. ## How to permanently delete data @@ -56,15 +56,17 @@ Let's drop some segments now, first with load rules, then manually. As with the previous retention tutorial, there are currently 24 segments in the `deletion-tutorial` datasource. -Click the `edit rules` button with a pencil icon at the upper left corner of the page. +click the blue pencil icon next to `Cluster default: loadForever` for the `deletion-tutorial` datasource. -A rule configuration window will appear. Enter `tutorial` for both the user and changelog comment field. +A rule configuration window will appear. -Now click the `+ Add a rule` button twice. +Now click the `+ New rule` button twice. 
-In the `rule #1` box at the top, click `Load`, `Interval`, enter `2015-09-12T12:00:00.000Z/2015-09-13T00:00:00.000Z` in the interval box, and click `+ _default_tier replicant`. +In the upper rule box, select `Load` and `by interval`, and then enter `2015-09-12T12:00:00.000Z/2015-09-13T00:00:00.000Z` in the field next to `by interval`. Replicants can remain at 2 in the `_default_tier`. -In the `rule #2` box at the bottom, click `Drop` and `Forever`. +In the lower rule box, select `Drop` and `forever`. + +Now click `Next` and enter `tutorial` for both the user and changelog comment field. This will cause the first 12 segments of `deletion-tutorial` to be dropped. However, these dropped segments are not removed from deep storage. @@ -102,11 +104,11 @@ $ ls -l1 var/druid/segments/deletion-tutorial/ Let's manually disable a segment now. This will mark a segment as "unused", but not remove it from deep storage. -On http://localhost:8081/#/datasources/deletion-tutorial, click one of the remaining segments on the left for full details about the segment: +In the [segments view](http://localhost:8888/unified-console.html#segments), click the arrow on the left side of one of the remaining segments to expand the segment entry: ![Segments](../tutorials/img/tutorial-deletion-01.png "Segments") -The top of the info box shows the full segment ID, e.g. `deletion-tutorial_2016-06-27T14:00:00.000Z_2016-06-27T15:00:00.000Z_2018-07-27T22:57:00.110Z` for the segment of hour 14. +The top of the info box shows the full segment ID, e.g. `deletion-tutorial_2015-09-12T14:00:00.000Z_2015-09-12T15:00:00.000Z_2019-02-28T01:11:51.606Z` for the segment of hour 14. 
Let's disable the hour 14 segment by sending the following DELETE request to the Coordinator, where {SEGMENT-ID} is the full segment ID shown in the info box: diff --git a/docs/content/tutorials/tutorial-kafka.md b/docs/content/tutorials/tutorial-kafka.md index c6ce8c65803f..328561966453 100644 --- a/docs/content/tutorials/tutorial-kafka.md +++ b/docs/content/tutorials/tutorial-kafka.md @@ -70,6 +70,8 @@ If the supervisor was successfully created, you will get a response containing t For more details about what's going on here, check out the [Druid Kafka indexing service documentation](../development/extensions-core/kafka-ingestion.html). +You can view the current supervisors and tasks in the Druid Console: [http://localhost:8888/unified-console.html#tasks](http://localhost:8888/unified-console.html#tasks). + ## Load data Let's launch a console producer for our topic and send some data! diff --git a/docs/content/tutorials/tutorial-retention.md b/docs/content/tutorials/tutorial-retention.md index b78e1deca919..66c0ee501f1b 100644 --- a/docs/content/tutorials/tutorial-retention.md +++ b/docs/content/tutorials/tutorial-retention.md @@ -41,49 +41,54 @@ The ingestion spec can be found at `quickstart/tutorial/retention-index.json`. L bin/post-index-task --file quickstart/tutorial/retention-index.json ``` -After the ingestion completes, go to http://localhost:8081 in a browser to access the Coordinator console. +After the ingestion completes, go to [http://localhost:8888/unified-console.html#datasources](http://localhost:8888/unified-console.html#datasources) in a browser to access the Druid Console's datasource view. -In the Coordinator console, go to the `datasources` tab at the top of the page. 
+This view shows the available datasources and a summary of the retention rules for each datasource: -This tab shows the available datasources and a summary of the retention rules for each datasource: +![Summary](../tutorials/img/tutorial-retention-01.png "Summary") -![Summary](../tutorials/img/tutorial-retention-00.png "Summary") +Currently there are no rules set for the `retention-tutorial` datasource. Note that there are default rules for the cluster: load forever with 2 replicants in `_default_tier`. -Currently there are no rules set for the `retention-tutorial` datasource. Note that there are default rules, currently set to `load Forever 2 in _default_tier`. - -This means that all data will be loaded regardless of timestamp, and each segment will be replicated to two nodes in the default tier. +This means that all data will be loaded regardless of timestamp, and each segment will be replicated to two Historical processes in the default tier. In this tutorial, we will ignore the tiering and redundancy concepts for now. -Let's click the `retention-tutorial` datasource on the left. +Let's view the segments for the `retention-tutorial` datasource by clicking the "24 Segments" link next to "Fully Available". -The next page (http://localhost:8081/#/datasources/retention-tutorial) provides information about what segments a datasource contains. On the left, the page shows that there are 24 segments, each one containing data for a specific hour of 2015-09-12: +The segments view ([http://localhost:8888/unified-console.html#segments](http://localhost:8888/unified-console.html#segments)) provides information about what segments a datasource contains. 
The page shows that there are 24 segments, each one containing data for a specific hour of 2015-09-12: -![Original segments](../tutorials/img/tutorial-retention-01.png "Original segments") +![Original segments](../tutorials/img/tutorial-retention-02.png "Original segments") ## Set retention rules Suppose we want to drop data for the first 12 hours of 2015-09-12 and keep data for the later 12 hours of 2015-09-12. -Click the `edit rules` button with a pencil icon at the upper left corner of the page. +Go to the [datasources view](http://localhost:8888/unified-console.html#datasources) and click the blue pencil icon next to `Cluster default: loadForever` for the `retention-tutorial` datasource. + +A rule configuration window will appear: -A rule configuration window will appear. Enter `tutorial` for both the user and changelog comment field. +![Rule configuration](../tutorials/img/tutorial-retention-03.png "Rule configuration") -Now click the `+ Add a rule` button twice. +Now click the `+ New rule` button twice. -In the `rule #1` box at the top, click `Load`, `Interval`, enter `2015-09-12T12:00:00.000Z/2015-09-13T00:00:00.000Z` in the interval box, and click `+ _default_tier replicant`. +In the upper rule box, select `Load` and `by interval`, and then enter `2015-09-12T12:00:00.000Z/2015-09-13T00:00:00.000Z` in the field next to `by interval`. Replicants can remain at 2 in the `_default_tier`. -In the `rule #2` box at the bottom, click `Drop` and `Forever`. +In the lower rule box, select `Drop` and `forever`. The rules should look like this: -![Set rules](../tutorials/img/tutorial-retention-02.png "Set rules") +![Set rules](../tutorials/img/tutorial-retention-04.png "Set rules") + +Now click `Next`. The rule configuration process will ask for a user name and comment, for change logging purposes. You can enter `tutorial` for both. -Now click `Save all rules`, wait for a few seconds, and refresh the page. +Now click `Save`. 
You can see the new rules in the datasources view: +![New rules](../tutorials/img/tutorial-retention-05.png "New rules") + +Give the cluster a few minutes to apply the rule change, and go to the [segments view](http://localhost:8888/unified-console.html#segments) in the Druid Console. The segments for the first 12 hours of 2015-09-12 are now gone: -![New segments](../tutorials/img/tutorial-retention-03.png "New segments") +![New segments](../tutorials/img/tutorial-retention-06.png "New segments") The resulting retention rule chain is the following: @@ -93,7 +98,6 @@ The resulting retention rule chain is the following: 3. loadForever (default rule) - The rule chain is evaluated from top to bottom, with the default rule chain always added at the bottom. The tutorial rule chain we just created loads data if it is within the specified 12 hour interval. diff --git a/examples/pom.xml b/examples/pom.xml index c9ba9106024b..b48f98d264be 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT diff --git a/examples/quickstart/tutorial/compaction-day-granularity.json b/examples/quickstart/tutorial/compaction-day-granularity.json new file mode 100644 index 000000000000..4855821c0646 --- /dev/null +++ b/examples/quickstart/tutorial/compaction-day-granularity.json @@ -0,0 +1,12 @@ +{ + "type": "compact", + "dataSource": "compaction-tutorial", + "interval": "2015-09-12/2015-09-13", + "segmentGranularity": "DAY", + "tuningConfig" : { + "type" : "index", + "maxRowsPerSegment" : 5000000, + "maxRowsInMemory" : 25000, + "forceExtendableShardSpecs" : true + } +} diff --git a/examples/quickstart/tutorial/compaction-init-index.json b/examples/quickstart/tutorial/compaction-init-index.json index 90ee82657568..d74f5c0a7c1f 100644 --- a/examples/quickstart/tutorial/compaction-init-index.json +++ b/examples/quickstart/tutorial/compaction-init-index.json @@ -56,8 +56,7 @@ }, "tuningConfig" : { 
"type" : "index", - "maxRowsPerSegment" : 5000000, - "maxRowsInMemory" : 25000, + "maxRowsPerSegment" : 1000, "forceExtendableShardSpecs" : true } } diff --git a/examples/quickstart/tutorial/compaction-final-index.json b/examples/quickstart/tutorial/compaction-keep-granularity.json similarity index 100% rename from examples/quickstart/tutorial/compaction-final-index.json rename to examples/quickstart/tutorial/compaction-keep-granularity.json diff --git a/examples/quickstart/tutorial/conf/druid/_common/common.runtime.properties b/examples/quickstart/tutorial/conf/druid/_common/common.runtime.properties index 5b6b0acff50e..9d0f087d58a2 100644 --- a/examples/quickstart/tutorial/conf/druid/_common/common.runtime.properties +++ b/examples/quickstart/tutorial/conf/druid/_common/common.runtime.properties @@ -120,7 +120,7 @@ druid.selectors.coordinator.serviceName=druid/coordinator # druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"] -druid.emitter=logging +druid.emitter=noop druid.emitter.logging.logLevel=info # Storage type of double columns @@ -138,3 +138,8 @@ druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid. 
# SQL # druid.sql.enable=true + +# +# Lookups +# +druid.lookup.enableLookupSyncOnStartup=false \ No newline at end of file diff --git a/examples/quickstart/tutorial/conf/druid/middleManager/runtime.properties b/examples/quickstart/tutorial/conf/druid/middleManager/runtime.properties index d1660aa4deb9..2262d8473bb1 100644 --- a/examples/quickstart/tutorial/conf/druid/middleManager/runtime.properties +++ b/examples/quickstart/tutorial/conf/druid/middleManager/runtime.properties @@ -24,14 +24,14 @@ druid.plaintextPort=8091 druid.worker.capacity=3 # Task launch parameters -druid.indexer.runner.javaOpts=-server -Xms512m -Xmx512m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager +druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager druid.indexer.task.baseTaskDir=var/druid/task # HTTP server threads druid.server.http.numThreads=9 # Processing threads and buffers on Peons -druid.indexer.fork.property.druid.processing.buffer.sizeBytes=256000000 +druid.indexer.fork.property.druid.processing.buffer.sizeBytes=201326592 druid.indexer.fork.property.druid.processing.numThreads=2 # Hadoop indexing diff --git a/examples/quickstart/tutorial/conf/druid/router/main.config b/examples/quickstart/tutorial/conf/druid/router/main.config new file mode 100644 index 000000000000..4194b69c259b --- /dev/null +++ b/examples/quickstart/tutorial/conf/druid/router/main.config @@ -0,0 +1 @@ +org.apache.druid.cli.Main server router diff --git a/examples/quickstart/tutorial/hadoop/docker/LICENSE b/examples/quickstart/tutorial/hadoop/docker/LICENSE deleted file mode 100644 index 4f4964bf3f3d..000000000000 --- a/examples/quickstart/tutorial/hadoop/docker/LICENSE +++ /dev/null @@ -1,1331 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - 
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -Apache Knox Subcomponents (binary distributions): - -Apache Knox includes a number of sub-components with separate copyright -notices and license terms. Your use of these sub-components is subject -to the terms and conditions of the following licenses. - ------------------------------------------------------------------------------- -From Jetty and Jerico ------------------------------------------------------------------------------- -Eclipse Public License - v 1.0 - -THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC -LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM -CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. 
DEFINITIONS - -"Contribution" means: - -a) in the case of the initial Contributor, the initial code and documentation -distributed under this Agreement, and - -b) in the case of each subsequent Contributor: - -i) changes to the Program, and - -ii) additions to the Program; - -where such changes and/or additions to the Program originate from and are -distributed by that particular Contributor. A Contribution 'originates' from a -Contributor if it was added to the Program by such Contributor itself or anyone -acting on such Contributor's behalf. Contributions do not include additions to -the Program which: (i) are separate modules of software distributed in -conjunction with the Program under their own license agreement, and (ii) are not -derivative works of the Program. - -"Contributor" means any person or entity that distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which are -necessarily infringed by the use or sale of its Contribution alone or when -combined with the Program. - -"Program" means the Contributions distributed in accordance with this Agreement. - -"Recipient" means anyone who receives the Program under this Agreement, -including all Contributors. - -2. GRANT OF RIGHTS - -a) Subject to the terms of this Agreement, each Contributor hereby grants -Recipient a non-exclusive, worldwide, royalty-free copyright license to -reproduce, prepare derivative works of, publicly display, publicly perform, -distribute and sublicense the Contribution of such Contributor, if any, and such -derivative works, in source code and object code form. - -b) Subject to the terms of this Agreement, each Contributor hereby grants -Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed -Patents to make, use, sell, offer to sell, import and otherwise transfer the -Contribution of such Contributor, if any, in source code and object code form. 
-This patent license shall apply to the combination of the Contribution and the -Program if, at the time the Contribution is added by the Contributor, such -addition of the Contribution causes such combination to be covered by the -Licensed Patents. The patent license shall not apply to any other combinations -which include the Contribution. No hardware per se is licensed hereunder. - -c) Recipient understands that although each Contributor grants the licenses to -its Contributions set forth herein, no assurances are provided by any -Contributor that the Program does not infringe the patent or other intellectual -property rights of any other entity. Each Contributor disclaims any liability to -Recipient for claims brought by any other entity based on infringement of -intellectual property rights or otherwise. As a condition to exercising the -rights and licenses granted hereunder, each Recipient hereby assumes sole -responsibility to secure any other intellectual property rights needed, if any. -For example, if a third party patent license is required to allow Recipient to -distribute the Program, it is Recipient's responsibility to acquire that license -before distributing the Program. - -d) Each Contributor represents that to its knowledge it has sufficient copyright -rights in its Contribution, if any, to grant the copyright license set forth in -this Agreement. - -3. 
REQUIREMENTS - -A Contributor may choose to distribute the Program in object code form under its -own license agreement, provided that: - -a) it complies with the terms and conditions of this Agreement; and - -b) its license agreement: - -i) effectively disclaims on behalf of all Contributors all warranties and -conditions, express and implied, including warranties or conditions of title and -non-infringement, and implied warranties or conditions of merchantability and -fitness for a particular purpose; - -ii) effectively excludes on behalf of all Contributors all liability for -damages, including direct, indirect, special, incidental and consequential -damages, such as lost profits; - -iii) states that any provisions which differ from this Agreement are offered by -that Contributor alone and not by any other party; and - -iv) states that source code for the Program is available from such Contributor, -and informs licensees how to obtain it in a reasonable manner on or through a -medium customarily used for software exchange. - -When the Program is made available in source code form: - -a) it must be made available under this Agreement; and - -b) a copy of this Agreement must be included with each copy of the Program. - -Contributors may not remove or alter any copyright notices contained within the -Program. - -Each Contributor must identify itself as the originator of its Contribution, if -any, in a manner that reasonably allows subsequent Recipients to identify the -originator of the Contribution. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities with -respect to end users, business partners and the like. While this license is -intended to facilitate the commercial use of the Program, the Contributor who -includes the Program in a commercial product offering should do so in a manner -which does not create potential liability for other Contributors. 
Therefore, if -a Contributor includes the Program in a commercial product offering, such -Contributor ("Commercial Contributor") hereby agrees to defend and indemnify -every other Contributor ("Indemnified Contributor") against any losses, damages -and costs (collectively "Losses") arising from claims, lawsuits and other legal -actions brought by a third party against the Indemnified Contributor to the -extent caused by the acts or omissions of such Commercial Contributor in -connection with its distribution of the Program in a commercial product -offering. The obligations in this section do not apply to any claims or Losses -relating to any actual or alleged intellectual property infringement. In order -to qualify, an Indemnified Contributor must: a) promptly notify the Commercial -Contributor in writing of such claim, and b) allow the Commercial Contributor -to control, and cooperate with the Commercial Contributor in, the defense and -any related settlement negotiations. The Indemnified Contributor may -participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial product -offering, Product X. That Contributor is then a Commercial Contributor. If that -Commercial Contributor then makes performance claims, or offers warranties -related to Product X, those performance claims and warranties are such -Commercial Contributor's responsibility alone. Under this section, the -Commercial Contributor would have to defend claims against the other -Contributors related to those performance claims and warranties, and if a court -requires any other Contributor to pay any damages as a result, the Commercial -Contributor must pay those damages. - -5. 
NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR -IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, -NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each -Recipient is solely responsible for determining the appropriateness of using and -distributing the Program and assumes all risks associated with its exercise of -rights under this Agreement , including but not limited to the risks and costs -of program errors, compliance with applicable laws, damage to or loss of data, -programs or equipment, and unavailability or interruption of operations. - -6. DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY -CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST -PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS -GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under applicable -law, it shall not affect the validity or enforceability of the remainder of the -terms of this Agreement, and without further action by the parties hereto, such -provision shall be reformed to the minimum extent necessary to make such -provision valid and enforceable. 
- -If Recipient institutes patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Program itself -(excluding combinations of the Program with other software or hardware) -infringes such Recipient's patent(s), then such Recipient's rights granted under -Section 2(b) shall terminate as of the date such litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it fails to -comply with any of the material terms or conditions of this Agreement and does -not cure such failure in a reasonable period of time after becoming aware of -such noncompliance. If all Recipient's rights under this Agreement terminate, -Recipient agrees to cease use and distribution of the Program as soon as -reasonably practicable. However, Recipient's obligations under this Agreement -and any licenses granted by Recipient relating to the Program shall continue and -survive. - -Everyone is permitted to copy and distribute copies of this Agreement, but in -order to avoid inconsistency the Agreement is copyrighted and may only be -modified in the following manner. The Agreement Steward reserves the right to -publish new versions (including revisions) of this Agreement from time to time. -No one other than the Agreement Steward has the right to modify this Agreement. -The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation -may assign the responsibility to serve as the Agreement Steward to a suitable -separate entity. Each new version of the Agreement will be given a -distinguishing version number. The Program (including Contributions) may always -be distributed subject to the version of the Agreement under which it was -received. In addition, after a new version of the Agreement is published, -Contributor may elect to distribute the Program (including its Contributions) -under the new version. 
Except as expressly stated in Sections 2(a) and 2(b) -above, Recipient receives no rights or licenses to the intellectual property of -any Contributor under this Agreement, whether expressly, by implication, -estoppel or otherwise. All rights in the Program not expressly granted under -this Agreement are reserved. - -This Agreement is governed by the laws of the State of New York and the -intellectual property laws of the United States of America. No party to this -Agreement will bring a legal action under this Agreement more than one year -after the cause of action arose. Each party waives its rights to a jury trial in -any resulting litigation. - - -For TODO.jar (Jave EE Servlet API) - -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -1. Definitions. - - 1.1. Contributor. means each individual or entity that creates or contributes - to the creation of Modifications. - - 1.2. Contributor Version. means the combination of the Original Software, - prior Modifications used by a Contributor (if any), and the - Modifications made by that particular Contributor. - - 1.3. Covered Software. means (a) the Original Software, or (b) Modifications, - or (c) the combination of files containing Original Software with files - containing Modifications, in each case including portions thereof. - - 1.4. Executable. means the Covered Software in any form other than Source - Code. - - 1.5. Initial Developer. means the individual or entity that first makes - Original Software available under this License. - - 1.6. Larger Work. means a work which combines Covered Software or portions - thereof with code not governed by the terms of this License. - - 1.7. License. means this document. - - 1.8. Licensable. means having the right to grant, to the maximum extent - possible, whether at the time of the initial grant or subsequently - acquired, any and all of the rights conveyed herein. - - 1.9. Modifications. 
means the Source Code and Executable form of any of the - following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original Software - or previous Modifications; - - B. Any new file that contains any part of the Original Software or - previous Modification; or - - C. Any new file that is contributed or otherwise made available under - the terms of this License. - - 1.10. Original Software. means the Source Code and Executable form of - computer software code that is originally released under this License. - - 1.11. Patent Claims. means any patent claim(s), now owned or hereafter - acquired, including without limitation, method, process, and apparatus - claims, in any patent Licensable by grantor. - - 1.12. Source Code. means (a) the common form of computer software code in - which modifications are made and (b) associated documentation included - in or with such code. - - 1.13. You. (or .Your.) means an individual or a legal entity exercising - rights under, and complying with all of the terms of, this License. For - legal entities, .You. includes any entity which controls, is controlled - by, or is under common control with You. For purposes of this - definition, .control. means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject to - third party intellectual property claims, the Initial Developer hereby - grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) - Licensable by Initial Developer, to use, reproduce, modify, display, - perform, sublicense and distribute the Original Software (or - portions thereof), with or without Modifications, and/or as part of - a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of - Original Software, to make, have made, use, practice, sell, and - offer for sale, and/or otherwise dispose of the Original Software - (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the - date Initial Developer first distributes or otherwise makes the - Original Software available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: - (1) for code that You delete from the Original Software, or (2) for - infringements caused by: (i) the modification of the Original - Software, or (ii) the combination of the Original Software with - other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject to third - party intellectual property claims, each Contributor hereby grants You a - world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) - Licensable by Contributor to use, reproduce, modify, display, - perform, sublicense and distribute the Modifications created by such - Contributor (or portions thereof), either on an unmodified basis, - with other Modifications, as Covered Software and/or as part of a - Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling of - Modifications made by that Contributor either alone and/or in - combination with its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, have made, and/or - otherwise dispose of: (1) Modifications made by that Contributor (or - portions thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions of such - combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on - the date Contributor first distributes or otherwise makes the - Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: - (1) for any code that Contributor has deleted from the Contributor - Version; (2) for infringements caused by: (i) third party - modifications of Contributor Version, or (ii) the combination of - Modifications made by that Contributor with other software (except - as part of the Contributor Version) or other devices; or (3) under - Patent Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. 
- Any Covered Software that You distribute or otherwise make available in - Executable form must also be made available in Source Code form and that - Source Code form must be distributed only under the terms of this License. - You must include a copy of this License with every copy of the Source Code - form of the Covered Software You distribute or otherwise make available. - You must inform recipients of any such Covered Software in Executable form - as to how they can obtain such Covered Software in Source Code form in a - reasonable manner on or through a medium customarily used for software - exchange. - - 3.2. Modifications. - The Modifications that You create or to which You contribute are governed - by the terms of this License. You represent that You believe Your - Modifications are Your original creation(s) and/or You have sufficient - rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - You must include a notice in each of Your Modifications that identifies - You as the Contributor of the Modification. You may not remove or alter - any copyright, patent or trademark notices contained within the Covered - Software, or any notices of licensing or any descriptive text giving - attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - You may not offer or impose any terms on any Covered Software in Source - Code form that alters or restricts the applicable version of this License - or the recipients. rights hereunder. You may choose to offer, and to - charge a fee for, warranty, support, indemnity or liability obligations to - one or more recipients of Covered Software. However, you may do so only on - Your own behalf, and not on behalf of the Initial Developer or any - Contributor. 
You must make it absolutely clear that any such warranty, - support, indemnity or liability obligation is offered by You alone, and - You hereby agree to indemnify the Initial Developer and every Contributor - for any liability incurred by the Initial Developer or such Contributor as - a result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - You may distribute the Executable form of the Covered Software under the - terms of this License or under the terms of a license of Your choice, - which may contain terms different from this License, provided that You are - in compliance with the terms of this License and that the license for the - Executable form does not attempt to limit or alter the recipient.s rights - in the Source Code form from the rights set forth in this License. If You - distribute the Covered Software in Executable form under a different - license, You must make it absolutely clear that any terms which differ - from this License are offered by You alone, not by the Initial Developer - or Contributor. You hereby agree to indemnify the Initial Developer and - every Contributor for any liability incurred by the Initial Developer or - such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - You may create a Larger Work by combining Covered Software with other code - not governed by the terms of this License and distribute the Larger Work - as a single product. In such a case, You must make sure the requirements - of this License are fulfilled for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - Sun Microsystems, Inc. is the initial license steward and may publish - revised and/or new versions of this License from time to time. Each - version will be given a distinguishing version number. Except as provided - in Section 4.3, no one other than the license steward has the right to - modify this License. - - 4.2. Effect of New Versions. 
- You may always continue to use, distribute or otherwise make the Covered - Software available under the terms of the version of the License under - which You originally received the Covered Software. If the Initial - Developer includes a notice in the Original Software prohibiting it from - being distributed or otherwise made available under any subsequent version - of the License, You must distribute and make the Covered Software - available under the terms of the version of the License under which You - originally received the Covered Software. Otherwise, You may also choose - to use, distribute or otherwise make the Covered Software available under - the terms of any subsequent version of the License published by the - license steward. - - 4.3. Modified Versions. - When You are an Initial Developer and You want to create a new license for - Your Original Software, You may create and use a modified version of this - License if You: (a) rename the license and remove any references to the - name of the license steward (except to note that the license differs from - this License); and (b) otherwise make it clear that the license contains - terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN .AS IS. BASIS, WITHOUT - WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT - LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, - MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK - AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD - ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL - DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY - SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN - ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED - HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. 
This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond the - termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding declaratory - judgment actions) against Initial Developer or a Contributor (the - Initial Developer or Contributor against whom You assert such claim - is referred to as .Participant.) alleging that the Participant - Software (meaning the Contributor Version where the Participant is a - Contributor or the Original Software where the Participant is the - Initial Developer) directly or indirectly infringes any patent, then - any and all rights granted directly or indirectly to You by such - Participant, the Initial Developer (if the Initial Developer is not - the Participant) and all Contributors under Sections 2.1 and/or 2.2 - of this License shall, upon 60 days notice from Participant terminate - prospectively and automatically at the expiration of such 60 day - notice period, unless if within such 60 day period You withdraw Your - claim with respect to the Participant Software against such - Participant either unilaterally or pursuant to a written agreement - with Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 above, all end - user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. 
- - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING - NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY - OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF - ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, - INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, - COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR - LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF - SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR - DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY.S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE - EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS - EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a .commercial item,. as that term is defined in 48 - C.F.R. 2.101 (Oct. 1995), consisting of .commercial computer software. (as - that term is defined at 48 C.F.R. ? 252.227-7014(a)(1)) and commercial - computer software documentation. as such terms are used in 48 C.F.R. 12.212 - (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 - through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered - Software with only those rights set forth herein. This U.S. Government Rights - clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or - provision that addresses Government rights in computer software under this - License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. 
This License shall be governed by the law of the jurisdiction - specified in a notice contained within the Original Software (except to the - extent applicable law, if any, provides otherwise), excluding such - jurisdiction's conflict-of-law provisions. Any litigation relating to this - License shall be subject to the jurisdiction of the courts located in the - jurisdiction and venue specified in a notice contained within the Original - Software, with the losing party responsible for costs, including, without - limitation, court costs and reasonable attorneys. fees and expenses. The - application of the United Nations Convention on Contracts for the - International Sale of Goods is expressly excluded. Any law or regulation - which provides that the language of a contract shall be construed against - the drafter shall not apply to this License. You agree that You alone are - responsible for compliance with the United States export administration - regulations (and the export control laws and regulation of any other - countries) when You use, distribute or otherwise make available any Covered - Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is responsible - for claims and damages arising, directly or indirectly, out of its - utilization of rights under this License and You agree to work with Initial - Developer and Contributors to distribute such responsibility on an equitable - basis. Nothing herein is intended or shall be deemed to constitute any - admission of liability. - - NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION - LICENSE (CDDL) - - The code released under the CDDL shall be governed by the laws of the State - of California (excluding conflict-of-law provisions). 
Any litigation relating - to this License shall be subject to the jurisdiction of the Federal Courts of - the Northern District of California and the state courts of the State of - California, with venue lying in Santa Clara County, California. - - ------------------------------------------------------------------------------- -ANTLR 2 License (from ApacheDS, Groovy) ------------------------------------------------------------------------------- -We reserve no legal rights to the ANTLR--it is fully in the public domain. -An individual or company may do whatever they wish with source code -distributed with ANTLR or the code generated by ANTLR, including the -incorporation of ANTLR, or its output, into commerical software. -We encourage users to develop software with ANTLR. However, we do ask that -credit is given to us for developing ANTLR. By "credit", we mean that if you -use ANTLR or incorporate any source code into one of your programs -(commercial product, research project, or otherwise) that you acknowledge -this fact somewhere in the documentation, research report, etc... If you like -ANTLR and have developed a nice tool with the output, please mention that you -developed it using ANTLR. In addition, we ask that the headers remain intact -in our source code. As long as these guidelines are kept, we expect to -continue enhancing this system and expect to make other tools available as -they are completed. - ------------------------------------------------------------------------------- -ASM Project License (from CGLib, Groovy) ------------------------------------------------------------------------------- -Copyright (c) 2000-2011 INRIA, France Telecom -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. 
- - ------------------------------------------------------------------------------- -Bouncy Castle License (from ApacheDS) ------------------------------------------------------------------------------- -Copyright (c) 2000 - 2012 The Legion Of The Bouncy Castle -(http://www.bouncycastle.org) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - ------------------------------------------------------------------------------- -Eclipse Public License - v1.0 (from Jetty/Jerico) ------------------------------------------------------------------------------- -THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC -LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM -CONSTITUTES RECIPIENT’S ACCEPTANCE OF THIS AGREEMENT. - -1. 
DEFINITIONS - -"Contribution" means: - -a) in the case of the initial Contributor, the initial code and documentation - distributed under this Agreement, and -b) in the case of each subsequent Contributor: - -i)changes to the Program, and - -ii)additions to the Program; - -where such changes and/or additions to the Program originate from and are -distributed by that particular Contributor. A Contribution 'originates' from -a Contributor if it was added to the Program by such Contributor itself or -anyone acting on such Contributor’s behalf. Contributions do not include -additions to the Program which: (i) are separate modules of software -distributed in conjunction with the Program under their own license agreement, -and (ii) are not derivative works of the Program. - -"Contributor" means any person or entity that distributes the Program. - -"Licensed Patents " mean patent claims licensable by a Contributor which are -necessarily infringed by the use or sale of its Contribution alone or when -combined with the Program. - -"Program" means the Contributions distributed in accordance with this -Agreement. - -"Recipient" means anyone who receives the Program under this Agreement, -including all Contributors. - -2. GRANT OF RIGHTS - -a) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free copyright license to - reproduce, prepare derivative works of, publicly display, publicly perform, - distribute and sublicense the Contribution of such Contributor, if any, - and such derivative works, in source code and object code form. - -b) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free patent license under - Licensed Patents to make, use, sell, offer to sell, import and otherwise - transfer the Contribution of such Contributor, if any, in source code and - object code form. 
This patent license shall apply to the combination of the - Contribution and the Program if, at the time the Contribution is added by - the Contributor, such addition of the Contribution causes such combination - to be covered by the Licensed Patents. The patent license shall not apply - to any other combinations which include the Contribution. No hardware per - se is licensed hereunder. - -c) Recipient understands that although each Contributor grants the licenses - to its Contributions set forth herein, no assurances are provided by any - Contributor that the Program does not infringe the patent or other - intellectual property rights of any other entity. Each Contributor - disclaims any liability to Recipient for claims brought by any other - entity based on infringement of intellectual property rights or otherwise. - As a condition to exercising the rights and licenses granted hereunder, - each Recipient hereby assumes sole responsibility to secure any other - intellectual property rights needed, if any. For example, if a third - party patent license is required to allow Recipient to distribute the - Program, it is Recipient’s responsibility to acquire that license before - distributing the Program. - -d) Each Contributor represents that to its knowledge it has sufficient - copyright rights in its Contribution, if any, to grant the copyright - license set forth in this Agreement. - -3. 
REQUIREMENTS - -A Contributor may choose to distribute the Program in object code form under -its own license agreement, provided that: - -a) it complies with the terms and conditions of this Agreement; and - -b) its license agreement: - -i) effectively disclaims on behalf of all Contributors all warranties and - conditions, express and implied, including warranties or conditions of - title and non-infringement, and implied warranties or conditions of - merchantability and fitness for a particular purpose; - -ii) effectively excludes on behalf of all Contributors all liability for - damages, including direct, indirect, special, incidental and consequential - damages, such as lost profits; - -iii) states that any provisions which differ from this Agreement are offered by - that Contributor alone and not by any other party; and - -iv) states that source code for the Program is available from such - Contributor, and informs licensees how to obtain it in a reasonable manner - on or through a medium customarily used for software exchange. - -When the Program is made available in source code form: - -a) it must be made available under this Agreement; and - -b) a copy of this Agreement must be included with each copy of the Program. - -Contributors may not remove or alter any copyright notices contained within -the Program. - -Each Contributor must identify itself as the originator of its Contribution, -if any, in a manner that reasonably allows subsequent Recipients to identify -the originator of the Contribution. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities with -respect to end users, business partners and the like. While this license is -intended to facilitate the commercial use of the Program, the Contributor who -includes the Program in a commercial product offering should do so in a manner -which does not create potential liability for other Contributors. 
Therefore, -if a Contributor includes the Program in a commercial product offering, such -Contributor ("Commercial Contributor") hereby agrees to defend and indemnify -every other Contributor ("Indemnified Contributor") against any losses, -damages and costs (collectively "Losses") arising from claims, lawsuits and -other legal actions brought by a third party against the Indemnified -Contributor to the extent caused by the acts or omissions of such Commercial -Contributor in connection with its distribution of the Program in a commercial -product offering. The obligations in this section do not apply to any claims -or Losses relating to any actual or alleged intellectual property infringement. -In order to qualify, an Indemnified Contributor must: a) promptly notify the -Commercial Contributor in writing of such claim, and b) allow the Commercial -Contributor to control, and cooperate with the Commercial Contributor in, the -defense and any related settlement negotiations. The Indemnified Contributor -may participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial product -offering, Product X. That Contributor is then a Commercial Contributor. If -that Commercial Contributor then makes performance claims, or offers -warranties related to Product X, those performance claims and warranties are -such Commercial Contributor’s responsibility alone. Under this section, the -Commercial Contributor would have to defend claims against the other -Contributors related to those performance claims and warranties, and if a -court requires any other Contributor to pay any damages as a result, the -Commercial Contributor must pay those damages. - -5. 
NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON -AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS -OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF -TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. -Each Recipient is solely responsible for determining the appropriateness of -using and distributing the Program and assumes all risks associated with its -exercise of rights under this Agreement , including but not limited to the -risks and costs of program errors, compliance with applicable laws, damage to -or loss of data, programs or equipment, and unavailability or interruption of -operations. - -6. DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY -CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION -LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE -EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under -applicable law, it shall not affect the validity or enforceability of the -remainder of the terms of this Agreement, and without further action by the -parties hereto, such provision shall be reformed to the minimum extent -necessary to make such provision valid and enforceable. 
- -If Recipient institutes patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Program itself -(excluding combinations of the Program with other software or hardware) -infringes such Recipient’s patent(s), then such Recipient’s rights granted -under Section 2(b) shall terminate as of the date such litigation is filed. - -All Recipient’s rights under this Agreement shall terminate if it fails to -comply with any of the material terms or conditions of this Agreement and -does not cure such failure in a reasonable period of time after becoming -aware of such noncompliance. If all Recipient’s rights under this Agreement -terminate, Recipient agrees to cease use and distribution of the Program as -soon as reasonably practicable. However, Recipient’s obligations under this -Agreement and any licenses granted by Recipient relating to the Program shall -continue and survive. - -Everyone is permitted to copy and distribute copies of this Agreement, but in -order to avoid inconsistency the Agreement is copyrighted and may only be -modified in the following manner. The Agreement Steward reserves the right to -publish new versions (including revisions) of this Agreement from time to -time. No one other than the Agreement Steward has the right to modify this -Agreement. The Eclipse Foundation is the initial Agreement Steward. The -Eclipse Foundation may assign the responsibility to serve as the Agreement -Steward to a suitable separate entity. Each new version of the Agreement -will be given a distinguishing version number. The Program (including -Contributions) may always be distributed subject to the version of the -Agreement under which it was received. In addition, after a new version of -the Agreement is published, Contributor may elect to distribute the Program -(including its Contributions) under the new version. 
Except as expressly -stated in Sections 2(a) and 2(b) above, Recipient receives no rights or -licenses to the intellectual property of any Contributor under this Agreement, -whether expressly, by implication, estoppel or otherwise. All rights in the -Program not expressly granted under this Agreement are reserved. - -This Agreement is governed by the laws of the State of New York and the -intellectual property laws of the United States of America. No party to this -Agreement will bring a legal action under this Agreement more than one year -after the cause of action arose. Each party waives its rights to a jury trial -in any resulting litigation. - - --------------------------------------------------------------------------------------------------- -JDBM LICENSE v1.00 (from ApacheDS) --------------------------------------------------------------------------------------------------- -/** - * JDBM LICENSE v1.00 - * - * Redistribution and use of this software and associated documentation - * ("Software"), with or without modification, are permitted provided - * that the following conditions are met: - * - * 1. Redistributions of source code must retain copyright - * statements and notices. Redistributions must also contain a - * copy of this document. - * - * 2. Redistributions in binary form must reproduce the - * above copyright notice, this list of conditions and the - * following disclaimer in the documentation and/or other - * materials provided with the distribution. - * - * 3. The name "JDBM" must not be used to endorse or promote - * products derived from this Software without prior written - * permission of Cees de Groot. For written permission, - * please contact cg@cdegroot.com. - * - * 4. Products derived from this Software may not be called "JDBM" - * nor may "JDBM" appear in their names without prior written - * permission of Cees de Groot. - * - * 5. Due credit should be given to the JDBM Project - * (http://jdbm.sourceforge.net/). 
- * - * THIS SOFTWARE IS PROVIDED BY THE JDBM PROJECT AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT - * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * CEES DE GROOT OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Copyright 2000 (C) Cees de Groot. All Rights Reserved. - * Contributions are Copyright (C) 2000 by their associated contributors. - * - * $Id: LICENSE.txt,v 1.1 2000/05/05 23:59:52 boisvert Exp $ - */ - ------------------------------------------------------------------------------- -JLine License - BSD (from Groovy) ------------------------------------------------------------------------------- -Copyright (c) 2002-2006, Marc Prud'hommeaux -All rights reserved. - -Redistribution and use in source and binary forms, with or -without modification, are permitted provided that the following -conditions are met: - -Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with -the distribution. - -Neither the name of JLine nor the names of its contributors -may be used to endorse or promote products derived from this -software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, -BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, -OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED -AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -OF THE POSSIBILITY OF SUCH DAMAGE. - - ------------------------------------------------------------------------------- -SL4J License - MIT ------------------------------------------------------------------------------- -Copyright (c) 2004-2013 QOS.ch -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------------------------------------------------------------------------------- -Tanuki Software License (from ApacheDS) ------------------------------------------------------------------------------- -Copyright (c) 1999, 2004 Tanuki Software - -Permission is hereby granted, free of charge, to any person -obtaining a copy of the Java Service Wrapper and associated -documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sub-license, -and/or sell copies of the Software, and to permit persons to -whom the Software is furnished to do so, subject to the -following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
- - ------------------------------------------------------------------------------- -Silver Egg Technology License (from ApacheDS) ------------------------------------------------------------------------------- -Portions of the Software have been derived from source code -developed by Silver Egg Technology under the following license: - -Copyright (c) 2001 Silver Egg Technology - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sub-license, and/or -sell copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - - ------------------------------------------------------------------------------- -Sun Microsystems, Inc. License (from Groovy) ------------------------------------------------------------------------------- -The following notice applies to the files: - -src/main/org/codehaus/groovy/jsr223/GroovyCompiledScript.java -src/main/org/codehaus/groovy/jsr223/GroovyScriptEngineFactory.java -src/main/org/codehaus/groovy/jsr223/GroovyScriptEngineImpl.java - -/* - * Copyright 2006 Sun Microsystems, Inc. All rights reserved. - * Use is subject to license terms. - * - * Redistribution and use in source and binary forms, with or without modification, are - * permitted provided that the following conditions are met: Redistributions of source code - * must retain the above copyright notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright notice, this list of - * conditions and the following disclaimer in the documentation and/or other materials - * provided with the distribution. Neither the name of the Sun Microsystems nor the names of - * is contributors may be used to endorse or promote products derived from this software - * without specific prior written permission. - - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS - * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER - * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - ------------------------------------------------------------------------------- -European Commission License (from Hadoop) ------------------------------------------------------------------------------- -For the org.apache.hadoop.util.bloom.* classes: - -/** - * - * Copyright (c) 2005, European Commission project OneLab under contract - * 034819 (http://www.one-lab.org) - * All rights reserved. - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the distribution. - * - Neither the name of the University Catholique de Louvain - UCL - * nor the names of its contributors may be used to endorse or - * promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - ------------------------------------------------------------------------------- -zlib/libpng License ------------------------------------------------------------------------------- -This software is provided 'as-is', without any express or implied warranty. In -no event will the authors be held liable for any damages arising from the use of -this software. - -Permission is granted to anyone to use this software for any purpose, including -commercial applications, and to alter it and redistribute it freely, subject to -the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not claim - that you wrote the original software. 
If you use this software in a - product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - ------------------------------------------------------------------------------- -bzip2 License ------------------------------------------------------------------------------- -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - 2. The origin of this software must not be misrepresented; you must not claim - that you wrote the original software. If you use this software in a - product, an acknowledgment in the product documentation would be - appreciated but is not required. - 3. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 4. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT -SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. - -Julian Seward, Cambridge, UK. -jseward@acm.org diff --git a/extendedset/pom.xml b/extendedset/pom.xml index 783528a4e75b..7895a916fa58 100755 --- a/extendedset/pom.xml +++ b/extendedset/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 extendedset @@ -32,7 +31,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT diff --git a/extensions-contrib/ambari-metrics-emitter/pom.xml b/extensions-contrib/ambari-metrics-emitter/pom.xml index 886a7630a26c..cdd665a2bd23 100644 --- a/extensions-contrib/ambari-metrics-emitter/pom.xml +++ b/extensions-contrib/ambari-metrics-emitter/pom.xml @@ -18,14 +18,13 @@ ~ under the License. --> - + 4.0.0 org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/azure-extensions/pom.xml b/extensions-contrib/azure-extensions/pom.xml index 2980d5e6c1ce..c4f7c95f4ec7 100644 --- a/extensions-contrib/azure-extensions/pom.xml +++ b/extensions-contrib/azure-extensions/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions.contrib @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/cassandra-storage/pom.xml b/extensions-contrib/cassandra-storage/pom.xml index 67c215f43c5e..42ac8b4c567b 100644 --- a/extensions-contrib/cassandra-storage/pom.xml +++ b/extensions-contrib/cassandra-storage/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/cloudfiles-extensions/pom.xml b/extensions-contrib/cloudfiles-extensions/pom.xml index f80ca1f98932..bc7061c2b942 100644 --- a/extensions-contrib/cloudfiles-extensions/pom.xml +++ b/extensions-contrib/cloudfiles-extensions/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions.contrib @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/distinctcount/pom.xml b/extensions-contrib/distinctcount/pom.xml index c4aeacea7b7a..9d180a7a9221 100644 --- a/extensions-contrib/distinctcount/pom.xml +++ b/extensions-contrib/distinctcount/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions.contrib @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/druid-rocketmq/pom.xml b/extensions-contrib/druid-rocketmq/pom.xml index 0394abcb7754..936efe1cea78 100644 --- a/extensions-contrib/druid-rocketmq/pom.xml +++ b/extensions-contrib/druid-rocketmq/pom.xml @@ -17,14 +17,12 @@ ~ specific language governing permissions and limitations ~ under the License. 
--> - + 4.0.0 druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/google-extensions/pom.xml b/extensions-contrib/google-extensions/pom.xml index f252f58fe1da..9d8d58e65b9e 100644 --- a/extensions-contrib/google-extensions/pom.xml +++ b/extensions-contrib/google-extensions/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/graphite-emitter/pom.xml b/extensions-contrib/graphite-emitter/pom.xml index b48d6909aa61..fee253997171 100644 --- a/extensions-contrib/graphite-emitter/pom.xml +++ b/extensions-contrib/graphite-emitter/pom.xml @@ -18,14 +18,13 @@ ~ under the License. --> - + 4.0.0 org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/influx-extensions/pom.xml b/extensions-contrib/influx-extensions/pom.xml index 916780dcc123..c5dd93ce27e8 100644 --- a/extensions-contrib/influx-extensions/pom.xml +++ b/extensions-contrib/influx-extensions/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions.contrib @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/kafka-eight-simpleConsumer/pom.xml b/extensions-contrib/kafka-eight-simpleConsumer/pom.xml index 4e4144632d60..7dae2cc59244 100644 --- a/extensions-contrib/kafka-eight-simpleConsumer/pom.xml +++ b/extensions-contrib/kafka-eight-simpleConsumer/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions.contrib druid-kafka-eight-simple-consumer @@ -29,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/kafka-emitter/pom.xml b/extensions-contrib/kafka-emitter/pom.xml index ef714b640976..998baa50b43d 100644 --- a/extensions-contrib/kafka-emitter/pom.xml +++ b/extensions-contrib/kafka-emitter/pom.xml @@ -18,14 +18,13 @@ ~ under the License. --> - + 4.0.0 org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/materialized-view-maintenance/pom.xml b/extensions-contrib/materialized-view-maintenance/pom.xml index 643d50055820..8873a6100320 100644 --- a/extensions-contrib/materialized-view-maintenance/pom.xml +++ b/extensions-contrib/materialized-view-maintenance/pom.xml @@ -18,13 +18,11 @@ ~ under the License. --> - + druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisor.java b/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisor.java index 1ccd8129246a..105afdf8f23f 100644 --- a/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisor.java +++ b/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisor.java @@ -270,13 +270,18 @@ public void checkpoint( void checkSegmentsAndSubmitTasks() { synchronized (taskLock) { + List intervalsToRemove = new ArrayList<>(); for (Map.Entry entry : runningTasks.entrySet()) { Optional taskStatus = taskStorage.getStatus(entry.getValue().getId()); if (!taskStatus.isPresent() || !taskStatus.get().isRunnable()) { - 
runningTasks.remove(entry.getKey()); - runningVersion.remove(entry.getKey()); + intervalsToRemove.add(entry.getKey()); } } + for (Interval interval : intervalsToRemove) { + runningTasks.remove(interval); + runningVersion.remove(interval); + } + if (runningTasks.size() == maxTaskCount) { //if the number of running tasks reach the max task count, supervisor won't submit new tasks. return; @@ -288,6 +293,12 @@ void checkSegmentsAndSubmitTasks() submitTasks(sortedToBuildVersion, baseSegments); } } + + @VisibleForTesting + Pair, Map> getRunningTasks() + { + return new Pair<>(runningTasks, runningVersion); + } /** * Find infomation about the intervals in which derived dataSource data should be rebuilt. diff --git a/extensions-contrib/materialized-view-maintenance/src/test/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorTest.java b/extensions-contrib/materialized-view-maintenance/src/test/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorTest.java index 7b575f01dd71..1bf1c39709d5 100644 --- a/extensions-contrib/materialized-view-maintenance/src/test/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorTest.java +++ b/extensions-contrib/materialized-view-maintenance/src/test/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorTest.java @@ -27,7 +27,11 @@ import com.google.common.collect.Sets; import org.apache.druid.data.input.impl.DimensionsSpec; import org.apache.druid.data.input.impl.StringDimensionSchema; +import org.apache.druid.indexer.HadoopIOConfig; +import org.apache.druid.indexer.HadoopIngestionSpec; import org.apache.druid.indexer.HadoopTuningConfig; +import org.apache.druid.indexer.TaskStatus; +import org.apache.druid.indexing.common.task.HadoopIndexTask; import org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator; import org.apache.druid.indexing.overlord.TaskMaster; import org.apache.druid.indexing.overlord.TaskQueue; @@ -41,7 +45,9 @@ import 
org.apache.druid.query.aggregation.AggregatorFactory; import org.apache.druid.query.aggregation.LongSumAggregatorFactory; import org.apache.druid.segment.TestHelper; +import org.apache.druid.segment.indexing.DataSchema; import org.apache.druid.segment.realtime.firehose.ChatHandlerProvider; +import org.apache.druid.segment.transform.TransformSpec; import org.apache.druid.server.security.AuthorizerMapper; import org.apache.druid.timeline.DataSegment; import org.apache.druid.timeline.partition.HashBasedNumberedShardSpec; @@ -176,6 +182,83 @@ public void testCheckSegments() throws IOException Assert.assertEquals(expectedSegments, toBuildInterval.rhs); } + @Test + public void testCheckSegmentsAndSubmitTasks() throws IOException + { + Set baseSegments = Sets.newHashSet( + new DataSegment( + "base", + Intervals.of("2015-01-02T00Z/2015-01-03T00Z"), + "2015-01-03", + ImmutableMap.of(), + ImmutableList.of("dim1", "dim2"), + ImmutableList.of("m1"), + new HashBasedNumberedShardSpec(0, 1, null, null), + 9, + 1024 + ) + ); + indexerMetadataStorageCoordinator.announceHistoricalSegments(baseSegments); + expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes(); + expect(taskMaster.getTaskRunner()).andReturn(Optional.absent()).anyTimes(); + expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.of()).anyTimes(); + expect(taskStorage.getStatus("test_task1")).andReturn(Optional.of(TaskStatus.failure("test_task1"))).anyTimes(); + expect(taskStorage.getStatus("test_task2")).andReturn(Optional.of(TaskStatus.running("test_task2"))).anyTimes(); + EasyMock.replay(taskStorage); + + Pair, Map> runningTasksPair = supervisor.getRunningTasks(); + Map runningTasks = runningTasksPair.lhs; + Map runningVersion = runningTasksPair.rhs; + + DataSchema dataSchema = new DataSchema( + "test_datasource", + null, + null, + null, + TransformSpec.NONE, + objectMapper + ); + HadoopIOConfig hadoopIOConfig = new HadoopIOConfig(new HashMap<>(), null, null); + HadoopIngestionSpec spec 
= new HadoopIngestionSpec(dataSchema, hadoopIOConfig, null); + HadoopIndexTask task1 = new HadoopIndexTask( + "test_task1", + spec, + null, + null, + null, + objectMapper, + null, + null, + null + ); + runningTasks.put(Intervals.of("2015-01-01T00Z/2015-01-02T00Z"), task1); + runningVersion.put(Intervals.of("2015-01-01T00Z/2015-01-02T00Z"), "test_version1"); + + HadoopIndexTask task2 = new HadoopIndexTask( + "test_task2", + spec, + null, + null, + null, + objectMapper, + null, + null, + null + ); + runningTasks.put(Intervals.of("2015-01-02T00Z/2015-01-03T00Z"), task2); + runningVersion.put(Intervals.of("2015-01-02T00Z/2015-01-03T00Z"), "test_version2"); + + supervisor.checkSegmentsAndSubmitTasks(); + + Map expectedRunningTasks = new HashMap<>(); + Map expectedRunningVersion = new HashMap<>(); + expectedRunningTasks.put(Intervals.of("2015-01-02T00Z/2015-01-03T00Z"), task2); + expectedRunningVersion.put(Intervals.of("2015-01-02T00Z/2015-01-03T00Z"), "test_version2"); + + Assert.assertEquals(expectedRunningTasks, runningTasks); + Assert.assertEquals(expectedRunningVersion, runningVersion); + + } @Test public void testSuspendedDoesntRun() diff --git a/extensions-contrib/materialized-view-selection/pom.xml b/extensions-contrib/materialized-view-selection/pom.xml index 341ce9221fa7..a7f031e2c527 100644 --- a/extensions-contrib/materialized-view-selection/pom.xml +++ b/extensions-contrib/materialized-view-selection/pom.xml @@ -18,13 +18,11 @@ ~ under the License. --> - + druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/opentsdb-emitter/pom.xml b/extensions-contrib/opentsdb-emitter/pom.xml index c89c4ceb14f4..f73dec90270a 100644 --- a/extensions-contrib/opentsdb-emitter/pom.xml +++ b/extensions-contrib/opentsdb-emitter/pom.xml @@ -19,9 +19,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions.contrib @@ -31,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/orc-extensions/pom.xml b/extensions-contrib/orc-extensions/pom.xml index 18ddf7827b8d..0c3754219ba8 100644 --- a/extensions-contrib/orc-extensions/pom.xml +++ b/extensions-contrib/orc-extensions/pom.xml @@ -17,9 +17,7 @@ ~ specific language governing permissions and limitations ~ under the License. --> - + org.apache.druid.extensions.contrib druid-orc-extensions druid-orc-extensions @@ -28,7 +26,7 @@ druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/rabbitmq/pom.xml b/extensions-contrib/rabbitmq/pom.xml index 7b9d375fd4a8..c53230b78ebe 100644 --- a/extensions-contrib/rabbitmq/pom.xml +++ b/extensions-contrib/rabbitmq/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/redis-cache/pom.xml b/extensions-contrib/redis-cache/pom.xml index cd06e05ccafb..c3dbad7db1a4 100644 --- a/extensions-contrib/redis-cache/pom.xml +++ b/extensions-contrib/redis-cache/pom.xml @@ -19,8 +19,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions.contrib @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/sqlserver-metadata-storage/pom.xml b/extensions-contrib/sqlserver-metadata-storage/pom.xml index b886f264e704..2f72d32e4856 100644 --- a/extensions-contrib/sqlserver-metadata-storage/pom.xml +++ b/extensions-contrib/sqlserver-metadata-storage/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-contrib/statsd-emitter/pom.xml b/extensions-contrib/statsd-emitter/pom.xml index b55b287bec76..9e3c3b915e4a 100644 --- a/extensions-contrib/statsd-emitter/pom.xml +++ b/extensions-contrib/statsd-emitter/pom.xml @@ -17,13 +17,11 @@ ~ specific language governing permissions and limitations ~ under the License. --> - + druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/thrift-extensions/pom.xml b/extensions-contrib/thrift-extensions/pom.xml index 95137f9a5ac2..c97d118c5132 100644 --- a/extensions-contrib/thrift-extensions/pom.xml +++ b/extensions-contrib/thrift-extensions/pom.xml @@ -18,9 +18,7 @@ ~ under the License. --> - + org.apache.druid.extensions.contrib druid-thrift-extensions @@ -30,7 +28,7 @@ druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml 4.0.0 @@ -80,6 +78,10 @@ org.apache.thrift libthrift + + hadoop-lzo + com.hadoop.gplcompression + diff --git a/extensions-contrib/time-min-max/pom.xml b/extensions-contrib/time-min-max/pom.xml index d34ba373f49f..8fcfe7d24d0a 100644 --- a/extensions-contrib/time-min-max/pom.xml +++ b/extensions-contrib/time-min-max/pom.xml @@ -17,13 +17,11 @@ ~ specific language governing permissions and limitations ~ under the License. 
--> - + druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-contrib/virtual-columns/pom.xml b/extensions-contrib/virtual-columns/pom.xml index 0ec600db7c47..e672cc0ef13d 100644 --- a/extensions-contrib/virtual-columns/pom.xml +++ b/extensions-contrib/virtual-columns/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/avro-extensions/pom.xml b/extensions-core/avro-extensions/pom.xml index c675dad3aaed..55ab394ac623 100644 --- a/extensions-core/avro-extensions/pom.xml +++ b/extensions-core/avro-extensions/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/datasketches/pom.xml b/extensions-core/datasketches/pom.xml index 489226224b0f..0b8fbfeceac2 100644 --- a/extensions-core/datasketches/pom.xml +++ b/extensions-core/datasketches/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/druid-basic-security/pom.xml b/extensions-core/druid-basic-security/pom.xml index ea10f325db2a..5c5a4c2d2466 100644 --- a/extensions-core/druid-basic-security/pom.xml +++ b/extensions-core/druid-basic-security/pom.xml @@ -19,9 +19,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions @@ -32,7 +30,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/druid-bloom-filter/pom.xml b/extensions-core/druid-bloom-filter/pom.xml index f9ac9f90c07a..60a993a377b5 100644 --- a/extensions-core/druid-bloom-filter/pom.xml +++ b/extensions-core/druid-bloom-filter/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/druid-kerberos/pom.xml b/extensions-core/druid-kerberos/pom.xml index b6fad3510f58..7178899dc496 100644 --- a/extensions-core/druid-kerberos/pom.xml +++ b/extensions-core/druid-kerberos/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml @@ -71,6 +70,7 @@ org.apache.hadoop hadoop-common ${hadoop.compile.version} + compile commons-cli diff --git a/extensions-core/hdfs-storage/pom.xml b/extensions-core/hdfs-storage/pom.xml index c60787717db9..b0fc28799081 100644 --- a/extensions-core/hdfs-storage/pom.xml +++ b/extensions-core/hdfs-storage/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml @@ -151,6 +151,130 @@ + + org.apache.hadoop + hadoop-common + ${hadoop.compile.version} + compile + + + commons-cli + commons-cli + + + commons-httpclient + commons-httpclient + + + log4j + log4j + + + commons-codec + commons-codec + + + commons-logging + commons-logging + + + commons-io + commons-io + + + commons-lang + commons-lang + + + org.apache.httpcomponents + httpclient + + + org.apache.httpcomponents + httpcore + + + org.codehaus.jackson + jackson-core-asl + + + org.codehaus.jackson + jackson-mapper-asl + + + org.apache.zookeeper + zookeeper + + + 
org.slf4j + slf4j-api + + + org.slf4j + slf4j-log4j12 + + + javax.ws.rs + jsr311-api + + + com.google.code.findbugs + jsr305 + + + org.mortbay.jetty + jetty-util + + + org.apache.hadoop + hadoop-annotations + + + com.google.protobuf + protobuf-java + + + com.sun.jersey + jersey-core + + + org.apache.curator + curator-client + + + org.apache.commons + commons-math3 + + + com.google.guava + guava + + + org.apache.avro + avro + + + net.java.dev.jets3t + jets3t + + + com.sun.jersey + jersey-json + + + com.jcraft + jsch + + + org.mortbay.jetty + jetty + + + com.sun.jersey + jersey-server + + + org.apache.hadoop hadoop-aws @@ -164,6 +288,13 @@ + + org.apache.hadoop + hadoop-common + ${hadoop.compile.version} + tests + test + junit junit @@ -189,13 +320,6 @@ tests test - - org.apache.hadoop - hadoop-common - ${hadoop.compile.version} - tests - test - org.apache.hadoop hadoop-hdfs diff --git a/extensions-core/histogram/pom.xml b/extensions-core/histogram/pom.xml index dcd706ad5b7f..5e0c2fb225d1 100644 --- a/extensions-core/histogram/pom.xml +++ b/extensions-core/histogram/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/BucketsPostAggregator.java b/extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/BucketsPostAggregator.java index 94400f781bbb..c47044cd3307 100644 --- a/extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/BucketsPostAggregator.java +++ b/extensions-core/histogram/src/main/java/org/apache/druid/query/aggregation/histogram/BucketsPostAggregator.java @@ -82,7 +82,7 @@ public float getBucketSize() @JsonProperty public float getOffset() { - return bucketSize; + return offset; } @Override diff --git a/extensions-core/histogram/src/test/java/org/apache/druid/query/aggregation/histogram/BucketsPostAggregatorTest.java 
b/extensions-core/histogram/src/test/java/org/apache/druid/query/aggregation/histogram/BucketsPostAggregatorTest.java new file mode 100644 index 000000000000..ebcd2823dc81 --- /dev/null +++ b/extensions-core/histogram/src/test/java/org/apache/druid/query/aggregation/histogram/BucketsPostAggregatorTest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.query.aggregation.histogram; + +import org.apache.druid.jackson.DefaultObjectMapper; +import org.junit.Assert; +import org.junit.Test; + +public class BucketsPostAggregatorTest +{ + @Test + public void testSerde() throws Exception + { + BucketsPostAggregator aggregator1 = + new BucketsPostAggregator("buckets_post_aggregator", "test_field", 2f, 4f); + + DefaultObjectMapper mapper = new DefaultObjectMapper(); + BucketsPostAggregator aggregator2 = mapper.readValue( + mapper.writeValueAsString(aggregator1), + BucketsPostAggregator.class + ); + + Assert.assertEquals(aggregator1.getBucketSize(), aggregator2.getBucketSize(), 0.0001); + Assert.assertEquals(aggregator1.getOffset(), aggregator2.getOffset(), 0.0001); + Assert.assertArrayEquals(aggregator1.getCacheKey(), aggregator2.getCacheKey()); + Assert.assertEquals(aggregator1.getDependentFields(), aggregator2.getDependentFields()); + } +} diff --git a/extensions-core/kafka-eight/pom.xml b/extensions-core/kafka-eight/pom.xml index 0c11346668c1..88edc5c76622 100644 --- a/extensions-core/kafka-eight/pom.xml +++ b/extensions-core/kafka-eight/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/kafka-extraction-namespace/pom.xml b/extensions-core/kafka-extraction-namespace/pom.xml index 60fa27fd46f3..6429449bcb96 100644 --- a/extensions-core/kafka-extraction-namespace/pom.xml +++ b/extensions-core/kafka-extraction-namespace/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions druid-kafka-extraction-namespace @@ -29,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/kafka-indexing-service/pom.xml b/extensions-core/kafka-indexing-service/pom.xml index 5004bd5bad4c..f134b6f57c55 100644 --- a/extensions-core/kafka-indexing-service/pom.xml +++ b/extensions-core/kafka-indexing-service/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/IncrementalPublishingKafkaIndexTaskRunner.java b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/IncrementalPublishingKafkaIndexTaskRunner.java index 6424c290fc9d..9e38a97e1369 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/IncrementalPublishingKafkaIndexTaskRunner.java +++ b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/IncrementalPublishingKafkaIndexTaskRunner.java @@ -28,6 +28,7 @@ import org.apache.druid.indexing.seekablestream.SeekableStreamDataSourceMetadata; import org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner; import org.apache.druid.indexing.seekablestream.SeekableStreamPartitions; +import org.apache.druid.indexing.seekablestream.SequenceMetadata; import org.apache.druid.indexing.seekablestream.common.OrderedPartitionableRecord; import org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber; import org.apache.druid.indexing.seekablestream.common.RecordSupplier; @@ -83,7 +84,7 @@ public IncrementalPublishingKafkaIndexTaskRunner( } @Override - protected Long getSequenceNumberToStoreAfterRead(@NotNull Long sequenceNumber) + protected Long getNextStartOffset(@NotNull Long sequenceNumber) { return sequenceNumber + 1; } @@ -111,7 +112,7 @@ protected List> 
getRecords( } @Override - protected SeekableStreamPartitions deserializeSeekableStreamPartitionsFromMetadata( + protected SeekableStreamPartitions deserializePartitionsFromMetadata( ObjectMapper mapper, Object object ) @@ -208,21 +209,23 @@ protected void possiblyResetDataSourceMetadata( } @Override - protected boolean isEndSequenceOffsetsExclusive() + protected boolean isEndOffsetExclusive() { return true; } @Override - protected boolean isStartingSequenceOffsetsExclusive() + protected boolean isEndOfShard(Long seqNum) { return false; } @Override - protected boolean isEndOfShard(Long seqNum) + public TypeReference>> getSequenceMetadataTypeReference() { - return false; + return new TypeReference>>() + { + }; } @Nullable diff --git a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaIndexTaskModule.java b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaIndexTaskModule.java index 47b5df82aebe..34bc3bc1a378 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaIndexTaskModule.java +++ b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaIndexTaskModule.java @@ -44,6 +44,9 @@ public List getJacksonModules() new NamedType(KafkaIndexTask.class, "index_kafka"), new NamedType(KafkaDataSourceMetadata.class, "kafka"), new NamedType(KafkaIndexTaskIOConfig.class, "kafka"), + // "KafkaTuningConfig" is not the ideal name, but is needed for backwards compatibility. + // (Older versions of Druid didn't specify a type name and got this one by default.) 
+ new NamedType(KafkaIndexTaskTuningConfig.class, "KafkaTuningConfig"), new NamedType(KafkaSupervisorTuningConfig.class, "kafka"), new NamedType(KafkaSupervisorSpec.class, "kafka") ) diff --git a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTuningConfig.java b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTuningConfig.java index 7cee87790654..2104759f2966 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTuningConfig.java +++ b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTuningConfig.java @@ -21,7 +21,6 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonTypeName; import org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskTuningConfig; import org.apache.druid.segment.IndexSpec; import org.apache.druid.segment.writeout.SegmentWriteOutMediumFactory; @@ -30,7 +29,6 @@ import javax.annotation.Nullable; import java.io.File; -@JsonTypeName("KafkaTuningConfig") public class KafkaIndexTaskTuningConfig extends SeekableStreamIndexTaskTuningConfig { @JsonCreator diff --git a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaRecordSupplier.java b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaRecordSupplier.java index 935404cbc7c2..60aea3c292aa 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaRecordSupplier.java +++ b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/KafkaRecordSupplier.java @@ -68,7 +68,6 @@ public void assign(Set> streamPartitions) .stream() .map(x -> new TopicPartition(x.getStream(), x.getPartitionId())) .collect(Collectors.toSet())); - 
seekToEarliest(streamPartitions); } @Override diff --git a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/LegacyKafkaIndexTaskRunner.java b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/LegacyKafkaIndexTaskRunner.java index d081a0e0d796..528780d73cd8 100644 --- a/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/LegacyKafkaIndexTaskRunner.java +++ b/extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/LegacyKafkaIndexTaskRunner.java @@ -20,6 +20,7 @@ package org.apache.druid.indexing.kafka; import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Optional; import com.google.common.base.Preconditions; @@ -50,6 +51,7 @@ import org.apache.druid.indexing.seekablestream.SeekableStreamIndexTask; import org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner; import org.apache.druid.indexing.seekablestream.SeekableStreamPartitions; +import org.apache.druid.indexing.seekablestream.SequenceMetadata; import org.apache.druid.indexing.seekablestream.common.OrderedPartitionableRecord; import org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber; import org.apache.druid.indexing.seekablestream.common.RecordSupplier; @@ -627,6 +629,14 @@ protected boolean isEndOfShard(Long seqNum) return false; } + @Override + public TypeReference>> getSequenceMetadataTypeReference() + { + return new TypeReference>>() + { + }; + } + @Nonnull @Override protected List> getRecords( @@ -713,20 +723,13 @@ protected void possiblyResetDataSourceMetadata( } @Override - protected boolean isEndSequenceOffsetsExclusive() + protected boolean isEndOffsetExclusive() { - return false; + return true; } @Override - protected boolean isStartingSequenceOffsetsExclusive() - { - return false; - } - - - 
@Override - protected SeekableStreamPartitions deserializeSeekableStreamPartitionsFromMetadata( + protected SeekableStreamPartitions deserializePartitionsFromMetadata( ObjectMapper mapper, Object object ) @@ -812,7 +815,7 @@ private void requestPause() } @Override - protected Long getSequenceNumberToStoreAfterRead(Long sequenceNumber) + protected Long getNextStartOffset(Long sequenceNumber) { throw new UnsupportedOperationException(); } @@ -976,7 +979,7 @@ public Map getCurrentOffsets(@Context final HttpServletRequest re } @Override - public Map getCurrentOffsets() + public ConcurrentMap getCurrentOffsets() { return nextOffsets; } diff --git a/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTest.java b/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTest.java index 04d3802a66cb..a52dd805fb15 100644 --- a/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTest.java +++ b/extensions-core/kafka-indexing-service/src/test/java/org/apache/druid/indexing/kafka/KafkaIndexTaskTest.java @@ -87,6 +87,7 @@ import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.concurrent.Execs; +import org.apache.druid.java.util.common.concurrent.ListenableFutures; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.java.util.common.parsers.JSONPathSpec; @@ -215,8 +216,6 @@ public class KafkaIndexTaskTest private File reportsFile; private RowIngestionMetersFactory rowIngestionMetersFactory; - private int handoffCount = 0; - // This should be removed in versions greater that 0.12.x // isIncrementalHandoffSupported should always be set to true in those later versions @Parameterized.Parameters(name = "isIncrementalHandoffSupported = {0}") @@ -284,6 +283,26 @@ 
private static List> generateRecords(String topic ); } + private static List> generateSinglePartitionRecords(String topic) + { + return ImmutableList.of( + new ProducerRecord<>(topic, 0, null, JB("2008", "a", "y", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2009", "b", "y", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2010", "c", "y", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2011", "d", "y", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2011", "D", "y", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2012", "e", "y", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2009", "B", "y", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2008", "A", "x", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2009", "B", "x", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2010", "C", "x", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2011", "D", "x", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2011", "d", "x", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2012", "E", "x", "10", "20.0", "1.0")), + new ProducerRecord<>(topic, 0, null, JB("2009", "b", "x", "10", "20.0", "1.0")) + ); + } + private static String getTopicName() { return "topic" + topicPostfix++; @@ -865,16 +884,7 @@ public void testIncrementalHandOffReadsThroughEndOffsets() throws Exception if (!isIncrementalHandoffSupported) { return; } - - List> records = ImmutableList.of( - new ProducerRecord<>(topic, 0, null, JB("2008", "a", "y", "10", "20.0", "1.0")), - new ProducerRecord<>(topic, 0, null, JB("2009", "b", "y", "10", "20.0", "1.0")), - new ProducerRecord<>(topic, 0, null, JB("2010", "c", "y", "10", "20.0", "1.0")), - new ProducerRecord<>(topic, 0, null, JB("2011", "d", "y", "10", "20.0", "1.0")), - new ProducerRecord<>(topic, 0, null, JB("2011", "D", "y", "10", "20.0", "1.0")), - 
new ProducerRecord<>(topic, 0, null, JB("2012", "e", "y", "10", "20.0", "1.0")), - new ProducerRecord<>(topic, 0, null, JB("2009", "B", "y", "10", "20.0", "1.0")) - ); + records = generateSinglePartitionRecords(topic); final String baseSequenceName = "sequence0"; // as soon as any segment has more than one record, incremental publishing should happen @@ -889,20 +899,16 @@ public void testIncrementalHandOffReadsThroughEndOffsets() throws Exception Map consumerProps = kafkaServer.consumerProperties(); consumerProps.put("max.poll.records", "1"); - final SeekableStreamPartitions startPartitions = new SeekableStreamPartitions<>( - topic, - ImmutableMap.of(0, 0L) - ); - final SeekableStreamPartitions checkpoint1 = new SeekableStreamPartitions<>( - topic, - ImmutableMap.of(0, 5L) - ); - final SeekableStreamPartitions endPartitions = new SeekableStreamPartitions<>( - topic, - ImmutableMap.of(0, 7L) - ); + final SeekableStreamPartitions startPartitions = + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 0L)); + final SeekableStreamPartitions checkpoint1 = + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 5L)); + final SeekableStreamPartitions checkpoint2 = + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 9L)); + final SeekableStreamPartitions endPartitions = + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, Long.MAX_VALUE)); - final KafkaIndexTask task = createTask( + final KafkaIndexTask normalReplica = createTask( null, new KafkaIndexTaskIOConfig( 0, @@ -917,23 +923,69 @@ public void testIncrementalHandOffReadsThroughEndOffsets() throws Exception false ) ); - final ListenableFuture future = runTask(task); - while (task.getRunner().getStatus() != Status.PAUSED) { + final KafkaIndexTask staleReplica = createTask( + null, + new KafkaIndexTaskIOConfig( + 0, + baseSequenceName, + startPartitions, + endPartitions, + consumerProps, + KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS, + true, + null, + null, + false + ) + ); + + final 
ListenableFuture normalReplicaFuture = runTask(normalReplica); + // Simulating one replica is slower than the other + final ListenableFuture staleReplicaFuture = ListenableFutures.transformAsync( + taskExec.submit(() -> { + Thread.sleep(1000); + return staleReplica; + }), + this::runTask + ); + + while (normalReplica.getRunner().getStatus() != Status.PAUSED) { Thread.sleep(10); } - final Map currentOffsets = ImmutableMap.copyOf(task.getRunner().getCurrentOffsets()); - Assert.assertTrue(checkpoint1.getPartitionSequenceNumberMap().equals(currentOffsets)); + staleReplica.getRunner().pause(); + while (staleReplica.getRunner().getStatus() != Status.PAUSED) { + Thread.sleep(10); + } + Map currentOffsets = ImmutableMap.copyOf(normalReplica.getRunner().getCurrentOffsets()); + Assert.assertEquals(checkpoint1.getPartitionSequenceNumberMap(), currentOffsets); - // actual checkpoint offset is 5, but simulating behavior of publishing set end offset call, to ensure this task - // will continue reading through the end offset of the checkpointed sequence - task.getRunner().setEndOffsets(ImmutableMap.of(0, 6L), true); + normalReplica.getRunner().setEndOffsets(currentOffsets, false); + staleReplica.getRunner().setEndOffsets(currentOffsets, false); - Assert.assertEquals(TaskState.SUCCESS, future.get().getStatusCode()); + while (normalReplica.getRunner().getStatus() != Status.PAUSED) { + Thread.sleep(10); + } + while (staleReplica.getRunner().getStatus() != Status.PAUSED) { + Thread.sleep(10); + } + currentOffsets = ImmutableMap.copyOf(normalReplica.getRunner().getCurrentOffsets()); + Assert.assertEquals(checkpoint2.getPartitionSequenceNumberMap(), currentOffsets); + currentOffsets = ImmutableMap.copyOf(staleReplica.getRunner().getCurrentOffsets()); + Assert.assertEquals(checkpoint2.getPartitionSequenceNumberMap(), currentOffsets); - // processed count would be 5 if it stopped at it's current offsets - Assert.assertEquals(6, task.getRunner().getRowIngestionMeters().getProcessed()); - 
Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getUnparseable()); - Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getThrownAway()); + normalReplica.getRunner().setEndOffsets(currentOffsets, true); + staleReplica.getRunner().setEndOffsets(currentOffsets, true); + + Assert.assertEquals(TaskState.SUCCESS, normalReplicaFuture.get().getStatusCode()); + Assert.assertEquals(TaskState.SUCCESS, staleReplicaFuture.get().getStatusCode()); + + Assert.assertEquals(9, normalReplica.getRunner().getRowIngestionMeters().getProcessed()); + Assert.assertEquals(0, normalReplica.getRunner().getRowIngestionMeters().getUnparseable()); + Assert.assertEquals(0, normalReplica.getRunner().getRowIngestionMeters().getThrownAway()); + + Assert.assertEquals(9, staleReplica.getRunner().getRowIngestionMeters().getProcessed()); + Assert.assertEquals(0, staleReplica.getRunner().getRowIngestionMeters().getUnparseable()); + Assert.assertEquals(0, staleReplica.getRunner().getRowIngestionMeters().getThrownAway()); } @Test(timeout = 60_000L) @@ -1903,6 +1955,115 @@ public void testRestore() throws Exception Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentColumn("dim1", desc2)); } + @Test(timeout = 60_000L) + public void testRestoreAfterPersistingSequences() throws Exception + { + if (!isIncrementalHandoffSupported) { + return; + } + + records = generateSinglePartitionRecords(topic); + maxRowsPerSegment = 2; + Map consumerProps = kafkaServer.consumerProperties(); + consumerProps.put("max.poll.records", "1"); + + final KafkaIndexTask task1 = createTask( + null, + new KafkaIndexTaskIOConfig( + 0, + "sequence0", + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 0L)), + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 9L)), + consumerProps, + KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS, + true, + null, + null, + false + ) + ); + + final SeekableStreamPartitions checkpoint = new SeekableStreamPartitions<>( + topic, + ImmutableMap.of(0, 5L) 
+ ); + + final ListenableFuture future1 = runTask(task1); + + // Insert some data, but not enough for the task to finish + try (final KafkaProducer kafkaProducer = kafkaServer.newProducer()) { + for (ProducerRecord record : Iterables.limit(records, 5)) { + kafkaProducer.send(record).get(); + } + } + + while (task1.getRunner().getStatus() != Status.PAUSED) { + Thread.sleep(10); + } + final Map currentOffsets = ImmutableMap.copyOf(task1.getRunner().getCurrentOffsets()); + Assert.assertEquals(checkpoint.getPartitionSequenceNumberMap(), currentOffsets); + // Set endOffsets to persist sequences + task1.getRunner().setEndOffsets(ImmutableMap.of(0, 5L), false); + + // Stop without publishing segment + task1.stopGracefully(toolboxFactory.build(task1).getConfig()); + unlockAppenderatorBasePersistDirForTask(task1); + + Assert.assertEquals(TaskState.SUCCESS, future1.get().getStatusCode()); + + // Start a new task + final KafkaIndexTask task2 = createTask( + task1.getId(), + new KafkaIndexTaskIOConfig( + 0, + "sequence0", + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 0L)), + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 9L)), + consumerProps, + KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS, + true, + null, + null, + false + ) + ); + + final ListenableFuture future2 = runTask(task2); + // Wait for the task to start reading + + // Insert remaining data + try (final KafkaProducer kafkaProducer = kafkaServer.newProducer()) { + for (ProducerRecord record : Iterables.skip(records, 5)) { + kafkaProducer.send(record).get(); + } + } + + // Wait for task to exit + Assert.assertEquals(TaskState.SUCCESS, future2.get().getStatusCode()); + + // Check metrics + Assert.assertEquals(5, task1.getRunner().getRowIngestionMeters().getProcessed()); + Assert.assertEquals(0, task1.getRunner().getRowIngestionMeters().getUnparseable()); + Assert.assertEquals(0, task1.getRunner().getRowIngestionMeters().getThrownAway()); + Assert.assertEquals(4, 
task2.getRunner().getRowIngestionMeters().getProcessed()); + Assert.assertEquals(0, task2.getRunner().getRowIngestionMeters().getUnparseable()); + Assert.assertEquals(0, task2.getRunner().getRowIngestionMeters().getThrownAway()); + + // Check published segments & metadata + SegmentDescriptor desc1 = SD(task1, "2008/P1D", 0); + SegmentDescriptor desc2 = SD(task1, "2008/P1D", 1); + SegmentDescriptor desc3 = SD(task1, "2009/P1D", 0); + SegmentDescriptor desc4 = SD(task1, "2009/P1D", 1); + SegmentDescriptor desc5 = SD(task1, "2010/P1D", 0); + SegmentDescriptor desc6 = SD(task1, "2011/P1D", 0); + SegmentDescriptor desc7 = SD(task1, "2012/P1D", 0); + Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3, desc4, desc5, desc6, desc7), publishedDescriptors()); + Assert.assertEquals( + new KafkaDataSourceMetadata(new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 9L))), + metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()) + ); + } + @Test(timeout = 60_000L) public void testRunWithPauseAndResume() throws Exception { @@ -2127,6 +2288,108 @@ public void testRunContextSequenceAheadOfStartingOffsets() throws Exception Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentColumn("dim1", desc2)); } + @Test(timeout = 60_000L) + public void testRunWithDuplicateRequest() throws Exception + { + // Insert data + try (final KafkaProducer kafkaProducer = kafkaServer.newProducer()) { + for (ProducerRecord record : records) { + kafkaProducer.send(record).get(); + } + } + + final KafkaIndexTask task = createTask( + null, + new KafkaIndexTaskIOConfig( + 0, + "sequence0", + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 200L)), + new SeekableStreamPartitions<>(topic, ImmutableMap.of(0, 500L)), + kafkaServer.consumerProperties(), + KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS, + true, + null, + null, + false + ) + ); + + runTask(task); + + while (!task.getRunner().getStatus().equals(Status.READING)) { + Thread.sleep(20); + } + + // first 
setEndOffsets request + task.getRunner().pause(); + task.getRunner().setEndOffsets(ImmutableMap.of(0, 500L), true); + Assert.assertEquals(Status.READING, task.getRunner().getStatus()); + + // duplicate setEndOffsets request + task.getRunner().pause(); + task.getRunner().setEndOffsets(ImmutableMap.of(0, 500L), true); + Assert.assertEquals(Status.READING, task.getRunner().getStatus()); + } + + @Test(timeout = 60_000L) + public void testCanStartFromLaterThanEarliestOffset() throws Exception + { + if (!isIncrementalHandoffSupported) { + return; + } + final String baseSequenceName = "sequence0"; + maxRowsPerSegment = Integer.MAX_VALUE; + maxTotalRows = null; + + // Insert data + try (final KafkaProducer kafkaProducer = kafkaServer.newProducer()) { + for (ProducerRecord record : records) { + kafkaProducer.send(record).get(); + } + } + + Map consumerProps = kafkaServer.consumerProperties(); + consumerProps.put("max.poll.records", "1"); + + final SeekableStreamPartitions startPartitions = new SeekableStreamPartitions<>( + topic, + ImmutableMap.of( + 0, + 0L, + 1, + 1L + ) + ); + + final SeekableStreamPartitions endPartitions = new SeekableStreamPartitions<>( + topic, + ImmutableMap.of( + 0, + 10L, + 1, + 2L + ) + ); + + final KafkaIndexTask task = createTask( + null, + new KafkaIndexTaskIOConfig( + 0, + baseSequenceName, + startPartitions, + endPartitions, + consumerProps, + KafkaSupervisorIOConfig.DEFAULT_POLL_TIMEOUT_MILLIS, + true, + null, + null, + false + ) + ); + final ListenableFuture future = runTask(task); + Assert.assertEquals(TaskState.SUCCESS, future.get().getStatusCode()); + } + private ListenableFuture runTask(final Task task) { try { diff --git a/extensions-core/kinesis-indexing-service/pom.xml b/extensions-core/kinesis-indexing-service/pom.xml index a7e576ec435d..e206de60e0c3 100644 --- a/extensions-core/kinesis-indexing-service/pom.xml +++ b/extensions-core/kinesis-indexing-service/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/kinesis-indexing-service/src/main/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskRunner.java b/extensions-core/kinesis-indexing-service/src/main/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskRunner.java index 3e7e5e7aeddd..247f6d785491 100644 --- a/extensions-core/kinesis-indexing-service/src/main/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskRunner.java +++ b/extensions-core/kinesis-indexing-service/src/main/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskRunner.java @@ -28,6 +28,7 @@ import org.apache.druid.indexing.seekablestream.SeekableStreamDataSourceMetadata; import org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner; import org.apache.druid.indexing.seekablestream.SeekableStreamPartitions; +import org.apache.druid.indexing.seekablestream.SequenceMetadata; import org.apache.druid.indexing.seekablestream.common.OrderedPartitionableRecord; import org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber; import org.apache.druid.indexing.seekablestream.common.RecordSupplier; @@ -77,7 +78,7 @@ public class KinesisIndexTaskRunner extends SeekableStreamIndexTaskRunner> getRecords( } @Override - protected SeekableStreamPartitions deserializeSeekableStreamPartitionsFromMetadata( + protected SeekableStreamPartitions deserializePartitionsFromMetadata( ObjectMapper mapper, Object object ) @@ -159,21 +160,23 @@ protected void possiblyResetDataSourceMetadata( } @Override - protected boolean isEndSequenceOffsetsExclusive() + protected boolean isEndOffsetExclusive() { return false; } @Override - protected boolean isStartingSequenceOffsetsExclusive() + protected boolean isEndOfShard(String seqNum) { - return true; + return KinesisSequenceNumber.END_OF_SHARD_MARKER.equals(seqNum); } @Override - protected boolean isEndOfShard(String 
seqNum) + public TypeReference>> getSequenceMetadataTypeReference() { - return KinesisSequenceNumber.END_OF_SHARD_MARKER.equals(seqNum); + return new TypeReference>>() + { + }; } @Nullable diff --git a/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskTest.java b/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskTest.java index a8130b9c0e5b..1823d9374a20 100644 --- a/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskTest.java +++ b/extensions-core/kinesis-indexing-service/src/test/java/org/apache/druid/indexing/kinesis/KinesisIndexTaskTest.java @@ -83,6 +83,7 @@ import org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner; import org.apache.druid.indexing.seekablestream.SeekableStreamPartitions; import org.apache.druid.indexing.seekablestream.common.OrderedPartitionableRecord; +import org.apache.druid.indexing.seekablestream.common.StreamPartition; import org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisor; import org.apache.druid.indexing.test.TestDataSegmentAnnouncer; import org.apache.druid.indexing.test.TestDataSegmentKiller; @@ -92,6 +93,7 @@ import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.concurrent.Execs; +import org.apache.druid.java.util.common.concurrent.ListenableFutures; import org.apache.druid.java.util.common.granularity.Granularities; import org.apache.druid.java.util.common.logger.Logger; import org.apache.druid.java.util.common.parsers.JSONPathSpec; @@ -177,6 +179,7 @@ import java.util.Objects; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -198,28 +201,7 @@ public class 
KinesisIndexTaskTest extends EasyMockSupport private static String shardId1 = "1"; private static String shardId0 = "0"; private static KinesisRecordSupplier recordSupplier; - private static List> records = ImmutableList.of( - new OrderedPartitionableRecord<>(stream, "1", "0", JB("2008", "a", "y", "10", "20.0", "1.0")), - new OrderedPartitionableRecord<>(stream, "1", "1", JB("2009", "b", "y", "10", "20.0", "1.0")), - new OrderedPartitionableRecord<>(stream, "1", "2", JB("2010", "c", "y", "10", "20.0", "1.0")), - new OrderedPartitionableRecord<>(stream, "1", "3", JB("2011", "d", "y", "10", "20.0", "1.0")), - new OrderedPartitionableRecord<>(stream, "1", "4", JB("2011", "e", "y", "10", "20.0", "1.0")), - new OrderedPartitionableRecord<>( - stream, - "1", - "5", - JB("246140482-04-24T15:36:27.903Z", "x", "z", "10", "20.0", "1.0") - ), - new OrderedPartitionableRecord<>(stream, "1", "6", Collections.singletonList(StringUtils.toUtf8("unparseable"))), - new OrderedPartitionableRecord<>(stream, "1", "7", Collections.singletonList(StringUtils.toUtf8("unparseable2"))), - new OrderedPartitionableRecord<>(stream, "1", "8", Collections.singletonList(StringUtils.toUtf8("{}"))), - new OrderedPartitionableRecord<>(stream, "1", "9", JB("2013", "f", "y", "10", "20.0", "1.0")), - new OrderedPartitionableRecord<>(stream, "1", "10", JB("2049", "f", "y", "notanumber", "20.0", "1.0")), - new OrderedPartitionableRecord<>(stream, "1", "11", JB("2049", "f", "y", "10", "notanumber", "1.0")), - new OrderedPartitionableRecord<>(stream, "1", "12", JB("2049", "f", "y", "10", "20.0", "notanumber")), - new OrderedPartitionableRecord<>(stream, "0", "0", JB("2012", "g", "y", "10", "20.0", "1.0")), - new OrderedPartitionableRecord<>(stream, "0", "1", JB("2011", "h", "y", "10", "20.0", "1.0")) - ); + private static List> records; private static ServiceEmitter emitter; private static ListeningExecutorService taskExec; @@ -315,6 +297,7 @@ public void setupTest() throws IOException, InterruptedException 
maxSavedParseExceptions = null; skipAvailabilityCheck = false; doHandoff = true; + records = generateRecords(stream); reportsFile = File.createTempFile("KinesisIndexTaskTestReports-" + System.currentTimeMillis(), "json"); maxRecordsPerPoll = 1; @@ -347,6 +330,52 @@ public static void tearDownClass() throws Exception emitter.close(); } + private static List> generateRecords(String stream) + { + return ImmutableList.of( + new OrderedPartitionableRecord<>(stream, "1", "0", JB("2008", "a", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "1", JB("2009", "b", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "2", JB("2010", "c", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "3", JB("2011", "d", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "4", JB("2011", "e", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>( + stream, + "1", + "5", + JB("246140482-04-24T15:36:27.903Z", "x", "z", "10", "20.0", "1.0") + ), + new OrderedPartitionableRecord<>(stream, "1", "6", Collections.singletonList(StringUtils.toUtf8("unparseable"))), + new OrderedPartitionableRecord<>(stream, "1", "7", Collections.singletonList(StringUtils.toUtf8("unparseable2"))), + new OrderedPartitionableRecord<>(stream, "1", "8", Collections.singletonList(StringUtils.toUtf8("{}"))), + new OrderedPartitionableRecord<>(stream, "1", "9", JB("2013", "f", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "10", JB("2049", "f", "y", "notanumber", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "11", JB("2049", "f", "y", "10", "notanumber", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "12", JB("2049", "f", "y", "10", "20.0", "notanumber")), + new OrderedPartitionableRecord<>(stream, "0", "0", JB("2012", "g", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "0", "1", JB("2011", "h", "y", "10", "20.0", "1.0")) + ); + } + + 
private static List> generateSinglePartitionRecords(String stream) + { + return ImmutableList.of( + new OrderedPartitionableRecord<>(stream, "1", "0", JB("2008", "a", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "1", JB("2009", "b", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "2", JB("2010", "c", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "3", JB("2011", "d", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "4", JB("2011", "e", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "5", JB("2012", "a", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "6", JB("2013", "b", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "7", JB("2010", "c", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "8", JB("2011", "d", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "9", JB("2011", "e", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "10", JB("2008", "a", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "11", JB("2009", "b", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "12", JB("2010", "c", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "13", JB("2012", "d", "y", "10", "20.0", "1.0")), + new OrderedPartitionableRecord<>(stream, "1", "14", JB("2013", "e", "y", "10", "20.0", "1.0")) + ); + } @Test(timeout = 120_000L) public void testRunAfterDataInserted() throws Exception { @@ -640,7 +669,6 @@ public void testIncrementalHandOff() throws Exception Assert.assertEquals(ImmutableList.of("f"), readSegmentColumn("dim1", desc7)); } - @Test(timeout = 120_000L) public void testIncrementalHandOffMaxTotalRows() throws Exception { @@ -1055,7 +1083,7 @@ public void testRunWithTransformSpec() throws Exception @Test(timeout = 
120_000L) - public void testRunOnNothing() throws Exception + public void testRunOnSingletonRange() throws Exception { recordSupplier.assign(anyObject()); expectLastCall().anyTimes(); @@ -1065,11 +1093,15 @@ public void testRunOnNothing() throws Exception recordSupplier.seek(anyObject(), anyString()); expectLastCall().anyTimes(); + expect(recordSupplier.poll(anyLong())).andReturn(records.subList(2, 3)).once(); + recordSupplier.close(); expectLastCall().once(); replayAll(); + // When start and end offsets are the same, it means we need to read one message (since in Kinesis, end offsets + // are inclusive). final KinesisIndexTask task = createTask( null, new KinesisIndexTaskIOConfig( @@ -1104,12 +1136,12 @@ public void testRunOnNothing() throws Exception verifyAll(); // Check metrics - Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getProcessed()); + Assert.assertEquals(1, task.getRunner().getRowIngestionMeters().getProcessed()); Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getUnparseable()); Assert.assertEquals(0, task.getRunner().getRowIngestionMeters().getThrownAway()); // Check published metadata - Assert.assertEquals(ImmutableSet.of(), publishedDescriptors()); + Assert.assertEquals(ImmutableSet.of(SD(task, "2010/P1D", 0)), publishedDescriptors()); } @@ -2075,14 +2107,11 @@ public void testRunTwoTasksTwoPartitions() throws Exception @Test(timeout = 120_000L) public void testRestore() throws Exception { - recordSupplier.assign(anyObject()); - expectLastCall().anyTimes(); - - expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes(); - - recordSupplier.seek(anyObject(), anyString()); - expectLastCall().anyTimes(); - + final StreamPartition streamPartition = StreamPartition.of(stream, shardId1); + recordSupplier.assign(ImmutableSet.of(streamPartition)); + expectLastCall(); + recordSupplier.seek(streamPartition, "2"); + expectLastCall(); 
expect(recordSupplier.poll(anyLong())).andReturn(records.subList(2, 4)) .once() .andReturn(Collections.emptyList()) @@ -2133,16 +2162,13 @@ public void testRestore() throws Exception verifyAll(); reset(recordSupplier); - recordSupplier.assign(anyObject()); - expectLastCall().anyTimes(); - - expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes(); - - recordSupplier.seek(anyObject(), anyString()); - expectLastCall().anyTimes(); - + recordSupplier.assign(ImmutableSet.of(streamPartition)); + expectLastCall(); + recordSupplier.seek(streamPartition, "3"); + expectLastCall(); expect(recordSupplier.poll(anyLong())).andReturn(records.subList(3, 6)).once(); - + recordSupplier.assign(ImmutableSet.of()); + expectLastCall(); recordSupplier.close(); expectLastCall(); @@ -2214,18 +2240,170 @@ public void testRestore() throws Exception Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentColumn("dim1", desc2)); } - @Test(timeout = 120_000L) - public void testRunWithPauseAndResume() throws Exception + public void testRestoreAfterPersistingSequences() throws Exception { + maxRowsPerSegment = 2; + maxRecordsPerPoll = 1; + records = generateSinglePartitionRecords(stream); + recordSupplier.assign(anyObject()); expectLastCall().anyTimes(); - expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes(); + recordSupplier.seek(anyObject(), anyString()); + expectLastCall().anyTimes(); + + // simulate 1 record at a time + expect(recordSupplier.poll(anyLong())).andReturn(Collections.singletonList(records.get(0))) + .once() + .andReturn(Collections.singletonList(records.get(1))) + .once() + .andReturn(Collections.singletonList(records.get(2))) + .once() + .andReturn(Collections.singletonList(records.get(3))) + .once() + .andReturn(Collections.singletonList(records.get(4))) + .once() + .andReturn(Collections.emptyList()) + .anyTimes(); + + replayAll(); + + final KinesisIndexTask task1 = createTask( + "task1", + new 
KinesisIndexTaskIOConfig( + null, + "sequence0", + new SeekableStreamPartitions<>(stream, ImmutableMap.of( + shardId1, + "0" + )), + new SeekableStreamPartitions<>(stream, ImmutableMap.of( + shardId1, + "6" + )), + true, + null, + null, + "awsEndpoint", + null, + null, + null, + null, + null, + false + ) + ); + + final SeekableStreamPartitions checkpoint1 = new SeekableStreamPartitions<>( + stream, + ImmutableMap.of(shardId1, "4") + ); + + final ListenableFuture future1 = runTask(task1); + + while (task1.getRunner().getStatus() != SeekableStreamIndexTaskRunner.Status.PAUSED) { + Thread.sleep(10); + } + final Map currentOffsets = ImmutableMap.copyOf(task1.getRunner().getCurrentOffsets()); + Assert.assertEquals(checkpoint1.getPartitionSequenceNumberMap(), currentOffsets); + task1.getRunner().setEndOffsets(currentOffsets, false); + + // Stop without publishing segment + task1.stopGracefully(toolboxFactory.build(task1).getConfig()); + unlockAppenderatorBasePersistDirForTask(task1); + + Assert.assertEquals(TaskState.SUCCESS, future1.get().getStatusCode()); + + verifyAll(); + reset(recordSupplier); + + recordSupplier.assign(anyObject()); + expectLastCall().anyTimes(); recordSupplier.seek(anyObject(), anyString()); expectLastCall().anyTimes(); + expect(recordSupplier.poll(anyLong())).andReturn(Collections.singletonList(records.get(5))) + .once() + .andReturn(Collections.singletonList(records.get(6))) + .once() + .andReturn(Collections.emptyList()) + .anyTimes(); + + recordSupplier.close(); + expectLastCall(); + + replayAll(); + + // Start a new task + final KinesisIndexTask task2 = createTask( + task1.getId(), + new KinesisIndexTaskIOConfig( + null, + "sequence0", + new SeekableStreamPartitions<>(stream, ImmutableMap.of( + shardId1, + "0" + )), + new SeekableStreamPartitions<>(stream, ImmutableMap.of( + shardId1, + "6" + )), + true, + null, + null, + "awsEndpoint", + null, + null, + ImmutableSet.of(shardId1), + null, + null, + false + ) + ); + + final ListenableFuture 
future2 = runTask(task2); + + // Wait for task to exit + Assert.assertEquals(TaskState.SUCCESS, future2.get().getStatusCode()); + + verifyAll(); + + // Check metrics + Assert.assertEquals(5, task1.getRunner().getRowIngestionMeters().getProcessed()); + Assert.assertEquals(0, task1.getRunner().getRowIngestionMeters().getUnparseable()); + Assert.assertEquals(0, task1.getRunner().getRowIngestionMeters().getThrownAway()); + Assert.assertEquals(2, task2.getRunner().getRowIngestionMeters().getProcessed()); + Assert.assertEquals(0, task2.getRunner().getRowIngestionMeters().getUnparseable()); + Assert.assertEquals(0, task2.getRunner().getRowIngestionMeters().getThrownAway()); + + // Check published segments & metadata + SegmentDescriptor desc1 = SD(task1, "2008/P1D", 0); + SegmentDescriptor desc2 = SD(task1, "2009/P1D", 0); + SegmentDescriptor desc3 = SD(task1, "2010/P1D", 0); + SegmentDescriptor desc4 = SD(task1, "2011/P1D", 0); + SegmentDescriptor desc5 = SD(task1, "2012/P1D", 0); + SegmentDescriptor desc6 = SD(task1, "2013/P1D", 0); + Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3, desc4, desc5, desc6), publishedDescriptors()); + Assert.assertEquals( + new KinesisDataSourceMetadata( + new SeekableStreamPartitions<>(stream, ImmutableMap.of( + shardId1, + "6" + ))), + metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()) + ); + } + + @Test(timeout = 120_000L) + public void testRunWithPauseAndResume() throws Exception + { + final StreamPartition streamPartition = StreamPartition.of(stream, shardId1); + recordSupplier.assign(ImmutableSet.of(streamPartition)); + expectLastCall(); + recordSupplier.seek(streamPartition, "2"); + expectLastCall(); expect(recordSupplier.poll(anyLong())).andReturn(records.subList(2, 5)) .once() .andReturn(Collections.emptyList()) @@ -2278,7 +2456,7 @@ public void testRunWithPauseAndResume() throws Exception verifyAll(); - Map currentOffsets = task.getRunner().getCurrentOffsets(); + ConcurrentMap currentOffsets = 
task.getRunner().getCurrentOffsets(); try { future.get(10, TimeUnit.SECONDS); @@ -2292,14 +2470,8 @@ public void testRunWithPauseAndResume() throws Exception reset(recordSupplier); - recordSupplier.assign(anyObject()); - expectLastCall().anyTimes(); - - expect(recordSupplier.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes(); - - recordSupplier.seek(anyObject(), anyString()); - expectLastCall().anyTimes(); - + recordSupplier.assign(ImmutableSet.of()); + expectLastCall(); recordSupplier.close(); expectLastCall().once(); @@ -2363,8 +2535,8 @@ public void testRunContextSequenceAheadOfStartingOffsets() throws Exception final TreeMap> sequences = new TreeMap<>(); // Here the sequence number is 1 meaning that one incremental handoff was done by the failed task - // and this task should start reading from stream 2 for partition 0 - sequences.put(1, ImmutableMap.of(shardId1, "2")); + // and this task should start reading from offset 2 for partition 0 (not offset 1, because end is inclusive) + sequences.put(1, ImmutableMap.of(shardId1, "1")); final Map context = new HashMap<>(); context.put("checkpoints", objectMapper.writerWithType(new TypeReference>>() { @@ -2373,6 +2545,7 @@ public void testRunContextSequenceAheadOfStartingOffsets() throws Exception final KinesisIndexTask task = createTask( "task1", + DATA_SCHEMA, new KinesisIndexTaskIOConfig( null, "sequence0", @@ -2424,6 +2597,160 @@ public void testRunContextSequenceAheadOfStartingOffsets() throws Exception Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentColumn("dim1", desc2)); } + @Test(timeout = 5000L) + public void testIncrementalHandOffReadsThroughEndOffsets() throws Exception + { + records = generateSinglePartitionRecords(stream); + + final String baseSequenceName = "sequence0"; + // as soon as any segment has more than one record, incremental publishing should happen + maxRowsPerSegment = 2; + + final KinesisRecordSupplier recordSupplier1 = mock(KinesisRecordSupplier.class); + 
recordSupplier1.assign(anyObject()); + expectLastCall().anyTimes(); + expect(recordSupplier1.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes(); + recordSupplier1.seek(anyObject(), anyString()); + expectLastCall().anyTimes(); + expect(recordSupplier1.poll(anyLong())).andReturn(records.subList(0, 5)) + .once() + .andReturn(records.subList(4, 10)) + .once(); + recordSupplier1.close(); + expectLastCall().once(); + final KinesisRecordSupplier recordSupplier2 = mock(KinesisRecordSupplier.class); + recordSupplier2.assign(anyObject()); + expectLastCall().anyTimes(); + expect(recordSupplier2.getEarliestSequenceNumber(anyObject())).andReturn("0").anyTimes(); + recordSupplier2.seek(anyObject(), anyString()); + expectLastCall().anyTimes(); + expect(recordSupplier2.poll(anyLong())).andReturn(records.subList(0, 5)) + .once() + .andReturn(records.subList(4, 10)) + .once(); + recordSupplier2.close(); + expectLastCall().once(); + + replayAll(); + + final SeekableStreamPartitions startPartitions = new SeekableStreamPartitions<>( + stream, + ImmutableMap.of(shardId1, "0") + ); + + final SeekableStreamPartitions checkpoint1 = new SeekableStreamPartitions<>( + stream, + ImmutableMap.of(shardId1, "4") + ); + + final SeekableStreamPartitions checkpoint2 = new SeekableStreamPartitions<>( + stream, + ImmutableMap.of(shardId1, "9") + ); + + final SeekableStreamPartitions endPartitions = new SeekableStreamPartitions<>( + stream, + ImmutableMap.of(shardId1, "100") // simulating unlimited + ); + final KinesisIndexTaskIOConfig ioConfig = new KinesisIndexTaskIOConfig( + null, + baseSequenceName, + startPartitions, + endPartitions, + true, + null, + null, + "awsEndpoint", + null, + null, + null, + null, + null, + false + ); + final KinesisIndexTask normalReplica = createTask( + null, + DATA_SCHEMA, + ioConfig, + null + ); + ((TestableKinesisIndexTask) normalReplica).setLocalSupplier(recordSupplier1); + final KinesisIndexTask staleReplica = createTask( + null, + DATA_SCHEMA, + 
ioConfig, + null + ); + ((TestableKinesisIndexTask) staleReplica).setLocalSupplier(recordSupplier2); + final ListenableFuture normalReplicaFuture = runTask(normalReplica); + // Simulating one replica is slower than the other + final ListenableFuture staleReplicaFuture = ListenableFutures.transformAsync( + taskExec.submit(() -> { + Thread.sleep(1000); + return staleReplica; + }), + this::runTask + ); + + while (normalReplica.getRunner().getStatus() != Status.PAUSED) { + Thread.sleep(10); + } + staleReplica.getRunner().pause(); + while (staleReplica.getRunner().getStatus() != Status.PAUSED) { + Thread.sleep(10); + } + Map currentOffsets = ImmutableMap.copyOf(normalReplica.getRunner().getCurrentOffsets()); + Assert.assertEquals(checkpoint1.getPartitionSequenceNumberMap(), currentOffsets); + + normalReplica.getRunner().setEndOffsets(currentOffsets, false); + staleReplica.getRunner().setEndOffsets(currentOffsets, false); + + while (normalReplica.getRunner().getStatus() != Status.PAUSED) { + Thread.sleep(10); + } + while (staleReplica.getRunner().getStatus() != Status.PAUSED) { + Thread.sleep(10); + } + currentOffsets = ImmutableMap.copyOf(normalReplica.getRunner().getCurrentOffsets()); + Assert.assertEquals(checkpoint2.getPartitionSequenceNumberMap(), currentOffsets); + currentOffsets = ImmutableMap.copyOf(staleReplica.getRunner().getCurrentOffsets()); + Assert.assertEquals(checkpoint2.getPartitionSequenceNumberMap(), currentOffsets); + + normalReplica.getRunner().setEndOffsets(currentOffsets, true); + staleReplica.getRunner().setEndOffsets(currentOffsets, true); + + Assert.assertEquals(TaskState.SUCCESS, normalReplicaFuture.get().getStatusCode()); + Assert.assertEquals(TaskState.SUCCESS, staleReplicaFuture.get().getStatusCode()); + + verifyAll(); + + Assert.assertEquals(2, checkpointRequestsHash.size()); + + // Check metrics + Assert.assertEquals(10, normalReplica.getRunner().getRowIngestionMeters().getProcessed()); + Assert.assertEquals(0, 
normalReplica.getRunner().getRowIngestionMeters().getUnparseable()); + Assert.assertEquals(0, normalReplica.getRunner().getRowIngestionMeters().getThrownAway()); + + // Check published metadata + final Set descriptors = new HashSet<>(); + descriptors.add(SD(normalReplica, "2008/P1D", 0)); + descriptors.add(SD(normalReplica, "2009/P1D", 0)); + descriptors.add(SD(normalReplica, "2010/P1D", 0)); + descriptors.add(SD(normalReplica, "2010/P1D", 1)); + descriptors.add(SD(normalReplica, "2011/P1D", 0)); + descriptors.add(SD(normalReplica, "2011/P1D", 1)); + descriptors.add(SD(normalReplica, "2012/P1D", 0)); + descriptors.add(SD(normalReplica, "2013/P1D", 0)); + Assert.assertEquals(descriptors, publishedDescriptors()); + Assert.assertEquals( + new KinesisDataSourceMetadata(new SeekableStreamPartitions<>(stream, ImmutableMap.of( + shardId1, + "9" + ))), + metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()) + ); + } + private ListenableFuture runTask(final Task task) { try { @@ -2446,7 +2773,7 @@ private ListenableFuture runTask(final Task task) throw new ISE("Task is not ready"); } } - catch (Exception e) { + catch (Throwable e) { log.warn(e, "Task failed"); return TaskStatus.failure(task.getId(), Throwables.getStackTraceAsString(e)); } @@ -2475,26 +2802,27 @@ private KinesisIndexTask createTask( final KinesisIndexTaskIOConfig ioConfig ) { - return createTask(taskId, DATA_SCHEMA, ioConfig); + return createTask(taskId, DATA_SCHEMA, ioConfig, null); } private KinesisIndexTask createTask( final String taskId, - final KinesisIndexTaskIOConfig ioConfig, - final Map context + final DataSchema dataSchema, + final KinesisIndexTaskIOConfig ioConfig ) { - return createTask(taskId, DATA_SCHEMA, ioConfig, context); + return createTask(taskId, dataSchema, ioConfig, null); } private KinesisIndexTask createTask( final String taskId, final DataSchema dataSchema, - final KinesisIndexTaskIOConfig ioConfig + final KinesisIndexTaskIOConfig ioConfig, + @Nullable final 
Map context ) { final KinesisIndexTaskTuningConfig tuningConfig = new KinesisIndexTaskTuningConfig( - 1000, + maxRowsInMemory, null, maxRowsPerSegment, maxTotalRows, @@ -2506,11 +2834,11 @@ private KinesisIndexTask createTask( reportParseExceptions, handoffConditionTimeout, resetOffsetAutomatically, - skipAvailabilityCheck, + true, + null, null, null, null, - 5000, null, null, logParseExceptions, @@ -2519,58 +2847,20 @@ private KinesisIndexTask createTask( maxRecordsPerPoll, intermediateHandoffPeriod ); - final Map context = null; - final KinesisIndexTask task = new TestableKinesisIndexTask( - taskId, - null, - cloneDataSchema(dataSchema), - tuningConfig, - ioConfig, - context, - null, - null, - rowIngestionMetersFactory, - null - ); - - return task; + return createTask(taskId, dataSchema, ioConfig, tuningConfig, context); } - private KinesisIndexTask createTask( final String taskId, final DataSchema dataSchema, final KinesisIndexTaskIOConfig ioConfig, - final Map context + final KinesisIndexTaskTuningConfig tuningConfig, + @Nullable final Map context ) { - final KinesisIndexTaskTuningConfig tuningConfig = new KinesisIndexTaskTuningConfig( - maxRowsInMemory, - null, - maxRowsPerSegment, - maxTotalRows, - new Period("P1Y"), - null, - null, - null, - true, - reportParseExceptions, - handoffConditionTimeout, - resetOffsetAutomatically, - true, - null, - null, - null, - null, - null, - null, - logParseExceptions, - maxParseExceptions, - maxSavedParseExceptions, - maxRecordsPerPoll, - intermediateHandoffPeriod - ); - context.put(SeekableStreamSupervisor.IS_INCREMENTAL_HANDOFF_SUPPORTED, true); + if (context != null) { + context.put(SeekableStreamSupervisor.IS_INCREMENTAL_HANDOFF_SUPPORTED, true); + } final KinesisIndexTask task = new TestableKinesisIndexTask( taskId, @@ -2928,8 +3218,10 @@ private IngestionStatsAndErrorsTaskReportData getTaskReportData() throws IOExcep @JsonTypeName("index_kinesis") private static class TestableKinesisIndexTask extends KinesisIndexTask 
{ + private KinesisRecordSupplier localSupplier; + @JsonCreator - public TestableKinesisIndexTask( + private TestableKinesisIndexTask( @JsonProperty("id") String id, @JsonProperty("resource") TaskResource taskResource, @JsonProperty("dataSchema") DataSchema dataSchema, @@ -2956,10 +3248,15 @@ public TestableKinesisIndexTask( ); } + private void setLocalSupplier(KinesisRecordSupplier recordSupplier) + { + this.localSupplier = recordSupplier; + } + @Override protected KinesisRecordSupplier newTaskRecordSupplier() { - return recordSupplier; + return localSupplier == null ? recordSupplier : localSupplier; } } diff --git a/extensions-core/lookups-cached-global/pom.xml b/extensions-core/lookups-cached-global/pom.xml index 8866e3b062c1..6f4f0608fcc1 100644 --- a/extensions-core/lookups-cached-global/pom.xml +++ b/extensions-core/lookups-cached-global/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions druid-lookups-cached-global @@ -29,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/lookups-cached-single/pom.xml b/extensions-core/lookups-cached-single/pom.xml index a3a1294c4be9..ab208bba3f1d 100644 --- a/extensions-core/lookups-cached-single/pom.xml +++ b/extensions-core/lookups-cached-single/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions druid-lookups-cached-single @@ -29,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/mysql-metadata-storage/pom.xml b/extensions-core/mysql-metadata-storage/pom.xml index 37b0617e01d7..a37a64a0233f 100644 --- a/extensions-core/mysql-metadata-storage/pom.xml +++ b/extensions-core/mysql-metadata-storage/pom.xml @@ -30,7 +30,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/parquet-extensions/pom.xml b/extensions-core/parquet-extensions/pom.xml index 586b66d5dd41..e597a4f3e305 100644 --- a/extensions-core/parquet-extensions/pom.xml +++ b/extensions-core/parquet-extensions/pom.xml @@ -18,9 +18,7 @@ ~ under the License. --> - + org.apache.druid.extensions druid-parquet-extensions druid-parquet-extensions @@ -29,7 +27,7 @@ druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-core/postgresql-metadata-storage/pom.xml b/extensions-core/postgresql-metadata-storage/pom.xml index a6bed8c96a42..af0f5360a565 100644 --- a/extensions-core/postgresql-metadata-storage/pom.xml +++ b/extensions-core/postgresql-metadata-storage/pom.xml @@ -30,7 +30,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/protobuf-extensions/pom.xml b/extensions-core/protobuf-extensions/pom.xml index c004982b7159..e272fa322175 100644 --- a/extensions-core/protobuf-extensions/pom.xml +++ b/extensions-core/protobuf-extensions/pom.xml @@ -18,9 +18,7 @@ ~ under the License. 
--> - + 4.0.0 org.apache.druid.extensions @@ -31,7 +29,7 @@ druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/s3-extensions/pom.xml b/extensions-core/s3-extensions/pom.xml index 8d05b309dd38..c65e3dc89b34 100644 --- a/extensions-core/s3-extensions/pom.xml +++ b/extensions-core/s3-extensions/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 org.apache.druid.extensions @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3DataSegmentMover.java b/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3DataSegmentMover.java index 4fd54b499566..84a76d15e41a 100644 --- a/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3DataSegmentMover.java +++ b/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3DataSegmentMover.java @@ -174,7 +174,10 @@ private void selfCheckingMove( .withPrefix(s3Path) .withMaxKeys(1) ); - if (listResult.getKeyCount() == 0) { + // Using getObjectSummaries().size() instead of getKeyCount as, in some cases + // it is observed that even though the getObjectSummaries returns some data + // keyCount is still zero. 
+ if (listResult.getObjectSummaries().size() == 0) { // should never happen throw new ISE("Unable to list object [s3://%s/%s]", s3Bucket, s3Path); } diff --git a/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3StorageDruidModule.java b/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3StorageDruidModule.java index d8573638511d..599ed24babae 100644 --- a/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3StorageDruidModule.java +++ b/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3StorageDruidModule.java @@ -43,6 +43,7 @@ import org.apache.druid.guice.LazySingleton; import org.apache.druid.initialization.DruidModule; import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.URIs; import org.apache.druid.java.util.common.logger.Logger; import javax.annotation.Nullable; @@ -185,7 +186,8 @@ private static Protocol determineProtocol(AWSClientConfig clientConfig, AWSEndpo final Protocol protocolFromClientConfig = parseProtocol(clientConfig.getProtocol()); final String endpointUrl = endpointConfig.getUrl(); if (StringUtils.isNotEmpty(endpointUrl)) { - final URI uri = URI.create(endpointUrl); + //noinspection ConstantConditions + final URI uri = URIs.parse(endpointUrl, protocolFromClientConfig.toString()); final Protocol protocol = parseProtocol(uri.getScheme()); if (protocol != null && (protocol != protocolFromClientConfig)) { log.warn("[%s] protocol will be used for endpoint [%s]", protocol, endpointUrl); diff --git a/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3Utils.java b/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3Utils.java index e0a3dac8cae5..97858864b149 100644 --- a/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3Utils.java +++ b/extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/S3Utils.java @@ -251,7 +251,10 @@ public static 
S3ObjectSummary getSingleObjectSummary(ServerSideEncryptingAmazonS .withMaxKeys(1); final ListObjectsV2Result result = s3Client.listObjectsV2(request); - if (result.getKeyCount() == 0) { + // Using getObjectSummaries().size() instead of getKeyCount as, in some cases + // it is observed that even though the getObjectSummaries returns some data + // keyCount is still zero. + if (result.getObjectSummaries().size() == 0) { throw new ISE("Cannot find object for bucket[%s] and key[%s]", bucket, key); } final S3ObjectSummary objectSummary = result.getObjectSummaries().get(0); diff --git a/extensions-core/simple-client-sslcontext/pom.xml b/extensions-core/simple-client-sslcontext/pom.xml index b6286c237c54..2d3c936bf2ca 100644 --- a/extensions-core/simple-client-sslcontext/pom.xml +++ b/extensions-core/simple-client-sslcontext/pom.xml @@ -18,13 +18,11 @@ ~ under the License. --> - + druid org.apache.druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/extensions-core/stats/pom.xml b/extensions-core/stats/pom.xml index 704f25434629..00c1baf667ad 100644 --- a/extensions-core/stats/pom.xml +++ b/extensions-core/stats/pom.xml @@ -29,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT ../../pom.xml diff --git a/hll/pom.xml b/hll/pom.xml index ebdfe8de6770..f24ee61c816a 100644 --- a/hll/pom.xml +++ b/hll/pom.xml @@ -24,7 +24,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT druid-hll diff --git a/hll/src/main/java/org/apache/druid/hll/HyperLogLogCollector.java b/hll/src/main/java/org/apache/druid/hll/HyperLogLogCollector.java index 5fd7df778deb..82912146625d 100644 --- a/hll/src/main/java/org/apache/druid/hll/HyperLogLogCollector.java +++ b/hll/src/main/java/org/apache/druid/hll/HyperLogLogCollector.java @@ -387,6 +387,13 @@ public HyperLogLogCollector fold(@Nullable HyperLogLogCollector other) 
storageBuffer.duplicate().put(other.storageBuffer.asReadOnlyBuffer()); + if (other.storageBuffer.remaining() != other.getNumBytesForDenseStorage()) { + // The other buffer was sparse, densify it + final int newLImit = storageBuffer.position() + other.storageBuffer.remaining(); + storageBuffer.limit(newLImit); + convertToDenseStorage(); + } + other = HyperLogLogCollector.makeCollector(tmpBuffer); } diff --git a/hll/src/test/java/org/apache/druid/hll/HyperLogLogCollectorTest.java b/hll/src/test/java/org/apache/druid/hll/HyperLogLogCollectorTest.java index ffcbd9885a29..3727717d1c65 100644 --- a/hll/src/test/java/org/apache/druid/hll/HyperLogLogCollectorTest.java +++ b/hll/src/test/java/org/apache/druid/hll/HyperLogLogCollectorTest.java @@ -22,6 +22,7 @@ import com.google.common.collect.Collections2; import com.google.common.collect.Lists; import com.google.common.hash.HashFunction; +import com.google.common.hash.Hasher; import com.google.common.hash.Hashing; import org.apache.druid.java.util.common.StringUtils; import org.apache.druid.java.util.common.logger.Logger; @@ -30,14 +31,17 @@ import org.junit.Test; import java.nio.ByteBuffer; +import java.nio.ByteOrder; import java.security.MessageDigest; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; +import java.util.function.Predicate; /** + * */ public class HyperLogLogCollectorTest { @@ -45,6 +49,18 @@ public class HyperLogLogCollectorTest private final HashFunction fn = Hashing.murmur3_128(); + private static void fillBuckets(HyperLogLogCollector collector, byte startOffset, byte endOffset) + { + byte offset = startOffset; + while (offset <= endOffset) { + // fill buckets to shift registerOffset + for (short bucket = 0; bucket < 2048; ++bucket) { + collector.add(bucket, offset); + } + offset++; + } + } + @Test public void testFolding() { @@ -78,14 +94,13 @@ public void testFolding() } } - /** * This is a very long 
running test, disabled by default. * It is meant to catch issues when combining a large numer of HLL objects. * * It compares adding all the values to one HLL vs. * splitting up values into HLLs of 100 values each, and folding those HLLs into a single main HLL. - * + * * When reaching very large cardinalities (>> 50,000,000), offsets are mismatched between the main HLL and the ones * with 100 values, requiring a floating max as described in * http://druid.io/blog/2014/02/18/hyperloglog-optimizations-for-real-world-systems.html @@ -502,7 +517,8 @@ private short computeNumNonZero(byte theByte) return retVal; } - @Ignore @Test // This test can help when finding potential combinations that are weird, but it's non-deterministic + @Ignore + @Test // This test can help when finding potential combinations that are weird, but it's non-deterministic public void testFoldingwithDifferentOffsets() { // final Random random = new Random(37); // this seed will cause this test to fail because of slightly larger errors @@ -533,7 +549,8 @@ public void testFoldingwithDifferentOffsets() } } - @Ignore @Test + @Ignore + @Test public void testFoldingwithDifferentOffsets2() throws Exception { final Random random = new Random(0); @@ -707,6 +724,81 @@ public void testMaxOverflow() Assert.assertEquals(0, collector.getNumNonZeroRegisters()); } + @Test + public void testRegisterSwapWithSparse() + { + final HyperLogLogCollector collector = HyperLogLogCollector.makeLatestCollector(); + // Skip the first bucket + for (int i = 1; i < HyperLogLogCollector.NUM_BUCKETS; i++) { + collector.add((short) i, (byte) 1); + Assert.assertEquals(i, collector.getNumNonZeroRegisters()); + Assert.assertEquals(0, collector.getRegisterOffset()); + } + Assert.assertEquals( + 15615.219683654448D, + HyperLogLogCollector.makeCollector(collector.toByteBuffer().asReadOnlyBuffer()) + .estimateCardinality(), + 1e-5D + ); + + final byte[] hash = new byte[10]; + hash[0] = 1; // Bucket 0, 1 offset of 0 + collector.add(hash); + 
Assert.assertEquals(0, collector.getNumNonZeroRegisters()); + Assert.assertEquals(1, collector.getRegisterOffset()); + + // We have a REALLY bad distribution, Sketch as 0 is fine. + Assert.assertEquals( + 0.0D, + HyperLogLogCollector.makeCollector(collector.toByteBuffer().asReadOnlyBuffer()) + .estimateCardinality(), + 1e-5D + ); + final ByteBuffer buffer = collector.toByteBuffer(); + Assert.assertEquals(collector.getNumHeaderBytes(), buffer.remaining()); + + final HyperLogLogCollector denseCollector = HyperLogLogCollector.makeLatestCollector(); + for (int i = 0; i < HyperLogLogCollector.NUM_BUCKETS - 1; i++) { + denseCollector.add((short) i, (byte) 1); + } + + Assert.assertEquals(HyperLogLogCollector.NUM_BUCKETS - 1, denseCollector.getNumNonZeroRegisters()); + final HyperLogLogCollector folded = denseCollector.fold(HyperLogLogCollector.makeCollector(buffer)); + Assert.assertNotNull(folded.toByteBuffer()); + Assert.assertEquals(folded.getStorageBuffer().remaining(), denseCollector.getNumBytesForDenseStorage()); + } + + // Example of a terrible sampling filter. 
Don't use this method + @Test + public void testCanFillUpOnMod() + { + final HashFunction fn = Hashing.murmur3_128(); + final HyperLogLogCollector hyperLogLogCollector = HyperLogLogCollector.makeLatestCollector(); + final byte[] b = new byte[10]; + b[0] = 1; + hyperLogLogCollector.add(b); + final Random random = new Random(347893248701078L); + long loops = 0; + // Do a 1% "sample" where the mod of the hash is 43 + final Predicate pass = i -> { + // ByteOrder.nativeOrder() on lots of systems is ByteOrder.LITTLE_ENDIAN + final ByteBuffer bb = ByteBuffer.wrap(fn.hashInt(i).asBytes()).order(ByteOrder.LITTLE_ENDIAN); + return (bb.getInt() % 100) == 43; + }; + final long loopLimit = 1_000_000_000L; + do { + final int rnd = random.nextInt(); + if (!pass.test(rnd)) { + continue; + } + final Hasher hasher = fn.newHasher(); + hasher.putInt(rnd); + hyperLogLogCollector.add(hasher.hash().asBytes()); + } while (hyperLogLogCollector.getNumNonZeroRegisters() > 0 && ++loops < loopLimit); + Assert.assertNotEquals(loopLimit, loops); + Assert.assertEquals(hyperLogLogCollector.getNumHeaderBytes(), hyperLogLogCollector.toByteBuffer().remaining()); + } + @Test public void testMergeMaxOverflow() { @@ -736,19 +828,6 @@ public void testMergeMaxOverflow() Assert.assertEquals(67, collector.getMaxOverflowValue()); } - - private static void fillBuckets(HyperLogLogCollector collector, byte startOffset, byte endOffset) - { - byte offset = startOffset; - while (offset <= endOffset) { - // fill buckets to shift registerOffset - for (short bucket = 0; bucket < 2048; ++bucket) { - collector.add(bucket, offset); - } - offset++; - } - } - @Test public void testFoldOrder() { diff --git a/indexing-hadoop/pom.xml b/indexing-hadoop/pom.xml index 1fd923c99bfe..a4964aa727a9 100644 --- a/indexing-hadoop/pom.xml +++ b/indexing-hadoop/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT @@ -83,6 +83,22 @@ com.google.code.findbugs jsr305 + + 
org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-mapreduce-client-core + provided + + + javax.servlet + servlet-api + + + @@ -130,22 +146,6 @@ ${hadoop.compile.version} test - - org.apache.hadoop - hadoop-common - provided - - - org.apache.hadoop - hadoop-mapreduce-client-core - provided - - - javax.servlet - servlet-api - - - org.apache.druid druid-server diff --git a/indexing-service/pom.xml b/indexing-service/pom.xml index f664dd471234..94c75f2992ca 100644 --- a/indexing-service/pom.xml +++ b/indexing-service/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/Counters.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/Counters.java index e46389083fb6..a57f419eb703 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/Counters.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/Counters.java @@ -25,7 +25,7 @@ public final class Counters { - public static int incrementAndGetInt(ConcurrentHashMap counters, K key) + public static int getAndIncrementInt(ConcurrentHashMap counters, K key) { // get() before computeIfAbsent() is an optimization to avoid locking in computeIfAbsent() if not needed. // See https://github.com/apache/incubator-druid/pull/6898#discussion_r251384586. 
@@ -33,7 +33,7 @@ public static int incrementAndGetInt(ConcurrentHashMap cou if (counter == null) { counter = counters.computeIfAbsent(key, k -> new AtomicInteger()); } - return counter.incrementAndGet(); + return counter.getAndIncrement(); } public static long incrementAndGetLong(ConcurrentHashMap counters, K key) diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/HadoopIndexTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/HadoopIndexTask.java index 398ed96a2f39..62c23e734a53 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/HadoopIndexTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/HadoopIndexTask.java @@ -430,21 +430,12 @@ private TaskStatus runInternal(TaskToolbox toolbox) throws Exception @Override public void stopGracefully(TaskConfig taskConfig) { - final ClassLoader oldLoader = Thread.currentThread().getContextClassLoader(); - File hadoopJobIdFile = new File(getHadoopJobIdFileName()); - String jobId = null; + // To avoid issue of kill command once the ingestion task is actually completed + if (!ingestionState.equals(IngestionState.COMPLETED)) { + final ClassLoader oldLoader = Thread.currentThread().getContextClassLoader(); + String hadoopJobIdFile = getHadoopJobIdFileName(); - try { - if (hadoopJobIdFile.exists()) { - jobId = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(hadoopJobIdFile, String.class); - } - } - catch (Exception e) { - log.warn(e, "exeption while reading Hadoop Job ID from: %s", hadoopJobIdFile); - } - - try { - if (jobId != null) { + try { ClassLoader loader = HadoopTask.buildClassLoader(getHadoopDependencyCoordinates(), taskConfig.getDefaultHadoopCoordinates()); @@ -452,28 +443,28 @@ public void stopGracefully(TaskConfig taskConfig) "org.apache.druid.indexing.common.task.HadoopIndexTask$HadoopKillMRJobIdProcessingRunner", loader ); + String[] buildKillJobInput = new String[]{ - "-kill", - jobId + 
hadoopJobIdFile }; Class buildKillJobRunnerClass = killMRJobInnerProcessingRunner.getClass(); Method innerProcessingRunTask = buildKillJobRunnerClass.getMethod("runTask", buildKillJobInput.getClass()); Thread.currentThread().setContextClassLoader(loader); - final String killStatusString = (String) innerProcessingRunTask.invoke( + final String killStatusString[] = (String[]) innerProcessingRunTask.invoke( killMRJobInnerProcessingRunner, new Object[]{buildKillJobInput} ); - log.info(StringUtils.format("Tried killing job %s , status: %s", jobId, killStatusString)); + log.info(StringUtils.format("Tried killing job: [%s], status: [%s]", killStatusString[0], killStatusString[1])); + } + catch (Exception e) { + throw new RuntimeException(e); + } + finally { + Thread.currentThread().setContextClassLoader(oldLoader); } - } - catch (Exception e) { - throw new RuntimeException(e); - } - finally { - Thread.currentThread().setContextClassLoader(oldLoader); } } @@ -722,10 +713,29 @@ public Map getStats() @SuppressWarnings("unused") public static class HadoopKillMRJobIdProcessingRunner { - public String runTask(String[] args) throws Exception + public String[] runTask(String[] args) throws Exception { - int res = ToolRunner.run(new JobClient(), args); - return res == 0 ? "Success" : "Fail"; + File hadoopJobIdFile = new File(args[0]); + String jobId = null; + + try { + if (hadoopJobIdFile.exists()) { + jobId = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(hadoopJobIdFile, String.class); + } + } + catch (Exception e) { + log.warn(e, "exeption while reading hadoop job id from: [%s]", hadoopJobIdFile); + } + + if (jobId != null) { + int res = ToolRunner.run(new JobClient(), new String[]{ + "-kill", + jobId + }); + + return new String[] {jobId, (res == 0 ? 
"Success" : "Fail")}; + } + return new String[] {jobId, "Fail"}; } } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/IndexTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/IndexTask.java index 6d5a0d8e56ad..2f62f0b3dae3 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/IndexTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/IndexTask.java @@ -407,7 +407,16 @@ public TaskStatus run(final TaskToolbox toolbox) try { if (chatHandlerProvider.isPresent()) { log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName()); - chatHandlerProvider.get().register(getId(), this, false); + + if (chatHandlerProvider.get().get(getId()).isPresent()) { + // This is a workaround for ParallelIndexSupervisorTask to avoid double registering when it runs in the + // sequential mode. See ParallelIndexSupervisorTask.runSequential(). + // Note that all HTTP endpoints are not available in this case. This works only for + // ParallelIndexSupervisorTask because it doesn't support APIs for live ingestion reports. + log.warn("Chat handler is already registered. 
Skipping chat handler registration."); + } else { + chatHandlerProvider.get().register(getId(), this, false); + } } else { log.warn("No chat handler detected"); } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSubTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSubTask.java index 18e87d758b22..8004243bb3aa 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSubTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSubTask.java @@ -235,9 +235,8 @@ private SegmentAllocator createSegmentAllocator( ) { final DataSchema dataSchema = ingestionSchema.getDataSchema(); - final boolean explicitIntervals = dataSchema.getGranularitySpec().bucketIntervals().isPresent(); final ParallelIndexIOConfig ioConfig = ingestionSchema.getIOConfig(); - if (ioConfig.isAppendToExisting() || !explicitIntervals) { + if (ioConfig.isAppendToExisting()) { return new ActionBasedSegmentAllocator( toolbox.getTaskActionClient(), dataSchema, diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTask.java index 385797f751af..f8eebd4e3da0 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTask.java @@ -33,8 +33,10 @@ import org.apache.druid.indexer.TaskStatus; import org.apache.druid.indexing.common.Counters; import org.apache.druid.indexing.common.TaskLock; +import org.apache.druid.indexing.common.TaskLockType; import org.apache.druid.indexing.common.TaskToolbox; import 
org.apache.druid.indexing.common.actions.LockListAction; +import org.apache.druid.indexing.common.actions.LockTryAcquireAction; import org.apache.druid.indexing.common.actions.TaskActionClient; import org.apache.druid.indexing.common.stats.RowIngestionMetersFactory; import org.apache.druid.indexing.common.task.AbstractTask; @@ -245,13 +247,23 @@ public TaskStatus run(TaskToolbox toolbox) throws Exception chatHandlerProvider.register(getId(), this, false); try { - if (baseFirehoseFactory.isSplittable()) { + if (isParallelMode()) { return runParallel(toolbox); } else { - log.warn( - "firehoseFactory[%s] is not splittable. Running sequentially", - baseFirehoseFactory.getClass().getSimpleName() - ); + if (!baseFirehoseFactory.isSplittable()) { + log.warn( + "firehoseFactory[%s] is not splittable. Running sequentially.", + baseFirehoseFactory.getClass().getSimpleName() + ); + } else if (ingestionSchema.getTuningConfig().getMaxNumSubTasks() == 1) { + log.warn( + "maxNumSubTasks is 1. Running sequentially. " + + "Please set maxNumSubTasks to something higher than 1 if you want to run in parallel ingestion mode." + ); + } else { + throw new ISE("Unknown reason for sequentail mode. 
Failing this task."); + } + return runSequential(toolbox); } } @@ -260,6 +272,15 @@ public TaskStatus run(TaskToolbox toolbox) throws Exception } } + private boolean isParallelMode() + { + if (baseFirehoseFactory.isSplittable() && ingestionSchema.getTuningConfig().getMaxNumSubTasks() > 1) { + return true; + } else { + return false; + } + } + @VisibleForTesting void setToolbox(TaskToolbox toolbox) { @@ -269,7 +290,7 @@ void setToolbox(TaskToolbox toolbox) private TaskStatus runParallel(TaskToolbox toolbox) throws Exception { createRunner(toolbox); - return TaskStatus.fromCode(getId(), runner.run()); + return TaskStatus.fromCode(getId(), Preconditions.checkNotNull(runner, "runner").run()); } private TaskStatus runSequential(TaskToolbox toolbox) @@ -360,43 +381,79 @@ SegmentIdWithShardSpec allocateNewSegment(DateTime timestamp) throws IOException { final String dataSource = getDataSource(); final GranularitySpec granularitySpec = getIngestionSchema().getDataSchema().getGranularitySpec(); - final SortedSet bucketIntervals = Preconditions.checkNotNull( - granularitySpec.bucketIntervals().orNull(), - "bucketIntervals" - ); + final Optional> bucketIntervals = granularitySpec.bucketIntervals(); + // List locks whenever allocating a new segment because locks might be revoked and no longer valid. 
- final Map versions = toolbox + final List locks = toolbox .getTaskActionClient() - .submit(new LockListAction()) + .submit(new LockListAction()); + final TaskLock revokedLock = locks.stream().filter(TaskLock::isRevoked).findAny().orElse(null); + if (revokedLock != null) { + throw new ISE("Lock revoked: [%s]", revokedLock); + } + final Map versions = locks .stream() .collect(Collectors.toMap(TaskLock::getInterval, TaskLock::getVersion)); - final Optional maybeInterval = granularitySpec.bucketInterval(timestamp); - if (!maybeInterval.isPresent()) { - throw new IAE("Could not find interval for timestamp [%s]", timestamp); - } + Interval interval; + String version; + boolean justLockedInterval = false; + if (bucketIntervals.isPresent()) { + // If the granularity spec has explicit intervals, we just need to find the interval (of the segment + // granularity); we already tried to lock it at task startup. + final Optional maybeInterval = granularitySpec.bucketInterval(timestamp); + if (!maybeInterval.isPresent()) { + throw new IAE("Could not find interval for timestamp [%s]", timestamp); + } - final Interval interval = maybeInterval.get(); - if (!bucketIntervals.contains(interval)) { - throw new ISE("Unspecified interval[%s] in granularitySpec[%s]", interval, granularitySpec); + interval = maybeInterval.get(); + if (!bucketIntervals.get().contains(interval)) { + throw new ISE("Unspecified interval[%s] in granularitySpec[%s]", interval, granularitySpec); + } + + version = findVersion(versions, interval); + if (version == null) { + throw new ISE("Cannot find a version for interval[%s]", interval); + } + } else { + // We don't have explicit intervals. We can use the segment granularity to figure out what + // interval we need, but we might not have already locked it. 
+ interval = granularitySpec.getSegmentGranularity().bucket(timestamp); + version = findVersion(versions, interval); + if (version == null) { + // We don't have a lock for this interval, so we should lock it now. + final TaskLock lock = Preconditions.checkNotNull( + toolbox.getTaskActionClient().submit(new LockTryAcquireAction(TaskLockType.EXCLUSIVE, interval)), + "Cannot acquire a lock for interval[%s]", interval + ); + version = lock.getVersion(); + justLockedInterval = true; + } } - final int partitionNum = Counters.incrementAndGetInt(partitionNumCountersPerInterval, interval); + final int partitionNum = Counters.getAndIncrementInt(partitionNumCountersPerInterval, interval); + if (justLockedInterval && partitionNum != 0) { + throw new ISE( + "Expected partitionNum to be 0 for interval [%s] right after locking, but got [%s]", + interval, partitionNum + ); + } return new SegmentIdWithShardSpec( dataSource, interval, - findVersion(versions, interval), + version, new NumberedShardSpec(partitionNum, 0) ); } + @Nullable private static String findVersion(Map versions, Interval interval) { return versions.entrySet().stream() .filter(entry -> entry.getKey().contains(interval)) .map(Entry::getValue) .findFirst() - .orElseThrow(() -> new ISE("Cannot find a version for interval[%s]", interval)); + .orElse(null); } /** @@ -432,11 +489,7 @@ public Response report( public Response getMode(@Context final HttpServletRequest req) { IndexTaskUtils.datasourceAuthorizationCheck(req, Action.READ, getDataSource(), authorizerMapper); - if (runner == null) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE).entity("task is not running yet").build(); - } else { - return Response.ok(baseFirehoseFactory.isSplittable() ? "parallel" : "sequential").build(); - } + return Response.ok(isParallelMode() ? 
"parallel" : "sequential").build(); } @GET diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexTuningConfig.java b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexTuningConfig.java index 85929dbd880c..c0e93704d257 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexTuningConfig.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexTuningConfig.java @@ -22,6 +22,7 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonTypeName; +import com.google.common.base.Preconditions; import org.apache.druid.indexing.common.task.IndexTask.IndexTuningConfig; import org.apache.druid.segment.IndexSpec; import org.apache.druid.segment.writeout.SegmentWriteOutMediumFactory; @@ -34,7 +35,7 @@ @JsonTypeName("index_parallel") public class ParallelIndexTuningConfig extends IndexTuningConfig { - private static final int DEFAULT_MAX_NUM_BATCH_TASKS = Integer.MAX_VALUE; // unlimited + private static final int DEFAULT_MAX_NUM_BATCH_TASKS = 1; private static final int DEFAULT_MAX_RETRY = 3; private static final long DEFAULT_TASK_STATUS_CHECK_PERIOD_MS = 1000; @@ -131,6 +132,8 @@ public ParallelIndexTuningConfig( this.chatHandlerTimeout = DEFAULT_CHAT_HANDLER_TIMEOUT; this.chatHandlerNumRetries = DEFAULT_CHAT_HANDLER_NUM_RETRIES; + + Preconditions.checkArgument(this.maxNumSubTasks > 0, "maxNumSubTasks must be positive"); } @JsonProperty diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java index b8402fab3bed..787852ddfbd5 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java +++ 
b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/TaskLockbox.java @@ -1073,16 +1073,13 @@ public boolean equals(Object o) return true; } - if (!getClass().equals(o.getClass())) { + if (o == null || !getClass().equals(o.getClass())) { return false; } - final TaskLockPosse that = (TaskLockPosse) o; - if (!taskLock.equals(that.taskLock)) { - return false; - } - - return taskIds.equals(that.taskIds); + TaskLockPosse that = (TaskLockPosse) o; + return java.util.Objects.equals(taskLock, that.taskLock) && + java.util.Objects.equals(taskIds, that.taskIds); } @Override diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/OverlordResource.java b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/OverlordResource.java index 9b59202ee0ed..e5abac4070e7 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/OverlordResource.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/overlord/http/OverlordResource.java @@ -101,7 +101,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiFunction; import java.util.stream.Collectors; /** @@ -122,7 +121,6 @@ public class OverlordResource private AtomicReference workerConfigRef = null; private static final List API_TASK_STATES = ImmutableList.of("pending", "waiting", "running", "complete"); - @Inject public OverlordResource( TaskMaster taskMaster, @@ -503,100 +501,6 @@ public Response getWaitingTasks(@Context final HttpServletRequest req) return getTasks("waiting", null, null, null, null, req); } - private static class AnyTask extends TaskRunnerWorkItem - { - private final String taskType; - private final String dataSource; - private final TaskState taskState; - private final RunnerTaskState runnerTaskState; - private final DateTime createdTime; - private final DateTime queueInsertionTime; - private final TaskLocation taskLocation; - - AnyTask( - String 
taskId, - String taskType, - ListenableFuture result, - String dataSource, - TaskState state, - RunnerTaskState runnerState, - DateTime createdTime, - DateTime queueInsertionTime, - TaskLocation taskLocation - ) - { - super(taskId, result, DateTimes.EPOCH, DateTimes.EPOCH); - this.taskType = taskType; - this.dataSource = dataSource; - this.taskState = state; - this.runnerTaskState = runnerState; - this.createdTime = createdTime; - this.queueInsertionTime = queueInsertionTime; - this.taskLocation = taskLocation; - } - - @Override - public TaskLocation getLocation() - { - return taskLocation; - } - - @Override - public String getTaskType() - { - return taskType; - } - - @Override - public String getDataSource() - { - return dataSource; - } - - public TaskState getTaskState() - { - return taskState; - } - - public RunnerTaskState getRunnerTaskState() - { - return runnerTaskState; - } - - @Override - public DateTime getCreatedTime() - { - return createdTime; - } - - @Override - public DateTime getQueueInsertionTime() - { - return queueInsertionTime; - } - - public AnyTask withTaskState( - TaskState newTaskState, - RunnerTaskState runnerState, - DateTime createdTime, - DateTime queueInsertionTime, - TaskLocation taskLocation - ) - { - return new AnyTask( - getTaskId(), - getTaskType(), - getResult(), - getDataSource(), - newTaskState, - runnerState, - createdTime, - queueInsertionTime, - taskLocation - ); - } - } - @GET @Path("/pendingTasks") @Produces(MediaType.APPLICATION_JSON) @@ -760,120 +664,6 @@ public Response getTasks( return Response.ok(authorizedList).build(); } - private static BiFunction, RunnerTaskState, TaskStatusPlus> newTaskInfo2TaskStatusPlusFn() - { - return (taskInfo, runnerTaskState) -> new TaskStatusPlus( - taskInfo.getId(), - taskInfo.getTask() == null ? null : taskInfo.getTask().getType(), - taskInfo.getCreatedTime(), - // Would be nice to include the real queue insertion time, but the - // TaskStorage API doesn't yet allow it. 
- DateTimes.EPOCH, - taskInfo.getStatus().getStatusCode(), - runnerTaskState, - taskInfo.getStatus().getDuration(), - TaskLocation.unknown(), - taskInfo.getDataSource(), - taskInfo.getStatus().getErrorMsg() - ); - } - - private List filterActiveTasks( - RunnerTaskState state, - List allTasks - ) - { - //divide active tasks into 3 lists : running, pending, waiting - Optional taskRunnerOpt = taskMaster.getTaskRunner(); - if (!taskRunnerOpt.isPresent()) { - throw new WebApplicationException( - Response.serverError().entity("No task runner found").build() - ); - } - TaskRunner runner = taskRunnerOpt.get(); - // the order of tasks below is waiting, pending, running to prevent - // skipping a task, it's the order in which tasks will change state - // if they do while this is code is executing, so a task might be - // counted twice but never skipped - if (RunnerTaskState.WAITING.equals(state)) { - Collection runnersKnownTasks = runner.getKnownTasks(); - Set runnerKnownTaskIds = runnersKnownTasks - .stream() - .map(TaskRunnerWorkItem::getTaskId) - .collect(Collectors.toSet()); - final List waitingTasks = new ArrayList<>(); - for (TaskRunnerWorkItem task : allTasks) { - if (!runnerKnownTaskIds.contains(task.getTaskId())) { - waitingTasks.add(((AnyTask) task).withTaskState( - TaskState.RUNNING, - RunnerTaskState.WAITING, - task.getCreatedTime(), - task.getQueueInsertionTime(), - task.getLocation() - )); - } - } - return waitingTasks; - } - - if (RunnerTaskState.PENDING.equals(state)) { - Collection knownPendingTasks = runner.getPendingTasks(); - Set pendingTaskIds = knownPendingTasks - .stream() - .map(TaskRunnerWorkItem::getTaskId) - .collect(Collectors.toSet()); - Map workItemIdMap = knownPendingTasks - .stream() - .collect(Collectors.toMap( - TaskRunnerWorkItem::getTaskId, - java.util.function.Function.identity(), - (previousWorkItem, newWorkItem) -> newWorkItem - )); - final List pendingTasks = new ArrayList<>(); - for (TaskRunnerWorkItem task : allTasks) { - if 
(pendingTaskIds.contains(task.getTaskId())) { - pendingTasks.add(((AnyTask) task).withTaskState( - TaskState.RUNNING, - RunnerTaskState.PENDING, - workItemIdMap.get(task.getTaskId()).getCreatedTime(), - workItemIdMap.get(task.getTaskId()).getQueueInsertionTime(), - workItemIdMap.get(task.getTaskId()).getLocation() - )); - } - } - return pendingTasks; - } - - if (RunnerTaskState.RUNNING.equals(state)) { - Collection knownRunningTasks = runner.getRunningTasks(); - Set runningTaskIds = knownRunningTasks - .stream() - .map(TaskRunnerWorkItem::getTaskId) - .collect(Collectors.toSet()); - Map workItemIdMap = knownRunningTasks - .stream() - .collect(Collectors.toMap( - TaskRunnerWorkItem::getTaskId, - java.util.function.Function.identity(), - (previousWorkItem, newWorkItem) -> newWorkItem - )); - final List runningTasks = new ArrayList<>(); - for (TaskRunnerWorkItem task : allTasks) { - if (runningTaskIds.contains(task.getTaskId())) { - runningTasks.add(((AnyTask) task).withTaskState( - TaskState.RUNNING, - RunnerTaskState.RUNNING, - workItemIdMap.get(task.getTaskId()).getCreatedTime(), - workItemIdMap.get(task.getTaskId()).getQueueInsertionTime(), - workItemIdMap.get(task.getTaskId()).getLocation() - )); - } - } - return runningTasks; - } - return allTasks; - } - @DELETE @Path("/pendingSegments/{dataSource}") @Produces(MediaType.APPLICATION_JSON) @@ -1016,6 +806,102 @@ private Response asLeaderWith(Optional x, Function f) } } + private List filterActiveTasks( + RunnerTaskState state, + List allTasks + ) + { + //divide active tasks into 3 lists : running, pending, waiting + Optional taskRunnerOpt = taskMaster.getTaskRunner(); + if (!taskRunnerOpt.isPresent()) { + throw new WebApplicationException( + Response.serverError().entity("No task runner found").build() + ); + } + TaskRunner runner = taskRunnerOpt.get(); + // the order of tasks below is waiting, pending, running to prevent + // skipping a task, it's the order in which tasks will change state + // if they do while 
this is code is executing, so a task might be + // counted twice but never skipped + if (RunnerTaskState.WAITING.equals(state)) { + Collection runnersKnownTasks = runner.getKnownTasks(); + Set runnerKnownTaskIds = runnersKnownTasks + .stream() + .map(TaskRunnerWorkItem::getTaskId) + .collect(Collectors.toSet()); + final List waitingTasks = new ArrayList<>(); + for (TaskRunnerWorkItem task : allTasks) { + if (!runnerKnownTaskIds.contains(task.getTaskId())) { + waitingTasks.add(((AnyTask) task).withTaskState( + TaskState.RUNNING, + RunnerTaskState.WAITING, + task.getCreatedTime(), + task.getQueueInsertionTime(), + task.getLocation() + )); + } + } + return waitingTasks; + } + + if (RunnerTaskState.PENDING.equals(state)) { + Collection knownPendingTasks = runner.getPendingTasks(); + Set pendingTaskIds = knownPendingTasks + .stream() + .map(TaskRunnerWorkItem::getTaskId) + .collect(Collectors.toSet()); + Map workItemIdMap = knownPendingTasks + .stream() + .collect(Collectors.toMap( + TaskRunnerWorkItem::getTaskId, + java.util.function.Function.identity(), + (previousWorkItem, newWorkItem) -> newWorkItem + )); + final List pendingTasks = new ArrayList<>(); + for (TaskRunnerWorkItem task : allTasks) { + if (pendingTaskIds.contains(task.getTaskId())) { + pendingTasks.add(((AnyTask) task).withTaskState( + TaskState.RUNNING, + RunnerTaskState.PENDING, + workItemIdMap.get(task.getTaskId()).getCreatedTime(), + workItemIdMap.get(task.getTaskId()).getQueueInsertionTime(), + workItemIdMap.get(task.getTaskId()).getLocation() + )); + } + } + return pendingTasks; + } + + if (RunnerTaskState.RUNNING.equals(state)) { + Collection knownRunningTasks = runner.getRunningTasks(); + Set runningTaskIds = knownRunningTasks + .stream() + .map(TaskRunnerWorkItem::getTaskId) + .collect(Collectors.toSet()); + Map workItemIdMap = knownRunningTasks + .stream() + .collect(Collectors.toMap( + TaskRunnerWorkItem::getTaskId, + java.util.function.Function.identity(), + (previousWorkItem, newWorkItem) -> 
newWorkItem + )); + final List runningTasks = new ArrayList<>(); + for (TaskRunnerWorkItem task : allTasks) { + if (runningTaskIds.contains(task.getTaskId())) { + runningTasks.add(((AnyTask) task).withTaskState( + TaskState.RUNNING, + RunnerTaskState.RUNNING, + workItemIdMap.get(task.getTaskId()).getCreatedTime(), + workItemIdMap.get(task.getTaskId()).getQueueInsertionTime(), + workItemIdMap.get(task.getTaskId()).getLocation() + )); + } + } + return runningTasks; + } + return allTasks; + } + private List securedTaskStatusPlus( List collectionToFilter, @Nullable String dataSource, @@ -1057,4 +943,98 @@ private List securedTaskStatusPlus( ) ); } + + private static class AnyTask extends TaskRunnerWorkItem + { + private final String taskType; + private final String dataSource; + private final TaskState taskState; + private final RunnerTaskState runnerTaskState; + private final DateTime createdTime; + private final DateTime queueInsertionTime; + private final TaskLocation taskLocation; + + AnyTask( + String taskId, + String taskType, + ListenableFuture result, + String dataSource, + TaskState state, + RunnerTaskState runnerState, + DateTime createdTime, + DateTime queueInsertionTime, + TaskLocation taskLocation + ) + { + super(taskId, result, DateTimes.EPOCH, DateTimes.EPOCH); + this.taskType = taskType; + this.dataSource = dataSource; + this.taskState = state; + this.runnerTaskState = runnerState; + this.createdTime = createdTime; + this.queueInsertionTime = queueInsertionTime; + this.taskLocation = taskLocation; + } + + @Override + public TaskLocation getLocation() + { + return taskLocation; + } + + @Override + public String getTaskType() + { + return taskType; + } + + @Override + public String getDataSource() + { + return dataSource; + } + + public TaskState getTaskState() + { + return taskState; + } + + public RunnerTaskState getRunnerTaskState() + { + return runnerTaskState; + } + + @Override + public DateTime getCreatedTime() + { + return createdTime; + } + + 
@Override + public DateTime getQueueInsertionTime() + { + return queueInsertionTime; + } + + public AnyTask withTaskState( + TaskState newTaskState, + RunnerTaskState runnerState, + DateTime createdTime, + DateTime queueInsertionTime, + TaskLocation taskLocation + ) + { + return new AnyTask( + getTaskId(), + getTaskType(), + getResult(), + getDataSource(), + newTaskState, + runnerState, + createdTime, + queueInsertionTime, + taskLocation + ); + } + } } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamDataSourceMetadata.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamDataSourceMetadata.java index 2501048c6e73..b9e8d9ab82fd 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamDataSourceMetadata.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamDataSourceMetadata.java @@ -19,7 +19,6 @@ package org.apache.druid.indexing.seekablestream; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.druid.indexing.overlord.DataSourceMetadata; import org.apache.druid.java.util.common.IAE; @@ -33,9 +32,8 @@ public abstract class SeekableStreamDataSourceMetadata seekableStreamPartitions; - @JsonCreator public SeekableStreamDataSourceMetadata( - @JsonProperty("partitions") SeekableStreamPartitions seekableStreamPartitions + SeekableStreamPartitions seekableStreamPartitions ) { this.seekableStreamPartitions = seekableStreamPartitions; @@ -63,7 +61,6 @@ public boolean matches(DataSourceMetadata other) return plus(other).equals(other.plus(this)); } - @Override public DataSourceMetadata plus(DataSourceMetadata other) { diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTask.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTask.java 
index 29186961ce66..4a596e4e1ff9 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTask.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTask.java @@ -19,8 +19,6 @@ package org.apache.druid.indexing.seekablestream; -import com.fasterxml.jackson.annotation.JacksonInject; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; @@ -58,17 +56,14 @@ import org.apache.druid.server.security.AuthorizerMapper; import org.apache.druid.utils.CircularBuffer; +import javax.annotation.Nullable; import java.nio.ByteBuffer; import java.util.Map; -import java.util.Random; -import java.util.concurrent.ThreadLocalRandom; - -public abstract class SeekableStreamIndexTask extends AbstractTask - implements ChatHandler +public abstract class SeekableStreamIndexTask + extends AbstractTask implements ChatHandler { public static final long LOCK_ACQUIRE_TIMEOUT_SECONDS = 15; - private static final Random RANDOM = ThreadLocalRandom.current(); private static final EmittingLogger log = new EmittingLogger(SeekableStreamIndexTask.class); private final SeekableStreamIndexTaskRunner runner; @@ -82,18 +77,17 @@ public abstract class SeekableStreamIndexTask savedParseExceptions; - @JsonCreator public SeekableStreamIndexTask( - @JsonProperty("id") String id, - @JsonProperty("resource") TaskResource taskResource, - @JsonProperty("dataSchema") DataSchema dataSchema, - @JsonProperty("tuningConfig") SeekableStreamIndexTaskTuningConfig tuningConfig, - @JsonProperty("ioConfig") SeekableStreamIndexTaskIOConfig ioConfig, - @JsonProperty("context") Map context, - @JacksonInject ChatHandlerProvider chatHandlerProvider, - @JacksonInject AuthorizerMapper authorizerMapper, - @JacksonInject RowIngestionMetersFactory rowIngestionMetersFactory, - String groupId + final 
String id, + @Nullable final TaskResource taskResource, + final DataSchema dataSchema, + final SeekableStreamIndexTaskTuningConfig tuningConfig, + final SeekableStreamIndexTaskIOConfig ioConfig, + @Nullable final Map context, + @Nullable final ChatHandlerProvider chatHandlerProvider, + final AuthorizerMapper authorizerMapper, + final RowIngestionMetersFactory rowIngestionMetersFactory, + @Nullable final String groupId ) { super( @@ -119,7 +113,6 @@ public SeekableStreamIndexTask( this.runner = createTaskRunner(); } - private static String makeTaskId(String dataSource, String type) { final String suffix = RandomIdUtils.getRandomId(); diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskIOConfig.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskIOConfig.java index 6c469c7d0123..286d8827f055 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskIOConfig.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskIOConfig.java @@ -19,7 +19,6 @@ package org.apache.druid.indexing.seekablestream; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Optional; import com.google.common.base.Preconditions; @@ -46,18 +45,16 @@ public abstract class SeekableStreamIndexTaskIOConfig exclusiveStartSequenceNumberPartitions; - @JsonCreator public SeekableStreamIndexTaskIOConfig( - @JsonProperty("taskGroupId") @Nullable Integer taskGroupId, // can be null for backward compabitility - @JsonProperty("baseSequenceName") String baseSequenceName, - @JsonProperty("startPartitions") SeekableStreamPartitions startPartitions, - @JsonProperty("endPartitions") SeekableStreamPartitions endPartitions, - @JsonProperty("useTransaction") Boolean useTransaction, - @JsonProperty("minimumMessageTime") DateTime 
minimumMessageTime, - @JsonProperty("maximumMessageTime") DateTime maximumMessageTime, - @JsonProperty("skipOffsetGaps") Boolean skipOffsetGaps, - @JsonProperty("exclusiveStartSequenceNumberPartitions") - Set exclusiveStartSequenceNumberPartitions + final @Nullable Integer taskGroupId, // can be null for backward compabitility + final String baseSequenceName, + final SeekableStreamPartitions startPartitions, + final SeekableStreamPartitions endPartitions, + final Boolean useTransaction, + final DateTime minimumMessageTime, + final DateTime maximumMessageTime, + final Boolean skipOffsetGaps, + final Set exclusiveStartSequenceNumberPartitions ) { this.taskGroupId = taskGroupId; diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java index 857264965f50..ee9a7e2453e7 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskRunner.java @@ -20,8 +20,6 @@ package org.apache.druid.indexing.seekablestream; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; @@ -53,7 +51,6 @@ import org.apache.druid.indexing.common.TaskToolbox; import org.apache.druid.indexing.common.actions.CheckPointDataSourceMetadataAction; import org.apache.druid.indexing.common.actions.ResetDataSourceMetadataAction; -import org.apache.druid.indexing.common.actions.SegmentTransactionalInsertAction; import org.apache.druid.indexing.common.stats.RowIngestionMeters; import org.apache.druid.indexing.common.stats.RowIngestionMetersFactory; import 
org.apache.druid.indexing.common.task.IndexTaskUtils; @@ -75,7 +72,6 @@ import org.apache.druid.segment.realtime.appenderator.AppenderatorDriverAddResult; import org.apache.druid.segment.realtime.appenderator.SegmentsAndMetadata; import org.apache.druid.segment.realtime.appenderator.StreamAppenderatorDriver; -import org.apache.druid.segment.realtime.appenderator.TransactionalSegmentPublisher; import org.apache.druid.segment.realtime.firehose.ChatHandler; import org.apache.druid.segment.realtime.firehose.ChatHandlerProvider; import org.apache.druid.server.security.Access; @@ -142,10 +138,16 @@ public enum Status } private static final EmittingLogger log = new EmittingLogger(SeekableStreamIndexTaskRunner.class); - private static final String METADATA_NEXT_PARTITIONS = "nextPartitions"; - private static final String METADATA_PUBLISH_PARTITIONS = "publishPartitions"; + static final String METADATA_NEXT_PARTITIONS = "nextPartitions"; + static final String METADATA_PUBLISH_PARTITIONS = "publishPartitions"; private final Map endOffsets; + + // lastReadOffsets are the last offsets that were read and processed. + private final Map lastReadOffsets = new HashMap<>(); + + // currOffsets are what should become the start offsets of the next reader, if we stopped reading now. They are + // initialized to the start offsets when the task begins. 
private final ConcurrentMap currOffsets = new ConcurrentHashMap<>(); private final ConcurrentMap lastPersistedOffsets = new ConcurrentHashMap<>(); @@ -196,8 +198,6 @@ public enum Status private final Set publishingSequences = Sets.newConcurrentHashSet(); private final List> publishWaitList = new ArrayList<>(); private final List> handOffWaitList = new ArrayList<>(); - private final Map initialOffsetsSnapshot = new HashMap<>(); - private final Set exclusiveStartingPartitions = new HashSet<>(); private volatile DateTime startTime; private volatile Status status = Status.NOT_STARTED; // this is only ever set by the task runner thread (runThread) @@ -210,7 +210,7 @@ public enum Status protected volatile boolean pauseRequested = false; private volatile long nextCheckpointTime; - private volatile CopyOnWriteArrayList sequences; + private volatile CopyOnWriteArrayList> sequences; private volatile Throwable backgroundThreadException; public SeekableStreamIndexTaskRunner( @@ -276,7 +276,7 @@ private TaskStatus runInternal(TaskToolbox toolbox) throws Exception Map.Entry> previous = sequenceOffsets.next(); while (sequenceOffsets.hasNext()) { Map.Entry> current = sequenceOffsets.next(); - sequences.add(new SequenceMetadata( + addSequence(new SequenceMetadata<>( previous.getKey(), StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()), previous.getValue(), @@ -287,7 +287,7 @@ private TaskStatus runInternal(TaskToolbox toolbox) throws Exception previous = current; exclusive = true; } - sequences.add(new SequenceMetadata( + addSequence(new SequenceMetadata<>( previous.getKey(), StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()), previous.getValue(), @@ -296,7 +296,7 @@ private TaskStatus runInternal(TaskToolbox toolbox) throws Exception exclusive ? 
previous.getValue().keySet() : null )); } else { - sequences.add(new SequenceMetadata( + addSequence(new SequenceMetadata<>( 0, StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), 0), ioConfig.getStartPartitions().getPartitionSequenceNumberMap(), @@ -369,7 +369,7 @@ private TaskStatus runInternal(TaskToolbox toolbox) throws Exception } else { @SuppressWarnings("unchecked") final Map restoredMetadataMap = (Map) restoredMetadata; - final SeekableStreamPartitions restoredNextPartitions = deserializeSeekableStreamPartitionsFromMetadata( + final SeekableStreamPartitions restoredNextPartitions = deserializePartitionsFromMetadata( toolbox.getObjectMapper(), restoredMetadataMap.get(METADATA_NEXT_PARTITIONS) ); @@ -412,6 +412,21 @@ private TaskStatus runInternal(TaskToolbox toolbox) throws Exception ); } + // Initialize lastReadOffsets immediately after restoring currOffsets. This is only done when end offsets are + // inclusive, because the point of initializing lastReadOffsets here is so we know when to skip the start record. + // When end offsets are exclusive, we never skip the start record. + if (!isEndOffsetExclusive()) { + for (Map.Entry entry : currOffsets.entrySet()) { + final boolean isAtStart = entry.getValue().equals( + ioConfig.getStartPartitions().getPartitionSequenceNumberMap().get(entry.getKey()) + ); + + if (!isAtStart || ioConfig.getExclusiveStartSequenceNumberPartitions().contains(entry.getKey())) { + lastReadOffsets.put(entry.getKey(), entry.getValue()); + } + } + } + // Set up committer. 
final Supplier committerSupplier = () -> { final Map snapshot = ImmutableMap.copyOf(currOffsets); @@ -454,17 +469,14 @@ public void run() status = Status.READING; Throwable caughtExceptionInner = null; - initialOffsetsSnapshot.putAll(currOffsets); - exclusiveStartingPartitions.addAll(ioConfig.getExclusiveStartSequenceNumberPartitions()); - try { while (stillReading) { if (possiblyPause()) { // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign - // partitions upon resuming. This is safe even if the end sequences have not been modified. + // partitions upon resuming. Don't call "seekToStartingSequence" after "assignPartitions", because there's + // no need to re-seek here. All we're going to be doing is dropping partitions. assignment = assignPartitions(recordSupplier); possiblyResetDataSourceMetadata(toolbox, recordSupplier, assignment, currOffsets); - seekToStartingSequence(recordSupplier, assignment); if (assignment.isEmpty()) { log.info("All partitions have been fully read"); @@ -474,7 +486,7 @@ public void run() } // if stop is requested or task's end sequence is set by call to setEndOffsets method with finish set to true - if (stopRequested.get() || sequences.get(sequences.size() - 1).isCheckpointed()) { + if (stopRequested.get() || sequences.size() == 0 || sequences.get(sequences.size() - 1).isCheckpointed()) { status = Status.PUBLISHING; } @@ -490,7 +502,6 @@ public void run() maybePersistAndPublishSequences(committerSupplier); - // calling getRecord() ensures that exceptions specific to kafka/kinesis like OffsetOutOfRangeException // are handled in the subclasses. 
List> records = getRecords( @@ -503,44 +514,17 @@ public void run() SequenceMetadata sequenceToCheckpoint = null; for (OrderedPartitionableRecord record : records) { - - // for Kafka, the end offsets are exclusive, so skip it - if (isEndSequenceOffsetsExclusive() && - createSequenceNumber(record.getSequenceNumber()).compareTo( - createSequenceNumber(endOffsets.get(record.getPartitionId()))) == 0) { - continue; - } - - // for the first message we receive, check that we were given a message with a sequenceNumber that matches our - // expected starting sequenceNumber - if (!verifyInitialRecordAndSkipExclusivePartition(record, initialOffsetsSnapshot)) { - continue; - } + final boolean shouldProcess = verifyRecordInRange(record.getPartitionId(), record.getSequenceNumber()); log.trace( - "Got stream[%s] partition[%s] sequence[%s].", + "Got stream[%s] partition[%s] sequenceNumber[%s], shouldProcess[%s].", record.getStream(), record.getPartitionId(), - record.getSequenceNumber() + record.getSequenceNumber(), + shouldProcess ); - if (isEndOfShard(record.getSequenceNumber())) { - // shard is closed, applies to Kinesis only - currOffsets.put(record.getPartitionId(), record.getSequenceNumber()); - } else if (createSequenceNumber(record.getSequenceNumber()).compareTo( - createSequenceNumber(endOffsets.get(record.getPartitionId()))) <= 0) { - - - if (!record.getSequenceNumber().equals(currOffsets.get(record.getPartitionId())) - && !ioConfig.isSkipOffsetGaps()) { - throw new ISE( - "WTF?! 
Got sequence[%s] after sequence[%s] in partition[%s].", - record.getSequenceNumber(), - currOffsets.get(record.getPartitionId()), - record.getPartitionId() - ); - } - + if (shouldProcess) { try { final List valueBytess = record.getData(); final List rows; @@ -554,15 +538,15 @@ public void run() } boolean isPersistRequired = false; - final SequenceMetadata sequenceToUse = sequences + final SequenceMetadata sequenceToUse = sequences .stream() - .filter(sequenceMetadata -> sequenceMetadata.canHandle(record)) + .filter(sequenceMetadata -> sequenceMetadata.canHandle(this, record)) .findFirst() .orElse(null); if (sequenceToUse == null) { throw new ISE( - "WTH?! cannot find any valid sequence for record with partition [%d] and sequence [%d]. Current sequences: %s", + "WTH?! cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. Current sequences: %s", record.getPartitionId(), record.getSequenceNumber(), sequences @@ -632,12 +616,18 @@ public void onFailure(Throwable t) // in kafka, we can easily get the next offset by adding 1, but for kinesis, there's no way // to get the next sequence number without having to make an expensive api call. So the behavior // here for kafka is to +1 while for kinesis we simply save the current sequence number - currOffsets.put(record.getPartitionId(), getSequenceNumberToStoreAfterRead(record.getSequenceNumber())); + lastReadOffsets.put(record.getPartitionId(), record.getSequenceNumber()); + currOffsets.put(record.getPartitionId(), getNextStartOffset(record.getSequenceNumber())); } - if ((currOffsets.get(record.getPartitionId()).equals(endOffsets.get(record.getPartitionId())) - || isEndOfShard(currOffsets.get(record.getPartitionId()))) - && assignment.remove(record.getStreamPartition())) { + // Use record.getSequenceNumber() in the moreToRead check, since currOffsets might not have been + // updated if we were skipping records for being beyond the end. 
+ final boolean moreToReadAfterThisRecord = isMoreToReadAfterReadingRecord( + record.getSequenceNumber(), + endOffsets.get(record.getPartitionId()) + ); + + if (!moreToReadAfterThisRecord && assignment.remove(record.getStreamPartition())) { log.info("Finished reading stream[%s], partition[%s].", record.getStream(), record.getPartitionId()); recordSupplier.assign(assignment); stillReading = !assignment.isEmpty(); @@ -703,11 +693,18 @@ public void onFailure(Throwable t) status = Status.PUBLISHING; } - for (SequenceMetadata sequenceMetadata : sequences) { + for (int i = 0; i < sequences.size(); i++) { + final SequenceMetadata sequenceMetadata = sequences.get(i); if (!publishingSequences.contains(sequenceMetadata.getSequenceName())) { - // this is done to prevent checks in sequence specific commit supplier from failing - sequenceMetadata.setEndOffsets(currOffsets); - sequenceMetadata.updateAssignments(currOffsets); + final boolean isLast = i == (sequences.size() - 1); + if (isLast) { + // Shorten endOffsets of the last sequence to match currOffsets. + sequenceMetadata.setEndOffsets(currOffsets); + } + + // Update assignments of the sequence, which should clear them. (This will be checked later, when the + // Committer is built.) + sequenceMetadata.updateAssignments(currOffsets, this::isMoreToReadAfterReadingRecord); publishingSequences.add(sequenceMetadata.getSequenceName()); // persist already done in finally, so directly add to publishQueue publishAndRegisterHandoff(sequenceMetadata); @@ -810,7 +807,7 @@ public void onFailure(Throwable t) toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode); toolbox.getDataSegmentServerAnnouncer().unannounce(); } - catch (Exception e) { + catch (Throwable e) { if (caughtExceptionOuter != null) { caughtExceptionOuter.addSuppressed(e); } else { @@ -823,11 +820,6 @@ public void onFailure(Throwable t) return TaskStatus.success(task.getId()); } - /** - * checks if the input seqNum marks end of shard. 
Used by Kinesis only - */ - protected abstract boolean isEndOfShard(SequenceOffsetType seqNum); - private void checkPublishAndHandoffFailure() throws ExecutionException, InterruptedException { // Check if any publishFuture failed. @@ -857,14 +849,14 @@ private void checkPublishAndHandoffFailure() throws ExecutionException, Interrup handOffWaitList.removeAll(handoffFinished); } - private void publishAndRegisterHandoff(SequenceMetadata sequenceMetadata) + private void publishAndRegisterHandoff(SequenceMetadata sequenceMetadata) { log.info("Publishing segments for sequence [%s]", sequenceMetadata); final ListenableFuture publishFuture = Futures.transform( driver.publish( - sequenceMetadata.createPublisher(toolbox, ioConfig.isUseTransaction()), - sequenceMetadata.getCommitterSupplier(stream, lastPersistedOffsets).get(), + sequenceMetadata.createPublisher(this, toolbox, ioConfig.isUseTransaction()), + sequenceMetadata.getCommitterSupplier(this, stream, lastPersistedOffsets).get(), Collections.singletonList(sequenceMetadata.getSequenceName()) ), (Function) publishedSegmentsAndMetadata -> { @@ -932,7 +924,7 @@ public Void apply(@Nullable SegmentsAndMetadata handoffSegmentsAndMetadata) @Override public void onFailure(Throwable t) { - log.error(t, "Error while publishing segments for sequence[%s]", sequenceMetadata); + log.error(t, "Error while publishing segments for sequenceNumber[%s]", sequenceMetadata); handoffFuture.setException(t); } } @@ -949,11 +941,9 @@ private boolean restoreSequences() throws IOException final File sequencesPersistFile = getSequencesPersistFile(toolbox); if (sequencesPersistFile.exists()) { sequences = new CopyOnWriteArrayList<>( - toolbox.getObjectMapper().>readValue( + toolbox.getObjectMapper().>>readValue( sequencesPersistFile, - new TypeReference>() - { - } + getSequenceMetadataTypeReference() ) ); return true; @@ -966,9 +956,7 @@ private synchronized void persistSequences() throws IOException { log.info("Persisting Sequences Metadata [%s]", 
sequences); toolbox.getObjectMapper().writerWithType( - new TypeReference>() - { - } + getSequenceMetadataTypeReference() ).writeValue(getSequencesPersistFile(toolbox), sequences); } @@ -1013,8 +1001,8 @@ private Map getTaskCompletionRowStats() private void maybePersistAndPublishSequences(Supplier committerSupplier) throws InterruptedException { - for (SequenceMetadata sequenceMetadata : sequences) { - sequenceMetadata.updateAssignments(currOffsets); + for (SequenceMetadata sequenceMetadata : sequences) { + sequenceMetadata.updateAssignments(currOffsets, this::isMoreToReadBeforeReadingRecord); if (!sequenceMetadata.isOpen() && !publishingSequences.contains(sequenceMetadata.getSequenceName())) { publishingSequences.add(sequenceMetadata.getSequenceName()); try { @@ -1040,19 +1028,21 @@ private Set> assignPartitions( { final Set> assignment = new HashSet<>(); for (Map.Entry entry : currOffsets.entrySet()) { - final SequenceOffsetType endOffset = endOffsets.get(entry.getKey()); - if (isEndOfShard(endOffset) - || SeekableStreamPartitions.NO_END_SEQUENCE_NUMBER.equals(endOffset) - || createSequenceNumber(entry.getValue()).compareTo(createSequenceNumber(endOffset)) < 0) { - assignment.add(StreamPartition.of(stream, entry.getKey())); - } else if (entry.getValue().equals(endOffset)) { - log.info("Finished reading partition[%s].", entry.getKey()); - } else { - throw new ISE( - "WTF?! 
Cannot start from sequence[%,d] > endOffset[%,d]", - entry.getValue(), + final PartitionIdType partition = entry.getKey(); + final SequenceOffsetType currOffset = entry.getValue(); + final SequenceOffsetType endOffset = endOffsets.get(partition); + + if (!isRecordAlreadyRead(partition, endOffset) && isMoreToReadBeforeReadingRecord(currOffset, endOffset)) { + log.info( + "Adding partition[%s], start[%s] -> end[%s] to assignment.", + partition, + currOffset, endOffset ); + + assignment.add(StreamPartition.of(stream, partition)); + } else { + log.info("Finished reading partition[%s].", partition); } } @@ -1061,6 +1051,77 @@ private Set> assignPartitions( return assignment; } + private void addSequence(final SequenceMetadata sequenceMetadata) + { + // Sanity check that the start of the new sequence matches up with the end of the prior sequence. + for (Map.Entry entry : sequenceMetadata.getStartOffsets().entrySet()) { + final PartitionIdType partition = entry.getKey(); + final SequenceOffsetType startOffset = entry.getValue(); + + if (!sequences.isEmpty()) { + final SequenceOffsetType priorOffset = sequences.get(sequences.size() - 1).endOffsets.get(partition); + + if (!startOffset.equals(priorOffset)) { + throw new ISE( + "New sequence startOffset[%s] does not equal expected prior offset[%s]", + startOffset, + priorOffset + ); + } + } + } + + // Actually do the add. + sequences.add(sequenceMetadata); + } + + /** + * Returns true if the given record has already been read, based on lastReadOffsets. 
+ */ + private boolean isRecordAlreadyRead( + final PartitionIdType recordPartition, + final SequenceOffsetType recordSequenceNumber + ) + { + final SequenceOffsetType lastReadOffset = lastReadOffsets.get(recordPartition); + + if (lastReadOffset == null) { + return false; + } else { + return createSequenceNumber(recordSequenceNumber).compareTo(createSequenceNumber(lastReadOffset)) <= 0; + } + } + + /** + * Returns true if, given that we want to start reading from recordSequenceNumber and end at endSequenceNumber, there + * is more left to read. Used in pre-read checks to determine if there is anything left to read. + */ + private boolean isMoreToReadBeforeReadingRecord( + final SequenceOffsetType recordSequenceNumber, + final SequenceOffsetType endSequenceNumber + ) + { + final int compareToEnd = createSequenceNumber(recordSequenceNumber) + .compareTo(createSequenceNumber(endSequenceNumber)); + + return isEndOffsetExclusive() ? compareToEnd < 0 : compareToEnd <= 0; + } + + /** + * Returns true if, given that recordSequenceNumber has already been read and we want to end at endSequenceNumber, + * there is more left to read. Used in post-read checks to determine if there is anything left to read. + */ + private boolean isMoreToReadAfterReadingRecord( + final SequenceOffsetType recordSequenceNumber, + final SequenceOffsetType endSequenceNumber + ) + { + final int compareNextToEnd = createSequenceNumber(getNextStartOffset(recordSequenceNumber)) + .compareTo(createSequenceNumber(endSequenceNumber)); + + // Unlike isMoreToReadBeforeReadingRecord, we don't care if the end is exclusive or not. If we read it, we're done. 
+ return compareNextToEnd < 0; + } private void seekToStartingSequence( RecordSupplier recordSupplier, @@ -1069,7 +1130,7 @@ private void seekToStartingSequence( { for (final StreamPartition partition : partitions) { final SequenceOffsetType sequence = currOffsets.get(partition.getPartitionId()); - log.info("Seeking partition[%s] to sequence[%s].", partition.getPartitionId(), sequence); + log.info("Seeking partition[%s] to sequenceNumber[%s].", partition.getPartitionId(), sequence); recordSupplier.seek(partition, sequence); } } @@ -1128,7 +1189,7 @@ private void handleParseException(ParseException pe, OrderedPartitionableRecord if (tuningConfig.isLogParseExceptions()) { log.error( pe, - "Encountered parse exception on row from partition[%s] sequence[%s]", + "Encountered parse exception on row from partition[%s] sequenceNumber[%s]", record.getPartitionId(), record.getSequenceNumber() ); @@ -1254,7 +1315,7 @@ public void stopGracefully() } } catch (Exception e) { - Throwables.propagate(e); + throw new RuntimeException(e); } } @@ -1291,7 +1352,7 @@ public Map getCurrentOffsets(@Context final return getCurrentOffsets(); } - public Map getCurrentOffsets() + public ConcurrentMap getCurrentOffsets() { return currOffsets; } @@ -1389,20 +1450,25 @@ public Response setEndOffsets( // and after acquiring pauseLock to correctly guard against duplicate requests Preconditions.checkState(sequenceNumbers.size() > 0, "WTH?! 
No Sequences found to set end sequences"); - final SequenceMetadata latestSequence = sequences.get(sequences.size() - 1); - // if a partition has not been read yet (contained in initialOffsetsSnapshot), then - // do not mark the starting sequence number as exclusive - Set exclusivePartitions = sequenceNumbers.keySet() - .stream() - .filter(x -> !initialOffsetsSnapshot.containsKey(x) - || ioConfig.getExclusiveStartSequenceNumberPartitions() - .contains(x)) - .collect(Collectors.toSet()); - - if ((latestSequence.getStartOffsets().equals(sequenceNumbers) && latestSequence.exclusiveStartPartitions.equals( - exclusivePartitions) && !finish) || - (latestSequence.getEndOffsets().equals(sequenceNumbers) && finish)) { + final SequenceMetadata latestSequence = sequences.get(sequences.size() - 1); + final Set exclusiveStartPartitions; + + if (isEndOffsetExclusive()) { + // When end offsets are exclusive, there's no need for marking the next sequence as having any + // exclusive-start partitions. It should always start from the end offsets of the prior sequence. + exclusiveStartPartitions = Collections.emptySet(); + } else { + // When end offsets are inclusive, we must mark all partitions as exclusive-start, to avoid reading + // their final messages (which have already been read). 
+ exclusiveStartPartitions = sequenceNumbers.keySet(); + } + + if ((latestSequence.getStartOffsets().equals(sequenceNumbers) + && latestSequence.getExclusiveStartPartitions().equals(exclusiveStartPartitions) + && !finish) + || (latestSequence.getEndOffsets().equals(sequenceNumbers) && finish)) { log.warn("Ignoring duplicate request, end sequences already set for sequences [%s]", sequenceNumbers); + resume(); return Response.ok(sequenceNumbers).build(); } else if (latestSequence.isCheckpointed()) { return Response.status(Response.Status.BAD_REQUEST) @@ -1418,8 +1484,7 @@ public Response setEndOffsets( } for (Map.Entry entry : sequenceNumbers.entrySet()) { - if (createSequenceNumber(entry.getValue()).compareTo(createSequenceNumber(currOffsets.get(entry.getKey()))) - < 0) { + if (createSequenceNumber(entry.getValue()).compareTo(createSequenceNumber(currOffsets.get(entry.getKey()))) < 0) { return Response.status(Response.Status.BAD_REQUEST) .entity( StringUtils.format( @@ -1439,19 +1504,17 @@ public Response setEndOffsets( log.info("Updating endOffsets from [%s] to [%s]", endOffsets, sequenceNumbers); endOffsets.putAll(sequenceNumbers); } else { - exclusiveStartingPartitions.addAll(exclusivePartitions); - // create new sequence - final SequenceMetadata newSequence = new SequenceMetadata( + log.info("Creating new sequence with startOffsets [%s] and endOffsets [%s]", sequenceNumbers, endOffsets); + final SequenceMetadata newSequence = new SequenceMetadata<>( latestSequence.getSequenceId() + 1, StringUtils.format("%s_%d", ioConfig.getBaseSequenceName(), latestSequence.getSequenceId() + 1), sequenceNumbers, endOffsets, false, - exclusivePartitions + exclusiveStartPartitions ); - sequences.add(newSequence); - initialOffsetsSnapshot.putAll(sequenceNumbers); + addSequence(newSequence); } persistSequences(); } @@ -1560,7 +1623,7 @@ public Response pause() throws InterruptedException return 
Response.ok().entity(toolbox.getObjectMapper().writeValueAsString(getCurrentOffsets())).build(); } catch (JsonProcessingException e) { - throw Throwables.propagate(e); + throw new RuntimeException(e); } } @@ -1605,329 +1668,56 @@ public DateTime getStartTime(@Context final HttpServletRequest req) return startTime; } - private class SequenceMetadata + /** + * This method does two things: + * + * 1) Verifies that the sequence numbers we read are at least as high as those read previously, and throws an + * exception if not. + * 2) Returns false if we should skip this record because it's either (a) the first record in a partition that we are + * needing to be exclusive on; (b) too late to read, past the endOffsets. + */ + private boolean verifyRecordInRange( + final PartitionIdType partition, + final SequenceOffsetType recordOffset + ) { - private final int sequenceId; - private final String sequenceName; - private final Set exclusiveStartPartitions; - private final Set assignments; - private final boolean sentinel; - private boolean checkpointed; - /** - * Lock for accessing {@link #endOffsets} and {@link #checkpointed}. This lock is required because - * {@link #setEndOffsets)} can be called by both the main thread and the HTTP thread. 
- */ - private final ReentrantLock lock = new ReentrantLock(); - - final Map startOffsets; - final Map endOffsets; - - @JsonCreator - public SequenceMetadata( - @JsonProperty("sequenceId") int sequenceId, - @JsonProperty("sequenceName") String sequenceName, - @JsonProperty("startOffsets") Map startOffsets, - @JsonProperty("endOffsets") Map endOffsets, - @JsonProperty("checkpointed") boolean checkpointed, - @JsonProperty("exclusiveStartPartitions") Set exclusiveStartPartitions - ) - { - Preconditions.checkNotNull(sequenceName); - Preconditions.checkNotNull(startOffsets); - Preconditions.checkNotNull(endOffsets); - this.sequenceId = sequenceId; - this.sequenceName = sequenceName; - this.startOffsets = ImmutableMap.copyOf(startOffsets); - this.endOffsets = new HashMap<>(endOffsets); - this.assignments = new HashSet<>(startOffsets.keySet()); - this.checkpointed = checkpointed; - this.sentinel = false; - this.exclusiveStartPartitions = exclusiveStartPartitions == null - ? Collections.emptySet() - : exclusiveStartPartitions; - } - - @JsonProperty - public Set getExclusiveStartPartitions() - { - return exclusiveStartPartitions; - } - - @JsonProperty - public int getSequenceId() - { - return sequenceId; - } - - @JsonProperty - public boolean isCheckpointed() - { - lock.lock(); - try { - return checkpointed; - } - finally { - lock.unlock(); - } - } - - @JsonProperty - public String getSequenceName() - { - return sequenceName; - } - - @JsonProperty - public Map getStartOffsets() - { - return startOffsets; - } - - @JsonProperty - public Map getEndOffsets() - { - lock.lock(); - try { - return endOffsets; - } - finally { - lock.unlock(); - } - } - - @JsonProperty - public boolean isSentinel() - { - return sentinel; - } - - void setEndOffsets(Map newEndOffsets) - { - lock.lock(); - try { - endOffsets.putAll(newEndOffsets); - checkpointed = true; - } - finally { - lock.unlock(); - } - } - - void updateAssignments(Map nextPartitionOffset) - { - lock.lock(); - try { - 
assignments.clear(); - nextPartitionOffset.forEach((key, value) -> { - if (endOffsets.get(key).equals(SeekableStreamPartitions.NO_END_SEQUENCE_NUMBER) - || createSequenceNumber(endOffsets.get(key)).compareTo(createSequenceNumber(nextPartitionOffset.get(key))) - > 0) { - assignments.add(key); - } - }); - } - finally { - lock.unlock(); - } - } - - boolean isOpen() - { - return !assignments.isEmpty(); - } - - boolean canHandle(OrderedPartitionableRecord record) - { - lock.lock(); - try { - final OrderedSequenceNumber partitionEndOffset = createSequenceNumber(endOffsets.get(record.getPartitionId())); - final OrderedSequenceNumber partitionStartOffset = createSequenceNumber(startOffsets.get( - record.getPartitionId())); - final OrderedSequenceNumber recordOffset = createSequenceNumber(record.getSequenceNumber()); - if (!isOpen() || recordOffset == null || partitionEndOffset == null || partitionStartOffset == null) { - return false; - } - boolean ret; - if (isStartingSequenceOffsetsExclusive()) { - ret = recordOffset.compareTo(partitionStartOffset) - >= (getExclusiveStartPartitions().contains(record.getPartitionId()) ? 1 : 0); - } else { - ret = recordOffset.compareTo(partitionStartOffset) >= 0; - } - - if (isEndSequenceOffsetsExclusive()) { - ret &= recordOffset.compareTo(partitionEndOffset) < 0; - } else { - ret &= recordOffset.compareTo(partitionEndOffset) <= 0; - } - - return ret; - } - finally { - lock.unlock(); - } - } - - @Override - public String toString() - { - lock.lock(); - try { - return "SequenceMetadata{" + - "sequenceName='" + sequenceName + '\'' + - ", sequenceId=" + sequenceId + - ", startOffsets=" + startOffsets + - ", endOffsets=" + endOffsets + - ", assignments=" + assignments + - ", sentinel=" + sentinel + - ", checkpointed=" + checkpointed + - '}'; - } - finally { - lock.unlock(); - } - } - - Supplier getCommitterSupplier( - String stream, - Map lastPersistedOffsets - ) - { - // Set up committer. 
- return () -> - new Committer() - { - @Override - public Object getMetadata() - { - lock.lock(); - - try { - Preconditions.checkState( - assignments.isEmpty(), - "This committer can be used only once all the records till sequences [%s] have been consumed, also make" - + " sure to call updateAssignments before using this committer", - endOffsets - ); - - - // merge endOffsets for this sequence with globally lastPersistedOffsets - // This is done because this committer would be persisting only sub set of segments - // corresponding to the current sequence. Generally, lastPersistedOffsets should already - // cover endOffsets but just to be sure take max of sequences and persist that - for (Map.Entry partitionOffset : endOffsets.entrySet()) { - SequenceOffsetType newOffsets = partitionOffset.getValue(); - if (lastPersistedOffsets.containsKey(partitionOffset.getKey()) && - createSequenceNumber(lastPersistedOffsets.get(partitionOffset.getKey())).compareTo( - createSequenceNumber(newOffsets)) > 0) { - newOffsets = lastPersistedOffsets.get(partitionOffset.getKey()); - } - lastPersistedOffsets.put( - partitionOffset.getKey(), - newOffsets - ); - } - - // Publish metadata can be different from persist metadata as we are going to publish only - // subset of segments - return ImmutableMap.of( - METADATA_NEXT_PARTITIONS, new SeekableStreamPartitions<>(stream, lastPersistedOffsets), - METADATA_PUBLISH_PARTITIONS, new SeekableStreamPartitions<>(stream, endOffsets) - ); - } - finally { - lock.unlock(); - } - } + // Verify that the record is at least as high as its currOffset. + final SequenceOffsetType currOffset = Preconditions.checkNotNull( + currOffsets.get(partition), + "Current offset is null for sequenceNumber[%s] and partition[%s]", + recordOffset, + partition + ); - @Override - public void run() - { - // Do nothing. 
- } - }; + final OrderedSequenceNumber recordSequenceNumber = createSequenceNumber(recordOffset); + final OrderedSequenceNumber currentSequenceNumber = createSequenceNumber(currOffset); + final int comparisonToCurrent = recordSequenceNumber.compareTo(currentSequenceNumber); + if (comparisonToCurrent < 0) { + throw new ISE( + "Record sequenceNumber[%s] is smaller than current sequenceNumber[%s] for partition[%s]", + recordOffset, + currOffset, + partition + ); } - TransactionalSegmentPublisher createPublisher(TaskToolbox toolbox, boolean useTransaction) - { - return (segments, commitMetadata) -> { - final SeekableStreamPartitions finalPartitions = deserializeSeekableStreamPartitionsFromMetadata( - toolbox.getObjectMapper(), - ((Map) Preconditions - .checkNotNull(commitMetadata, "commitMetadata")).get(METADATA_PUBLISH_PARTITIONS) - ); - - // Sanity check, we should only be publishing things that match our desired end state. - if (!getEndOffsets().equals(finalPartitions.getPartitionSequenceNumberMap())) { - throw new ISE( - "WTF?! Driver for sequence [%s], attempted to publish invalid metadata[%s].", - toString(), - commitMetadata - ); - } - - final SegmentTransactionalInsertAction action; - - if (useTransaction) { - action = new SegmentTransactionalInsertAction( - segments, - createDataSourceMetadata(new SeekableStreamPartitions<>( - finalPartitions.getStream(), - getStartOffsets() - )), - createDataSourceMetadata(finalPartitions) - ); - } else { - action = new SegmentTransactionalInsertAction(segments, null, null); - } - - log.info("Publishing with isTransaction[%s].", useTransaction); - - return toolbox.getTaskActionClient().submit(action); - }; + // Check if the record has already been read. + if (isRecordAlreadyRead(partition, recordOffset)) { + return false; } + // Finally, check if this record comes before the endOffsets for this partition. 
+ return isMoreToReadBeforeReadingRecord(recordSequenceNumber.get(), endOffsets.get(partition)); } - private boolean verifyInitialRecordAndSkipExclusivePartition( - final OrderedPartitionableRecord record, - final Map intialSequenceSnapshot - ) - { - if (intialSequenceSnapshot.containsKey(record.getPartitionId())) { - if (!intialSequenceSnapshot.get(record.getPartitionId()).equals(record.getSequenceNumber())) { - throw new ISE( - "Starting sequenceNumber [%s] does not match expected [%s] for partition [%s]", - record.getSequenceNumber(), - intialSequenceSnapshot.get(record.getPartitionId()), - record.getPartitionId() - ); - } - - log.info( - "Verified starting sequenceNumber [%s] for partition [%s]", - record.getSequenceNumber(), record.getPartitionId() - ); - - intialSequenceSnapshot.remove(record.getPartitionId()); - if (intialSequenceSnapshot.isEmpty()) { - log.info("Verified starting sequences for all partitions"); - } - - // check exclusive starting sequence - if (isStartingSequenceOffsetsExclusive() && exclusiveStartingPartitions.contains(record.getPartitionId())) { - log.info("Skipping starting sequenceNumber for partition [%s] marked exclusive", record.getPartitionId()); - - return false; - } - } - - return true; - } + /** + * checks if the input seqNum marks end of shard. Used by Kinesis only + */ + protected abstract boolean isEndOfShard(SequenceOffsetType seqNum); /** - * deserailizes the checkpoints into of Map> + * deserializes the checkpoints into of Map> * * @param toolbox task toolbox * @param checkpointsString the json-serialized checkpoint string @@ -1943,26 +1733,24 @@ protected abstract TreeMap> ge ) throws IOException; /** - * Calculates the sequence number used to update `currentOffsets` after finishing reading a record. 
- * In Kafka this returns sequenceNumeber + 1 since that's the next expected offset - * In Kinesis this simply returns sequenceNumber, since the sequence numbers in Kinesis are not - * contiguous and finding the next sequence number requires an expensive API call + * Calculates the sequence number used to update currOffsets after finished reading a record. + * This is what would become the start offsets of the next reader, if we stopped reading now. * * @param sequenceNumber the sequence number that has already been processed * * @return next sequence number to be stored */ - protected abstract SequenceOffsetType getSequenceNumberToStoreAfterRead(SequenceOffsetType sequenceNumber); + protected abstract SequenceOffsetType getNextStartOffset(SequenceOffsetType sequenceNumber); /** - * deserialzies stored metadata into SeekableStreamPartitions + * deserializes stored metadata into SeekableStreamPartitions * * @param mapper json objectMapper * @param object metadata * * @return SeekableStreamPartitions */ - protected abstract SeekableStreamPartitions deserializeSeekableStreamPartitionsFromMetadata( + protected abstract SeekableStreamPartitions deserializePartitionsFromMetadata( ObjectMapper mapper, Object object ); @@ -2024,12 +1812,7 @@ protected abstract void possiblyResetDataSourceMetadata( * In Kafka, the endOffsets are exclusive, so skip it. * In Kinesis the endOffsets are inclusive */ - protected abstract boolean isEndSequenceOffsetsExclusive(); + protected abstract boolean isEndOffsetExclusive(); - /** - * In Kafka, the startingOffsets are inclusive. 
- * In Kinesis, the startingOffsets are exclusive, except for the first - * partition we read from stream - */ - protected abstract boolean isStartingSequenceOffsetsExclusive(); + protected abstract TypeReference>> getSequenceMetadataTypeReference(); } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskTuningConfig.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskTuningConfig.java index 3ebfe5915aa8..b594e42a66cc 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskTuningConfig.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamIndexTaskTuningConfig.java @@ -19,7 +19,6 @@ package org.apache.druid.indexing.seekablestream; -import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.druid.segment.IndexSpec; import org.apache.druid.segment.indexing.RealtimeTuningConfig; @@ -60,27 +59,26 @@ public abstract class SeekableStreamIndexTaskTuningConfig implements TuningConfi private final int maxParseExceptions; private final int maxSavedParseExceptions; - @JsonCreator public SeekableStreamIndexTaskTuningConfig( - @JsonProperty("maxRowsInMemory") @Nullable Integer maxRowsInMemory, - @JsonProperty("maxBytesInMemory") @Nullable Long maxBytesInMemory, - @JsonProperty("maxRowsPerSegment") @Nullable Integer maxRowsPerSegment, - @JsonProperty("maxTotalRows") @Nullable Long maxTotalRows, - @JsonProperty("intermediatePersistPeriod") @Nullable Period intermediatePersistPeriod, - @JsonProperty("basePersistDirectory") @Nullable File basePersistDirectory, - @JsonProperty("maxPendingPersists") @Nullable Integer maxPendingPersists, - @JsonProperty("indexSpec") @Nullable IndexSpec indexSpec, + @Nullable Integer maxRowsInMemory, + @Nullable Long maxBytesInMemory, + @Nullable Integer maxRowsPerSegment, + @Nullable Long 
maxTotalRows, + @Nullable Period intermediatePersistPeriod, + @Nullable File basePersistDirectory, + @Nullable Integer maxPendingPersists, + @Nullable IndexSpec indexSpec, // This parameter is left for compatibility when reading existing configs, to be removed in Druid 0.12. - @JsonProperty("buildV9Directly") @Nullable Boolean buildV9Directly, - @Deprecated @JsonProperty("reportParseExceptions") @Nullable Boolean reportParseExceptions, - @JsonProperty("handoffConditionTimeout") @Nullable Long handoffConditionTimeout, - @JsonProperty("resetOffsetAutomatically") @Nullable Boolean resetOffsetAutomatically, - @JsonProperty("skipSequenceNumberAvailabilityCheck") Boolean skipSequenceNumberAvailabilityCheck, - @JsonProperty("segmentWriteOutMediumFactory") @Nullable SegmentWriteOutMediumFactory segmentWriteOutMediumFactory, - @JsonProperty("intermediateHandoffPeriod") @Nullable Period intermediateHandoffPeriod, - @JsonProperty("logParseExceptions") @Nullable Boolean logParseExceptions, - @JsonProperty("maxParseExceptions") @Nullable Integer maxParseExceptions, - @JsonProperty("maxSavedParseExceptions") @Nullable Integer maxSavedParseExceptions + @Deprecated @JsonProperty("buildV9Directly") @Nullable Boolean buildV9Directly, + @Deprecated @Nullable Boolean reportParseExceptions, + @Nullable Long handoffConditionTimeout, + @Nullable Boolean resetOffsetAutomatically, + Boolean skipSequenceNumberAvailabilityCheck, + @Nullable SegmentWriteOutMediumFactory segmentWriteOutMediumFactory, + @Nullable Period intermediateHandoffPeriod, + @Nullable Boolean logParseExceptions, + @Nullable Integer maxParseExceptions, + @Nullable Integer maxSavedParseExceptions ) { // Cannot be a static because default basePersistDirectory is unique per-instance diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamPartitions.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamPartitions.java index 
8c034da41fb4..dc3ff87aff15 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamPartitions.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SeekableStreamPartitions.java @@ -31,14 +31,13 @@ /** * class that encapsulates a partitionIdToSequenceNumberMap of partitionId -> sequenceNumber. * To be backward compatible with both Kafka and Kinesis datasource metadata when - * deserializing json. Redundant constructor fields stream, topic and + * serializing and deserializing json, redundant constructor fields stream, topic, * partitionSequenceNumberMap and partitionOffsetMap are created. Only one of topic, stream * should have a non-null value and only one of partitionOffsetMap and partitionSequenceNumberMap * should have a non-null value. - *

- * Redundant getters - * are used for proper Jackson serialization/deserialization when processing terminologies - * used by Kafka and kinesis (i.e. topic vs. stream) + * + * Redundant getters are used for proper Jackson serialization/deserialization when processing terminologies + * used by Kafka and Kinesis (i.e. topic vs. stream) * * @param partition id type * @param sequence number type @@ -81,7 +80,12 @@ public SeekableStreamPartitions( final Map partitionOffsetMap ) { - this(stream, null, partitionOffsetMap, null); + this( + Preconditions.checkNotNull(stream, "stream"), + null, + Preconditions.checkNotNull(partitionOffsetMap, "partitionOffsetMap"), + null + ); } @JsonProperty @@ -90,12 +94,32 @@ public String getStream() return stream; } + /** + * Identical to {@link #getStream()}. Here for backwards compatibility, so a serialized SeekableStreamPartitions can + * be read by older Druid versions as a KafkaPartitions object. + */ + @JsonProperty + public String getTopic() + { + return stream; + } + @JsonProperty public Map getPartitionSequenceNumberMap() { return partitionIdToSequenceNumberMap; } + /** + * Identical to {@link #getPartitionSequenceNumberMap()} ()}. Here for backwards compatibility, so a serialized + * SeekableStreamPartitions can be read by older Druid versions as a KafkaPartitions object. 
+ */ + @JsonProperty + public Map getPartitionOffsetMap() + { + return partitionIdToSequenceNumberMap; + } + @Override public boolean equals(Object o) { @@ -119,9 +143,9 @@ public int hashCode() @Override public String toString() { - return "SeekableStreamPartitions{" + - "stream/topic='" + stream + '\'' + - ", partitionSequenceNumberMap/partitionOffsetMap=" + partitionIdToSequenceNumberMap + + return getClass().getSimpleName() + "{" + + "stream='" + stream + '\'' + + ", partitionSequenceNumberMap=" + partitionIdToSequenceNumberMap + '}'; } } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java new file mode 100644 index 000000000000..61bb35a7778a --- /dev/null +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/SequenceMetadata.java @@ -0,0 +1,336 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.indexing.seekablestream; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; +import com.google.common.collect.ImmutableMap; +import org.apache.druid.data.input.Committer; +import org.apache.druid.indexing.common.TaskToolbox; +import org.apache.druid.indexing.common.actions.SegmentTransactionalInsertAction; +import org.apache.druid.indexing.seekablestream.common.OrderedPartitionableRecord; +import org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber; +import org.apache.druid.java.util.common.ISE; +import org.apache.druid.segment.realtime.appenderator.TransactionalSegmentPublisher; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.BiFunction; + +public class SequenceMetadata +{ + private final int sequenceId; + private final String sequenceName; + private final Set exclusiveStartPartitions; + private final Set assignments; + private final boolean sentinel; + private boolean checkpointed; + /** + * Lock for accessing {@link #endOffsets} and {@link #checkpointed}. This lock is required because + * {@link #setEndOffsets)} can be called by both the main thread and the HTTP thread. 
+ */ + private final ReentrantLock lock = new ReentrantLock(); + + final Map startOffsets; + final Map endOffsets; + + @JsonCreator + public SequenceMetadata( + @JsonProperty("sequenceId") int sequenceId, + @JsonProperty("sequenceName") String sequenceName, + @JsonProperty("startOffsets") Map startOffsets, + @JsonProperty("endOffsets") Map endOffsets, + @JsonProperty("checkpointed") boolean checkpointed, + @JsonProperty("exclusiveStartPartitions") Set exclusiveStartPartitions + ) + { + Preconditions.checkNotNull(sequenceName); + Preconditions.checkNotNull(startOffsets); + Preconditions.checkNotNull(endOffsets); + this.sequenceId = sequenceId; + this.sequenceName = sequenceName; + this.startOffsets = ImmutableMap.copyOf(startOffsets); + this.endOffsets = new HashMap<>(endOffsets); + this.assignments = new HashSet<>(startOffsets.keySet()); + this.checkpointed = checkpointed; + this.sentinel = false; + this.exclusiveStartPartitions = exclusiveStartPartitions == null + ? Collections.emptySet() + : exclusiveStartPartitions; + } + + @JsonProperty + public Set getExclusiveStartPartitions() + { + return exclusiveStartPartitions; + } + + @JsonProperty + public int getSequenceId() + { + return sequenceId; + } + + @JsonProperty + public boolean isCheckpointed() + { + lock.lock(); + try { + return checkpointed; + } + finally { + lock.unlock(); + } + } + + @JsonProperty + public String getSequenceName() + { + return sequenceName; + } + + @JsonProperty + public Map getStartOffsets() + { + return startOffsets; + } + + @JsonProperty + public Map getEndOffsets() + { + lock.lock(); + try { + return endOffsets; + } + finally { + lock.unlock(); + } + } + + @JsonProperty + public boolean isSentinel() + { + return sentinel; + } + + void setEndOffsets(Map newEndOffsets) + { + lock.lock(); + try { + endOffsets.putAll(newEndOffsets); + checkpointed = true; + } + finally { + lock.unlock(); + } + } + + void updateAssignments( + Map currOffsets, + BiFunction moreToReadFn + ) + { + 
lock.lock(); + try { + assignments.clear(); + currOffsets.forEach((key, value) -> { + SequenceOffsetType endOffset = endOffsets.get(key); + if (moreToReadFn.apply(value, endOffset)) { + assignments.add(key); + } + }); + } + finally { + lock.unlock(); + } + } + + boolean isOpen() + { + return !assignments.isEmpty(); + } + + boolean canHandle( + SeekableStreamIndexTaskRunner runner, + OrderedPartitionableRecord record + ) + { + lock.lock(); + try { + final OrderedSequenceNumber partitionEndOffset = runner.createSequenceNumber(endOffsets.get(record.getPartitionId())); + final OrderedSequenceNumber partitionStartOffset = runner.createSequenceNumber(startOffsets.get( + record.getPartitionId())); + final OrderedSequenceNumber recordOffset = runner.createSequenceNumber(record.getSequenceNumber()); + if (!isOpen() || recordOffset == null || partitionEndOffset == null || partitionStartOffset == null) { + return false; + } + boolean ret; + if (!runner.isEndOffsetExclusive()) { + // Inclusive endOffsets mean that we must skip the first record of any partition that has been read before. + ret = recordOffset.compareTo(partitionStartOffset) + >= (getExclusiveStartPartitions().contains(record.getPartitionId()) ? 
1 : 0); + } else { + ret = recordOffset.compareTo(partitionStartOffset) >= 0; + } + + if (runner.isEndOffsetExclusive()) { + ret &= recordOffset.compareTo(partitionEndOffset) < 0; + } else { + ret &= recordOffset.compareTo(partitionEndOffset) <= 0; + } + + return ret; + } + finally { + lock.unlock(); + } + } + + @Override + public String toString() + { + lock.lock(); + try { + return "SequenceMetadata{" + + "sequenceName='" + sequenceName + '\'' + + ", sequenceId=" + sequenceId + + ", startOffsets=" + startOffsets + + ", endOffsets=" + endOffsets + + ", assignments=" + assignments + + ", sentinel=" + sentinel + + ", checkpointed=" + checkpointed + + '}'; + } + finally { + lock.unlock(); + } + } + + Supplier getCommitterSupplier( + SeekableStreamIndexTaskRunner runner, + String stream, + Map lastPersistedOffsets + ) + { + // Set up committer. + return () -> + new Committer() + { + @Override + public Object getMetadata() + { + lock.lock(); + + try { + Preconditions.checkState( + assignments.isEmpty(), + "This committer can be used only once all the records till sequences [%s] have been consumed, also make" + + " sure to call updateAssignments before using this committer", + endOffsets + ); + + + // merge endOffsets for this sequence with globally lastPersistedOffsets + // This is done because this committer would be persisting only sub set of segments + // corresponding to the current sequence. 
Generally, lastPersistedOffsets should already + // cover endOffsets but just to be sure take max of sequences and persist that + for (Map.Entry partitionOffset : endOffsets.entrySet()) { + SequenceOffsetType newOffsets = partitionOffset.getValue(); + if (lastPersistedOffsets.containsKey(partitionOffset.getKey()) + && runner.createSequenceNumber(lastPersistedOffsets.get(partitionOffset.getKey())) + .compareTo(runner.createSequenceNumber(newOffsets)) > 0) { + newOffsets = lastPersistedOffsets.get(partitionOffset.getKey()); + } + lastPersistedOffsets.put( + partitionOffset.getKey(), + newOffsets + ); + } + + // Publish metadata can be different from persist metadata as we are going to publish only + // subset of segments + return ImmutableMap.of( + SeekableStreamIndexTaskRunner.METADATA_NEXT_PARTITIONS, + new SeekableStreamPartitions<>(stream, lastPersistedOffsets), + SeekableStreamIndexTaskRunner.METADATA_PUBLISH_PARTITIONS, + new SeekableStreamPartitions<>(stream, endOffsets) + ); + } + finally { + lock.unlock(); + } + } + + @Override + public void run() + { + // Do nothing. + } + }; + + } + + TransactionalSegmentPublisher createPublisher( + SeekableStreamIndexTaskRunner runner, + TaskToolbox toolbox, + boolean useTransaction + ) + { + return (segments, commitMetadata) -> { + final Map commitMetaMap = (Map) Preconditions.checkNotNull(commitMetadata, "commitMetadata"); + final SeekableStreamPartitions finalPartitions = + runner.deserializePartitionsFromMetadata( + toolbox.getObjectMapper(), + commitMetaMap.get(SeekableStreamIndexTaskRunner.METADATA_PUBLISH_PARTITIONS) + ); + + // Sanity check, we should only be publishing things that match our desired end state. + if (!getEndOffsets().equals(finalPartitions.getPartitionSequenceNumberMap())) { + throw new ISE( + "WTF?! 
Driver for sequence [%s], attempted to publish invalid metadata[%s].", + toString(), + commitMetadata + ); + } + + final SegmentTransactionalInsertAction action; + + if (useTransaction) { + action = new SegmentTransactionalInsertAction( + segments, + runner.createDataSourceMetadata( + new SeekableStreamPartitions<>(finalPartitions.getStream(), getStartOffsets()) + ), + runner.createDataSourceMetadata(finalPartitions) + ); + } else { + action = new SegmentTransactionalInsertAction(segments, null, null); + } + + return toolbox.getTaskActionClient().submit(action); + }; + } +} diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/common/OrderedSequenceNumber.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/common/OrderedSequenceNumber.java index f193488240d9..74fd08d445ec 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/common/OrderedSequenceNumber.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/common/OrderedSequenceNumber.java @@ -19,6 +19,7 @@ package org.apache.druid.indexing.seekablestream.common; +import java.util.Objects; /** * Represents a Kafka/Kinesis stream sequence number. 
Mainly used to do @@ -51,4 +52,33 @@ public boolean isExclusive() { return isExclusive; } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + OrderedSequenceNumber that = (OrderedSequenceNumber) o; + return isExclusive == that.isExclusive && + Objects.equals(sequenceNumber, that.sequenceNumber); + } + + @Override + public int hashCode() + { + return Objects.hash(sequenceNumber, isExclusive); + } + + @Override + public String toString() + { + return getClass().getSimpleName() + "{" + + "sequenceNumber=" + sequenceNumber + + ", isExclusive=" + isExclusive + + '}'; + } } diff --git a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java index 25250ac0487c..b93d15f44418 100644 --- a/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java +++ b/indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java @@ -119,8 +119,7 @@ * @param the type of the partition id, for example, partitions in Kafka are int type while partitions in Kinesis are String type * @param the type of the sequence number or offsets, for example, Kafka uses long offsets while Kinesis uses String sequence numbers */ -public abstract class SeekableStreamSupervisor - implements Supervisor +public abstract class SeekableStreamSupervisor implements Supervisor { public static final String IS_INCREMENTAL_HANDOFF_SUPPORTED = "IS_INCREMENTAL_HANDOFF_SUPPORTED"; diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/common/task/IngestionTestBase.java b/indexing-service/src/test/java/org/apache/druid/indexing/common/task/IngestionTestBase.java index 8406adb7e948..c70a71a939b5 100644 --- 
a/indexing-service/src/test/java/org/apache/druid/indexing/common/task/IngestionTestBase.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/common/task/IngestionTestBase.java @@ -113,6 +113,11 @@ public TaskLockbox getLockbox() return lockbox; } + public IndexerSQLMetadataStorageCoordinator getStorageCoordinator() + { + return storageCoordinator; + } + public TaskActionToolbox createTaskActionToolbox() { storageCoordinator.start(); diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/AbstractParallelIndexSupervisorTaskTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/AbstractParallelIndexSupervisorTaskTest.java index 5d42919fa49f..907903aaa567 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/AbstractParallelIndexSupervisorTaskTest.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/AbstractParallelIndexSupervisorTaskTest.java @@ -295,22 +295,6 @@ public Authorizer getAuthorizer(String name) new DropwizardRowIngestionMetersFactory() ); } - - @Override - public TaskStatus run(TaskToolbox toolbox) throws Exception - { - return TaskStatus.fromCode( - getId(), - new TestParallelIndexTaskRunner( - toolbox, - getId(), - getGroupId(), - getIngestionSchema(), - getContext(), - new NoopIndexingServiceClient() - ).run() - ); - } } static class TestParallelIndexTaskRunner extends SinglePhaseParallelIndexTaskRunner diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTaskResourceTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTaskResourceTest.java index 668c8ec19856..04aa5a7a1b07 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTaskResourceTest.java +++ 
b/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTaskResourceTest.java @@ -500,7 +500,7 @@ private class TestSupervisorTask extends TestParallelIndexSupervisorTask } @Override - public TaskStatus run(TaskToolbox toolbox) throws Exception + ParallelIndexTaskRunner createRunner(TaskToolbox toolbox) { setRunner( new TestRunner( @@ -509,10 +509,7 @@ public TaskStatus run(TaskToolbox toolbox) throws Exception indexingServiceClient ) ); - return TaskStatus.fromCode( - getId(), - getRunner().run() - ); + return getRunner(); } } diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTaskTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTaskTest.java index 1d25f73b3968..efc1fc493bf2 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTaskTest.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/common/task/batch/parallel/ParallelIndexSupervisorTaskTest.java @@ -24,7 +24,6 @@ import org.apache.druid.data.input.InputSplit; import org.apache.druid.data.input.impl.StringInputRowParser; import org.apache.druid.indexer.TaskState; -import org.apache.druid.indexer.TaskStatus; import org.apache.druid.indexing.common.TaskToolbox; import org.apache.druid.indexing.common.actions.TaskActionClient; import org.apache.druid.indexing.common.task.TaskResource; @@ -36,6 +35,7 @@ import org.apache.druid.segment.indexing.DataSchema; import org.apache.druid.segment.indexing.granularity.UniformGranularitySpec; import org.apache.druid.segment.realtime.firehose.LocalFirehoseFactory; +import org.apache.druid.timeline.DataSegment; import org.joda.time.Interval; import org.junit.After; import org.junit.Assert; @@ -51,6 +51,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import 
java.util.List; import java.util.Map; public class ParallelIndexSupervisorTaskTest extends AbstractParallelIndexSupervisorTaskTest @@ -126,8 +127,7 @@ public void testIsReady() throws Exception } } - @Test - public void testWithoutInterval() throws Exception + private void runTestWithoutIntervalTask() throws Exception { final ParallelIndexSupervisorTask task = newTask( null, @@ -142,6 +142,29 @@ public void testWithoutInterval() throws Exception prepareTaskForLocking(task); Assert.assertTrue(task.isReady(actionClient)); Assert.assertEquals(TaskState.SUCCESS, task.run(toolbox).getStatusCode()); + shutdownTask(task); + } + + @Test + public void testWithoutInterval() throws Exception + { + // Ingest all data. + runTestWithoutIntervalTask(); + + // Read the segments for one day. + final Interval interval = Intervals.of("2017-12-24/P1D"); + final List oldSegments = + getStorageCoordinator().getUsedSegmentsForInterval("dataSource", interval); + Assert.assertEquals(1, oldSegments.size()); + + // Reingest the same data. Each segment should get replaced by a segment with a newer version. + runTestWithoutIntervalTask(); + + // Verify that the segment has been replaced. 
+ final List newSegments = + getStorageCoordinator().getUsedSegmentsForInterval("dataSource", interval); + Assert.assertEquals(1, newSegments.size()); + Assert.assertTrue(oldSegments.get(0).getVersion().compareTo(newSegments.get(0).getVersion()) < 0); } @Test() @@ -205,33 +228,55 @@ public void testPublishEmptySegments() throws Exception Assert.assertEquals(TaskState.SUCCESS, task.run(toolbox).getStatusCode()); } + @Test + public void testWith1MaxNumSubTasks() throws Exception + { + final ParallelIndexSupervisorTask task = newTask( + Intervals.of("2017/2018"), + new ParallelIndexIOConfig( + new LocalFirehoseFactory(inputDir, "test_*", null), + false + ), + new ParallelIndexTuningConfig( + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + 1, + null, + null, + null, + null, + null, + null, + null + ) + ); + actionClient = createActionClient(task); + toolbox = createTaskToolbox(task); + + prepareTaskForLocking(task); + Assert.assertTrue(task.isReady(actionClient)); + Assert.assertEquals(TaskState.SUCCESS, task.run(toolbox).getStatusCode()); + Assert.assertNull("Runner must be null if the task was in the sequential mode", task.getRunner()); + } + private ParallelIndexSupervisorTask newTask( Interval interval, ParallelIndexIOConfig ioConfig ) { - // set up ingestion spec - final ParallelIndexIngestionSpec ingestionSpec = new ParallelIndexIngestionSpec( - new DataSchema( - "dataSource", - getObjectMapper().convertValue( - new StringInputRowParser( - DEFAULT_PARSE_SPEC, - null - ), - Map.class - ), - new AggregatorFactory[]{ - new LongSumAggregatorFactory("val", "val") - }, - new UniformGranularitySpec( - Granularities.DAY, - Granularities.MINUTE, - interval == null ? 
null : Collections.singletonList(interval) - ), - null, - getObjectMapper() - ), + return newTask( + interval, ioConfig, new ParallelIndexTuningConfig( null, @@ -257,6 +302,39 @@ private ParallelIndexSupervisorTask newTask( null ) ); + } + + private ParallelIndexSupervisorTask newTask( + Interval interval, + ParallelIndexIOConfig ioConfig, + ParallelIndexTuningConfig tuningConfig + ) + { + // set up ingestion spec + final ParallelIndexIngestionSpec ingestionSpec = new ParallelIndexIngestionSpec( + new DataSchema( + "dataSource", + getObjectMapper().convertValue( + new StringInputRowParser( + DEFAULT_PARSE_SPEC, + null + ), + Map.class + ), + new AggregatorFactory[]{ + new LongSumAggregatorFactory("val", "val") + }, + new UniformGranularitySpec( + Granularities.DAY, + Granularities.MINUTE, + interval == null ? null : Collections.singletonList(interval) + ), + null, + getObjectMapper() + ), + ioConfig, + tuningConfig + ); // set up test tools return new TestSupervisorTask( @@ -291,9 +369,8 @@ private static class TestSupervisorTask extends TestParallelIndexSupervisorTask } @Override - public TaskStatus run(TaskToolbox toolbox) throws Exception + ParallelIndexTaskRunner createRunner(TaskToolbox toolbox) { - setToolbox(toolbox); setRunner( new TestRunner( toolbox, @@ -301,10 +378,7 @@ public TaskStatus run(TaskToolbox toolbox) throws Exception indexingServiceClient ) ); - return TaskStatus.fromCode( - getId(), - getRunner().run() - ); + return getRunner(); } } diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/overlord/TaskLockboxTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/overlord/TaskLockboxTest.java index 560425512513..a2a14a006bba 100644 --- a/indexing-service/src/test/java/org/apache/druid/indexing/overlord/TaskLockboxTest.java +++ b/indexing-service/src/test/java/org/apache/druid/indexing/overlord/TaskLockboxTest.java @@ -667,6 +667,36 @@ public void testFindLockPosseAfterRevokeWithDifferentLockIntervals() throws Entr 
Assert.assertTrue(lowLockPosse.getTaskLock().isRevoked()); } + @Test + public void testLockPosseEquals() + { + final Task task1 = NoopTask.create(); + final Task task2 = NoopTask.create(); + + TaskLock taskLock1 = new TaskLock(TaskLockType.EXCLUSIVE, + task1.getGroupId(), + task1.getDataSource(), + Intervals.of("2018/2019"), + "v1", + task1.getPriority()); + + TaskLock taskLock2 = new TaskLock(TaskLockType.EXCLUSIVE, + task2.getGroupId(), + task2.getDataSource(), + Intervals.of("2018/2019"), + "v2", + task2.getPriority()); + + TaskLockPosse taskLockPosse1 = new TaskLockPosse(taskLock1); + TaskLockPosse taskLockPosse2 = new TaskLockPosse(taskLock2); + TaskLockPosse taskLockPosse3 = new TaskLockPosse(taskLock1); + + Assert.assertNotEquals(taskLockPosse1, null); + Assert.assertNotEquals(null, taskLockPosse1); + Assert.assertNotEquals(taskLockPosse1, taskLockPosse2); + Assert.assertEquals(taskLockPosse1, taskLockPosse3); + } + private Set getAllLocks(List tasks) { return tasks.stream() diff --git a/indexing-service/src/test/java/org/apache/druid/indexing/seekablestream/SeekableStreamPartitionsTest.java b/indexing-service/src/test/java/org/apache/druid/indexing/seekablestream/SeekableStreamPartitionsTest.java new file mode 100644 index 000000000000..da5c3eca99bc --- /dev/null +++ b/indexing-service/src/test/java/org/apache/druid/indexing/seekablestream/SeekableStreamPartitionsTest.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.indexing.seekablestream; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.collect.ImmutableMap; +import org.apache.druid.java.util.common.jackson.JacksonUtils; +import org.apache.druid.segment.TestHelper; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Map; + +public class SeekableStreamPartitionsTest +{ + private static final ObjectMapper OBJECT_MAPPER = TestHelper.makeJsonMapper(); + + @Test + public void testSerde() throws Exception + { + final String stream = "theStream"; + final Map offsetMap = ImmutableMap.of(1, 2L, 3, 4L); + + final SeekableStreamPartitions partitions = new SeekableStreamPartitions<>(stream, offsetMap); + final String serializedString = OBJECT_MAPPER.writeValueAsString(partitions); + + // Check round-trip. + final SeekableStreamPartitions partitions2 = OBJECT_MAPPER.readValue( + serializedString, + new TypeReference>() {} + ); + + Assert.assertEquals("Round trip", partitions, partitions2); + + // Check backwards compatibility. + final Map asMap = OBJECT_MAPPER.readValue( + serializedString, + JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT + ); + + Assert.assertEquals(stream, asMap.get("stream")); + Assert.assertEquals(stream, asMap.get("topic")); + + // Jackson will deserialize the maps as string -> int maps, not int -> long. 
+ Assert.assertEquals( + offsetMap, + OBJECT_MAPPER.convertValue(asMap.get("partitionSequenceNumberMap"), new TypeReference>() {}) + ); + Assert.assertEquals( + offsetMap, + OBJECT_MAPPER.convertValue(asMap.get("partitionOffsetMap"), new TypeReference>() {}) + ); + } +} diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml index 9c89d38b50a8..dac956c38182 100644 --- a/integration-tests/pom.xml +++ b/integration-tests/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT diff --git a/integration-tests/src/main/java/org/apache/druid/testing/clients/CoordinatorResourceTestClient.java b/integration-tests/src/main/java/org/apache/druid/testing/clients/CoordinatorResourceTestClient.java index df6ad20d34d6..04147b035075 100644 --- a/integration-tests/src/main/java/org/apache/druid/testing/clients/CoordinatorResourceTestClient.java +++ b/integration-tests/src/main/java/org/apache/druid/testing/clients/CoordinatorResourceTestClient.java @@ -32,6 +32,7 @@ import org.apache.druid.java.util.http.client.response.StatusResponseHolder; import org.apache.druid.testing.IntegrationTestingConfig; import org.apache.druid.testing.guice.TestClient; +import org.apache.druid.timeline.DataSegment; import org.jboss.netty.handler.codec.http.HttpMethod; import org.jboss.netty.handler.codec.http.HttpResponseStatus; import org.joda.time.Interval; @@ -41,6 +42,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; public class CoordinatorResourceTestClient { @@ -80,6 +83,11 @@ private String getIntervalsURL(String dataSource) return StringUtils.format("%sdatasources/%s/intervals", getCoordinatorURL(), StringUtils.urlEncode(dataSource)); } + private String getFullSegmentsURL(String dataSource) + { + return StringUtils.format("%sdatasources/%s/segments?full", getCoordinatorURL(), StringUtils.urlEncode(dataSource)); + } + private String 
getLoadStatusURL() { return StringUtils.format("%s%s", getCoordinatorURL(), "loadstatus"); @@ -123,6 +131,25 @@ public List getSegmentIntervals(final String dataSource) return segments; } + // return a set of the segment versions for the specified datasource + public Set getSegmentVersions(final String dataSource) + { + ArrayList segments; + try { + StatusResponseHolder response = makeRequest(HttpMethod.GET, getFullSegmentsURL(dataSource)); + + segments = jsonMapper.readValue( + response.getContent(), new TypeReference>() + { + } + ); + } + catch (Exception e) { + throw new RuntimeException(e); + } + return segments.stream().map(s -> s.getVersion()).collect(Collectors.toSet()); + } + private Map getLoadStatus() { Map status; diff --git a/integration-tests/src/test/java/org/apache/druid/tests/indexer/AbstractITBatchIndexTest.java b/integration-tests/src/test/java/org/apache/druid/tests/indexer/AbstractITBatchIndexTest.java index 3555ef8fd41e..2034b602ee32 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/indexer/AbstractITBatchIndexTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/indexer/AbstractITBatchIndexTest.java @@ -33,6 +33,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.List; +import java.util.Set; public class AbstractITBatchIndexTest extends AbstractIndexerTest { @@ -49,7 +50,8 @@ public class AbstractITBatchIndexTest extends AbstractIndexerTest void doIndexTestTest( String dataSource, String indexTaskFilePath, - String queryFilePath + String queryFilePath, + boolean waitForNewVersion ) throws IOException { final String fullDatasourceName = dataSource + config.getExtraDatasourceNameSuffix(); @@ -59,7 +61,7 @@ void doIndexTestTest( fullDatasourceName ); - submitTaskAndWait(taskSpec, fullDatasourceName); + submitTaskAndWait(taskSpec, fullDatasourceName, waitForNewVersion); try { String queryResponseTemplate; @@ -107,7 +109,7 @@ void doReindexTest( fullReindexDatasourceName ); - 
submitTaskAndWait(taskSpec, fullReindexDatasourceName); + submitTaskAndWait(taskSpec, fullReindexDatasourceName, false); + try { String queryResponseTemplate; try { @@ -144,7 +146,7 @@ void doIndexTestSqlTest( String queryFilePath ) { - submitTaskAndWait(indexTaskFilePath, dataSource); + submitTaskAndWait(indexTaskFilePath, dataSource, false); try { sqlQueryHelper.testQueriesFromFile(queryFilePath, 2); } @@ -154,12 +156,25 @@ void doIndexTestSqlTest( } } - private void submitTaskAndWait(String taskSpec, String dataSourceName) + private void submitTaskAndWait(String taskSpec, String dataSourceName, boolean waitForNewVersion) { + final Set oldVersions = waitForNewVersion ? coordinator.getSegmentVersions(dataSourceName) : null; + final String taskID = indexer.submitTask(taskSpec); LOG.info("TaskID for loading index task %s", taskID); indexer.waitUntilTaskCompletes(taskID); + // ITParallelIndexTest does a second round of ingestion to replace segments in an existing + // data source. For that second round we need to make sure the coordinator actually learned + // about the new segments before waiting for it to report that all segments are loaded; otherwise + // this method could return too early because the coordinator is merely reporting that all the + // original segments have loaded.
+ if (waitForNewVersion) { + RetryUtil.retryUntilTrue( + () -> !oldVersions.containsAll(coordinator.getSegmentVersions(dataSourceName)), "See a new version" + ); + } + RetryUtil.retryUntilTrue( () -> coordinator.areSegmentsLoaded(dataSourceName), "Segment Load" ); diff --git a/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITIndexerTest.java b/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITIndexerTest.java index 8412e49c17f4..245c3dd3960d 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITIndexerTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITIndexerTest.java @@ -45,7 +45,8 @@ public void testIndexData() throws Exception doIndexTestTest( INDEX_DATASOURCE, INDEX_TASK, - INDEX_QUERIES_RESOURCE + INDEX_QUERIES_RESOURCE, + false ); doReindexTest( INDEX_DATASOURCE, diff --git a/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITParallelIndexTest.java b/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITParallelIndexTest.java index 80ca6e104873..e457a1c6d656 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITParallelIndexTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITParallelIndexTest.java @@ -30,6 +30,8 @@ public class ITParallelIndexTest extends AbstractITBatchIndexTest { private static String INDEX_TASK = "/indexer/wikipedia_parallel_index_task.json"; private static String INDEX_QUERIES_RESOURCE = "/indexer/wikipedia_parallel_index_queries.json"; + private static String REINDEX_TASK = "/indexer/wikipedia_parallel_reindex_task.json"; + private static String REINDEX_QUERIES_RESOURCE = "/indexer/wikipedia_parallel_reindex_queries.json"; private static String INDEX_DATASOURCE = "wikipedia_parallel_index_test"; @Test @@ -39,7 +41,17 @@ public void testIndexData() throws Exception doIndexTestTest( INDEX_DATASOURCE, INDEX_TASK, - INDEX_QUERIES_RESOURCE + INDEX_QUERIES_RESOURCE, + false + ); + + 
// Index again, this time only choosing the second data file, and without explicit intervals chosen. + // The second datafile covers both day segments, so this should replace them, as reflected in the queries. + doIndexTestTest( + INDEX_DATASOURCE, + REINDEX_TASK, + REINDEX_QUERIES_RESOURCE, + true ); } } diff --git a/integration-tests/src/test/resources/indexer/wikipedia_parallel_reindex_queries.json b/integration-tests/src/test/resources/indexer/wikipedia_parallel_reindex_queries.json new file mode 100644 index 000000000000..bbbeca9b58a2 --- /dev/null +++ b/integration-tests/src/test/resources/indexer/wikipedia_parallel_reindex_queries.json @@ -0,0 +1,18 @@ +[ + { + "description": "timeseries, 1 agg, all should only show data2", + "query":{ + "queryType" : "timeBoundary", + "dataSource": "%%DATASOURCE%%" + }, + "expectedResults":[ + { + "timestamp" : "2013-08-31T11:58:39.000Z", + "result" : { + "minTime" : "2013-08-31T11:58:39.000Z", + "maxTime" : "2013-09-01T01:02:33.000Z" + } + } + ] + } +] \ No newline at end of file diff --git a/integration-tests/src/test/resources/indexer/wikipedia_parallel_reindex_task.json b/integration-tests/src/test/resources/indexer/wikipedia_parallel_reindex_task.json new file mode 100644 index 000000000000..c06890bfde4e --- /dev/null +++ b/integration-tests/src/test/resources/indexer/wikipedia_parallel_reindex_task.json @@ -0,0 +1,65 @@ +{ + "type": "index_parallel", + "spec": { + "dataSchema": { + "dataSource": "%%DATASOURCE%%", + "metricsSpec": [ + { + "type": "count", + "name": "count" + }, + { + "type": "doubleSum", + "name": "added", + "fieldName": "added" + }, + { + "type": "doubleSum", + "name": "deleted", + "fieldName": "deleted" + }, + { + "type": "doubleSum", + "name": "delta", + "fieldName": "delta" + } + ], + "granularitySpec": { + "segmentGranularity": "DAY", + "queryGranularity": "second" + }, + "parser": { + "parseSpec": { + "format" : "json", + "timestampSpec": { + "column": "timestamp" + }, + "dimensionsSpec": { + 
"dimensions": [ + "page", + {"type": "string", "name": "language", "createBitmapIndex": false}, + "user", + "unpatrolled", + "newPage", + "robot", + "anonymous", + "namespace", + "continent", + "country", + "region", + "city" + ] + } + } + } + }, + "ioConfig": { + "type": "index_parallel", + "firehose": { + "type": "local", + "baseDir": "/resources/data/batch_index", + "filter": "wikipedia_index_data2*" + } + } + } +} \ No newline at end of file diff --git a/licenses/bin/@babel-runtime.MIT b/licenses/bin/@babel-runtime.MIT new file mode 100644 index 000000000000..f31575ec773b --- /dev/null +++ b/licenses/bin/@babel-runtime.MIT @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2014-present Sebastian McKenzie and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/licenses/bin/aether-connector-okhttp.EPL1 b/licenses/bin/aether-connector-okhttp.EPL1 new file mode 100644 index 000000000000..16cc69a52355 --- /dev/null +++ b/licenses/bin/aether-connector-okhttp.EPL1 @@ -0,0 +1,87 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and + +b) in the case of each subsequent Contributor: + +i) changes to the Program, and + +ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. 
GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. 
+ +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and + +b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. 
While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. 
NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. 
+ +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. 
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. diff --git a/licenses/bin/antlr-stringtemplate.BSD3 b/licenses/bin/antlr-stringtemplate.BSD3 new file mode 100644 index 000000000000..67e047c05e9f --- /dev/null +++ b/licenses/bin/antlr-stringtemplate.BSD3 @@ -0,0 +1,26 @@ +[The "BSD licence"] +Copyright (c) 2003-2008 Terence Parr +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/antlr.BSD3 b/licenses/bin/antlr.BSD3 new file mode 100644 index 000000000000..8ea1b564d666 --- /dev/null +++ b/licenses/bin/antlr.BSD3 @@ -0,0 +1,9 @@ +[The BSD License] +Copyright (c) 2012 Terence Parr and Sam Harwell +All rights reserved. +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/licenses/bin/antlr4-runtime.BSD3 b/licenses/bin/antlr4-runtime.BSD3 new file mode 100644 index 000000000000..3537d506775a --- /dev/null +++ b/licenses/bin/antlr4-runtime.BSD3 @@ -0,0 +1,26 @@ +[The "BSD 3-clause license"] +Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/array-includes.MIT b/licenses/bin/array-includes.MIT new file mode 100644 index 000000000000..8c271c14b62f --- /dev/null +++ b/licenses/bin/array-includes.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (C) 2015 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/licenses/bin/asap.MIT b/licenses/bin/asap.MIT new file mode 100644 index 000000000000..ba18c61390db --- /dev/null +++ b/licenses/bin/asap.MIT @@ -0,0 +1,21 @@ + +Copyright 2009–2014 Contributors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
+ diff --git a/licenses/bin/asm.BSD3 b/licenses/bin/asm.BSD3 new file mode 100644 index 000000000000..8806c7d030f4 --- /dev/null +++ b/licenses/bin/asm.BSD3 @@ -0,0 +1,27 @@ + ASM: a very small and fast Java bytecode manipulation framework + Copyright (c) 2000-2011 INRIA, France Telecom + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/licenses/bin/axios.MIT b/licenses/bin/axios.MIT new file mode 100644 index 000000000000..d36c80ef27bf --- /dev/null +++ b/licenses/bin/axios.MIT @@ -0,0 +1,19 @@ +Copyright (c) 2014-present Matt Zabriskie + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/licenses/bin/bootstrap.MIT b/licenses/bin/bootstrap.MIT new file mode 100644 index 000000000000..8d94aa9ac9fc --- /dev/null +++ b/licenses/bin/bootstrap.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2011-2014 Twitter, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/licenses/bin/brace.MIT b/licenses/bin/brace.MIT new file mode 100644 index 000000000000..41702c504347 --- /dev/null +++ b/licenses/bin/brace.MIT @@ -0,0 +1,23 @@ +Copyright 2013 Thorsten Lorenz. +All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/bin/chain-function.MIT b/licenses/bin/chain-function.MIT new file mode 100644 index 000000000000..3bc6c7009827 --- /dev/null +++ b/licenses/bin/chain-function.MIT @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jason Quense + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/bin/checkerframework-annotations.MIT b/licenses/bin/checkerframework-annotations.MIT new file mode 100644 index 000000000000..18c9a740e5ae --- /dev/null +++ b/licenses/bin/checkerframework-annotations.MIT @@ -0,0 +1 @@ +the Checker Framework developers \ No newline at end of file diff --git a/licenses/bin/classnames.MIT b/licenses/bin/classnames.MIT new file mode 100644 index 000000000000..d8e561e950ea --- /dev/null +++ b/licenses/bin/classnames.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Jed Watson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/creative-commons-2.5.LICENSE b/licenses/bin/creative-commons-2.5.LICENSE new file mode 100644 index 000000000000..f2aa747f5283 --- /dev/null +++ b/licenses/bin/creative-commons-2.5.LICENSE @@ -0,0 +1,60 @@ + +Attribution-NonCommercial 2.5 + +CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE. +License + +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. + +1. Definitions + +"Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. 
+"Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. +"Licensor" means the individual or entity that offers the Work under the terms of this License. +"Original Author" means the individual or entity who created the Work. +"Work" means the copyrightable work of authorship offered under the terms of this License. +"You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. +2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. + +3. License Grant. 
Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: + +to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; +to create and reproduce Derivative Works; +to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; +to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works; +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved, including but not limited to the rights set forth in Sections 4(d) and 4(e). + +4. Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: + +You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. 
You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(c), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(c), as requested. +You may not exercise any of the rights granted to You in Section 3 above in any manner that is primarily intended for or directed toward commercial advantage or private monetary compensation. The exchange of the Work for other copyrighted works by means of digital file-sharing or otherwise shall not be considered to be intended for or directed toward commercial advantage or private monetary compensation, provided there is no payment of any monetary compensation in connection with the exchange of copyrighted works. +If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. 
a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. +For the avoidance of doubt, where the Work is a musical composition: + +Performance Royalties Under Blanket Licenses. Licensor reserves the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work if that performance is primarily intended for or directed toward commercial advantage or private monetary compensation. +Mechanical Rights and Statutory Royalties. Licensor reserves the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions), if Your distribution of such cover version is primarily intended for or directed toward commercial advantage or private monetary compensation. 
+Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor reserves the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions), if Your public digital performance is primarily intended for or directed toward commercial advantage or private monetary compensation. +5. Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + +This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. 
+Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. +8. Miscellaneous + +Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. +Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. +If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. +No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. +This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. 
This License may not be modified without the mutual written agreement of the Licensor and You. +Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the Work. Creative Commons will not be liable to You or any party on any legal theory for any damages whatsoever, including without limitation any general, special, incidental or consequential damages arising in connection to this license. Notwithstanding the foregoing two (2) sentences, if Creative Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and obligations of Licensor. + +Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, neither party will use the trademark "Creative Commons" or any related trademark or logo of Creative Commons without the prior written consent of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current trademark usage guidelines, as may be published on its website or otherwise made available upon request from time to time. + +Creative Commons may be contacted at https://creativecommons.org/. \ No newline at end of file diff --git a/licenses/bin/css-loader.MIT b/licenses/bin/css-loader.MIT new file mode 100644 index 000000000000..8c11fc7289b7 --- /dev/null +++ b/licenses/bin/css-loader.MIT @@ -0,0 +1,20 @@ +Copyright JS Foundation and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/bin/d3-array.BSD3 b/licenses/bin/d3-array.BSD3 new file mode 100644 index 000000000000..b1b22233e2c2 --- /dev/null +++ b/licenses/bin/d3-array.BSD3 @@ -0,0 +1,27 @@ +Copyright 2010-2018 Mike Bostock +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of contributors may be used to + endorse or promote products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/define-properties.MIT b/licenses/bin/define-properties.MIT new file mode 100644 index 000000000000..8c271c14b62f --- /dev/null +++ b/licenses/bin/define-properties.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (C) 2015 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
\ No newline at end of file diff --git a/licenses/bin/dom-helpers.MIT b/licenses/bin/dom-helpers.MIT new file mode 100644 index 000000000000..284f555dbe2e --- /dev/null +++ b/licenses/bin/dom-helpers.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jason Quense + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/licenses/bin/dom4.MIT b/licenses/bin/dom4.MIT new file mode 100644 index 000000000000..78dd4477be0c --- /dev/null +++ b/licenses/bin/dom4.MIT @@ -0,0 +1,19 @@ +Copyright (C) 2013-2015 by Andrea Giammarchi - @WebReflection + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/licenses/bin/druid-console.MIT b/licenses/bin/druid-console.MIT new file mode 100644 index 000000000000..ab36f2ddc4a8 --- /dev/null +++ b/licenses/bin/druid-console.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Metamarkets + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/licenses/bin/encoding.MIT b/licenses/bin/encoding.MIT new file mode 100644 index 000000000000..33f5a9a366f6 --- /dev/null +++ b/licenses/bin/encoding.MIT @@ -0,0 +1,16 @@ +Copyright (c) 2012-2014 Andris Reinman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/es-abstract.MIT b/licenses/bin/es-abstract.MIT new file mode 100644 index 000000000000..8c271c14b62f --- /dev/null +++ b/licenses/bin/es-abstract.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (C) 2015 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/licenses/bin/es-to-primitive.MIT b/licenses/bin/es-to-primitive.MIT new file mode 100644 index 000000000000..b43df444e518 --- /dev/null +++ b/licenses/bin/es-to-primitive.MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/licenses/bin/es6-shim.MIT b/licenses/bin/es6-shim.MIT new file mode 100644 index 000000000000..996ec8d6f76e --- /dev/null +++ b/licenses/bin/es6-shim.MIT @@ -0,0 +1,26 @@ +The project was initially based on [es6-shim by Axel Rauschmayer](https://github.com/rauschma/es6-shim). + +Current maintainers are: [Paul Miller](http://paulmillr.com), [Jordan Harband](https://github.com/ljharb), and [C. Scott Ananian](http://cscott.net). + +The MIT License (MIT) + +Copyright (c) 2013-2016 Paul Miller (http://paulmillr.com) and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ diff --git a/licenses/bin/es7-shim.MIT b/licenses/bin/es7-shim.MIT new file mode 100644 index 000000000000..f670305e024f --- /dev/null +++ b/licenses/bin/es7-shim.MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (C) 2015-2016 Jordan Harband and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/licenses/bin/fbjs.MIT b/licenses/bin/fbjs.MIT new file mode 100644 index 000000000000..29e2bc2145ab --- /dev/null +++ b/licenses/bin/fbjs.MIT @@ -0,0 +1,20 @@ +MIT License + +Copyright (c) 2013-present, Facebook, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/bin/font-awesome.MIT b/licenses/bin/font-awesome.MIT new file mode 100644 index 000000000000..6058388853cf --- /dev/null +++ b/licenses/bin/font-awesome.MIT @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright 2014 Font Awesome + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/licenses/bin/font-awesome.SIL-OFL b/licenses/bin/font-awesome.SIL-OFL new file mode 100644 index 000000000000..9d0040e08b4b --- /dev/null +++ b/licenses/bin/font-awesome.SIL-OFL @@ -0,0 +1,94 @@ +Copyright (c) 2014, Font Awesome (http://fontawesome.io), +with Reserved Font Name FontAwesome. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. 
The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. 
This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/licenses/bin/function-bind.MIT b/licenses/bin/function-bind.MIT new file mode 100644 index 000000000000..62d6d237ff17 --- /dev/null +++ b/licenses/bin/function-bind.MIT @@ -0,0 +1,20 @@ +Copyright (c) 2013 Raynos. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/licenses/bin/has-symbols.MIT b/licenses/bin/has-symbols.MIT new file mode 100644 index 000000000000..df31cbf3c064 --- /dev/null +++ b/licenses/bin/has-symbols.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/has.MIT b/licenses/bin/has.MIT new file mode 100644 index 000000000000..ae7014d385df --- /dev/null +++ b/licenses/bin/has.MIT @@ -0,0 +1,22 @@ +Copyright (c) 2013 Thiago de Arruda + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/licenses/bin/history.MIT b/licenses/bin/history.MIT new file mode 100644 index 000000000000..312045b34eae --- /dev/null +++ b/licenses/bin/history.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015-2016 Michael Jackson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/hjson.MIT b/licenses/bin/hjson.MIT new file mode 100644 index 000000000000..f814dd5830a3 --- /dev/null +++ b/licenses/bin/hjson.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2017 Christian Zangl + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/hoist-non-react-statics.BSD3 b/licenses/bin/hoist-non-react-statics.BSD3 new file mode 100644 index 000000000000..2464f59ec1f7 --- /dev/null +++ b/licenses/bin/hoist-non-react-statics.BSD3 @@ -0,0 +1,29 @@ +Software License Agreement (BSD License) +======================================== + +Copyright (c) 2015, Yahoo! Inc. All rights reserved. 
+---------------------------------------------------- + +Redistribution and use of this software in source and binary forms, with or +without modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Yahoo! Inc. nor the names of YUI's contributors may be + used to endorse or promote products derived from this software without + specific prior written permission of Yahoo! Inc. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/icu4j.ICU b/licenses/bin/icu4j.ICU new file mode 100644 index 000000000000..ec2a91321d28 --- /dev/null +++ b/licenses/bin/icu4j.ICU @@ -0,0 +1,385 @@ + + + + +ICU License - ICU 1.8.1 and later + + + + +

ICU License - ICU 1.8.1 and later

+ +

COPYRIGHT AND PERMISSION NOTICE

+ +

+Copyright (c) 1995-2014 International Business Machines Corporation and others +

+

+All rights reserved. +

+

+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, and/or sell +copies of the Software, and to permit persons +to whom the Software is furnished to do so, provided that the above +copyright notice(s) and this permission notice appear in all copies +of the Software and that both the above copyright notice(s) and this +permission notice appear in supporting documentation. +

+

+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, +OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER +RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE +USE OR PERFORMANCE OF THIS SOFTWARE. +

+

+Except as contained in this notice, the name of a copyright holder shall not be +used in advertising or otherwise to promote the sale, use or other dealings in +this Software without prior written authorization of the copyright holder. +

+ +
+

+All trademarks and registered trademarks mentioned herein are the property of their respective owners. +

+ +
+ +

Third-Party Software Licenses

+This section contains third-party software notices and/or additional terms for licensed +third-party software components included within ICU libraries. + +

1. Unicode Data Files and Software

+ +
COPYRIGHT AND PERMISSION NOTICE
+
+Copyright © 1991-2014 Unicode, Inc. All rights reserved.
+Distributed under the Terms of Use in
+http://www.unicode.org/copyright.html.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Unicode data files and any associated documentation
+(the "Data Files") or Unicode software and any associated documentation
+(the "Software") to deal in the Data Files or Software
+without restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, and/or sell copies of
+the Data Files or Software, and to permit persons to whom the Data Files
+or Software are furnished to do so, provided that
+(a) this copyright and permission notice appear with all copies
+of the Data Files or Software,
+(b) this copyright and permission notice appear in associated
+documentation, and
+(c) there is clear notice in each modified Data File or in the Software
+as well as in the documentation associated with the Data File(s) or
+Software that the data or software has been modified.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale,
+use or other dealings in these Data Files or Software without prior
+written authorization of the copyright holder.
+ +

2. Chinese/Japanese Word Break Dictionary Data (cjdict.txt)

+
+ #    The Google Chrome software developed by Google is licensed under the BSD license. Other software included in this distribution is provided under other licenses, as set forth below.
+ #
+ #	The BSD License
+ #	http://opensource.org/licenses/bsd-license.php
+ #	Copyright (C) 2006-2008, Google Inc.
+ #
+ #	All rights reserved.
+ #
+ #	Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+ #
+ #	Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ #	Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ #	Neither the name of  Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+ #
+ #
+ #	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ #
+ #
+ #	The word list in cjdict.txt are generated by combining three word lists listed
+ #	below with further processing for compound word breaking. The frequency is generated
+ #	with an iterative training against Google web corpora.
+ #
+ #	* Libtabe (Chinese)
+ #	  - https://sourceforge.net/project/?group_id=1519
+ #	  - Its license terms and conditions are shown below.
+ #
+ #	* IPADIC (Japanese)
+ #	  - http://chasen.aist-nara.ac.jp/chasen/distribution.html
+ #	  - Its license terms and conditions are shown below.
+ #
+ #	---------COPYING.libtabe ---- BEGIN--------------------
+ #
+ #	/*
+ #	 * Copyrighy (c) 1999 TaBE Project.
+ #	 * Copyright (c) 1999 Pai-Hsiang Hsiao.
+ #	 * All rights reserved.
+ #	 *
+ #	 * Redistribution and use in source and binary forms, with or without
+ #	 * modification, are permitted provided that the following conditions
+ #	 * are met:
+ #	 *
+ #	 * . Redistributions of source code must retain the above copyright
+ #	 *   notice, this list of conditions and the following disclaimer.
+ #	 * . Redistributions in binary form must reproduce the above copyright
+ #	 *   notice, this list of conditions and the following disclaimer in
+ #	 *   the documentation and/or other materials provided with the
+ #	 *   distribution.
+ #	 * . Neither the name of the TaBE Project nor the names of its
+ #	 *   contributors may be used to endorse or promote products derived
+ #	 *   from this software without specific prior written permission.
+ #	 *
+ #	 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ #	 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ #	 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ #	 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ #	 * REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ #	 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ #	 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ #	 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ #	 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ #	 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ #	 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ #	 * OF THE POSSIBILITY OF SUCH DAMAGE.
+ #	 */
+ #
+ #	/*
+ #	 * Copyright (c) 1999 Computer Systems and Communication Lab,
+ #	 *                    Institute of Information Science, Academia Sinica.
+ #	 * All rights reserved.
+ #	 *
+ #	 * Redistribution and use in source and binary forms, with or without
+ #	 * modification, are permitted provided that the following conditions
+ #	 * are met:
+ #	 *
+ #	 * . Redistributions of source code must retain the above copyright
+ #	 *   notice, this list of conditions and the following disclaimer.
+ #	 * . Redistributions in binary form must reproduce the above copyright
+ #	 *   notice, this list of conditions and the following disclaimer in
+ #	 *   the documentation and/or other materials provided with the
+ #	 *   distribution.
+ #	 * . Neither the name of the Computer Systems and Communication Lab
+ #	 *   nor the names of its contributors may be used to endorse or
+ #	 *   promote products derived from this software without specific
+ #	 *   prior written permission.
+ #	 *
+ #	 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ #	 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ #	 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ #	 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ #	 * REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ #	 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ #	 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ #	 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ #	 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ #	 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ #	 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ #	 * OF THE POSSIBILITY OF SUCH DAMAGE.
+ #	 */
+ #
+ #	Copyright 1996 Chih-Hao Tsai @ Beckman Institute, University of Illinois
+ #	c-tsai4@uiuc.edu  http://casper.beckman.uiuc.edu/~c-tsai4
+ #
+ #	---------------COPYING.libtabe-----END------------------------------------
+ #
+ #
+ #	---------------COPYING.ipadic-----BEGIN------------------------------------
+ #
+ #	Copyright 2000, 2001, 2002, 2003 Nara Institute of Science
+ #	and Technology.  All Rights Reserved.
+ #
+ #	Use, reproduction, and distribution of this software is permitted.
+ #	Any copy of this software, whether in its original form or modified,
+ #	must include both the above copyright notice and the following
+ #	paragraphs.
+ #
+ #	Nara Institute of Science and Technology (NAIST),
+ #	the copyright holders, disclaims all warranties with regard to this
+ #	software, including all implied warranties of merchantability and
+ #	fitness, in no event shall NAIST be liable for
+ #	any special, indirect or consequential damages or any damages
+ #	whatsoever resulting from loss of use, data or profits, whether in an
+ #	action of contract, negligence or other tortuous action, arising out
+ #	of or in connection with the use or performance of this software.
+ #
+ #	A large portion of the dictionary entries
+ #	originate from ICOT Free Software.  The following conditions for ICOT
+ #	Free Software applies to the current dictionary as well.
+ #
+ #	Each User may also freely distribute the Program, whether in its
+ #	original form or modified, to any third party or parties, PROVIDED
+ #	that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear
+ #	on, or be attached to, the Program, which is distributed substantially
+ #	in the same form as set out herein and that such intended
+ #	distribution, if actually made, will neither violate or otherwise
+ #	contravene any of the laws and regulations of the countries having
+ #	jurisdiction over the User or the intended distribution itself.
+ #
+ #	NO WARRANTY
+ #
+ #	The program was produced on an experimental basis in the course of the
+ #	research and development conducted during the project and is provided
+ #	to users as so produced on an experimental basis.  Accordingly, the
+ #	program is provided without any warranty whatsoever, whether express,
+ #	implied, statutory or otherwise.  The term "warranty" used herein
+ #	includes, but is not limited to, any warranty of the quality,
+ #	performance, merchantability and fitness for a particular purpose of
+ #	the program and the nonexistence of any infringement or violation of
+ #	any right of any third party.
+ #
+ #	Each user of the program will agree and understand, and be deemed to
+ #	have agreed and understood, that there is no warranty whatsoever for
+ #	the program and, accordingly, the entire risk arising from or
+ #	otherwise connected with the program is assumed by the user.
+ #
+ #	Therefore, neither ICOT, the copyright holder, or any other
+ #	organization that participated in or was otherwise related to the
+ #	development of the program and their respective officials, directors,
+ #	officers and other employees shall be held liable for any and all
+ #	damages, including, without limitation, general, special, incidental
+ #	and consequential damages, arising out of or otherwise in connection
+ #	with the use or inability to use the program or any product, material
+ #	or result produced or otherwise obtained by using the program,
+ #	regardless of whether they have been advised of, or otherwise had
+ #	knowledge of, the possibility of such damages at any time during the
+ #	project or thereafter.  Each user will be deemed to have agreed to the
+ #	foregoing by his or her commencement of use of the program.  The term
+ #	"use" as used herein includes, but is not limited to, the use,
+ #	modification, copying and distribution of the program and the
+ #	production of secondary products from the program.
+ #
+ #	In the case where the program, whether in its original form or
+ #	modified, was distributed or delivered to or received by a user from
+ #	any person, organization or entity other than ICOT, unless it makes or
+ #	grants independently of ICOT any specific warranty to the user in
+ #	writing, such person, organization or entity, will also be exempted
+ #	from and not be held liable to the user for any such damages as noted
+ #	above as far as the program is concerned.
+ #
+ #	---------------COPYING.ipadic-----END------------------------------------
+
+ +

3. Lao Word Break Dictionary Data (laodict.txt)

+
+ #	Copyright (c) 2013 International Business Machines Corporation
+ #	and others. All Rights Reserved.
+ #
+ #	Project:    http://code.google.com/p/lao-dictionary/
+ #	Dictionary: http://lao-dictionary.googlecode.com/git/Lao-Dictionary.txt
+ #	License:    http://lao-dictionary.googlecode.com/git/Lao-Dictionary-LICENSE.txt
+ #	            (copied below)
+ #
+ #	This file is derived from the above dictionary, with slight modifications.
+ #	--------------------------------------------------------------------------------
+ #	Copyright (C) 2013 Brian Eugene Wilson, Robert Martin Campbell.
+ #	All rights reserved.
+ #
+ #	Redistribution and use in source and binary forms, with or without modification,
+ #	are permitted provided that the following conditions are met:
+ #
+ #		Redistributions of source code must retain the above copyright notice, this
+ #		list of conditions and the following disclaimer. Redistributions in binary
+ #		form must reproduce the above copyright notice, this list of conditions and
+ #		the following disclaimer in the documentation and/or other materials
+ #		provided with the distribution.
+ #
+ #	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ #	ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ #	WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ #	DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ #	ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ #	(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ #	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ #	ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ #	(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ #	SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ #	--------------------------------------------------------------------------------
+
+ +

4. Burmese Word Break Dictionary Data (burmesedict.txt)

+
+ #	Copyright (c) 2014 International Business Machines Corporation
+ #	and others. All Rights Reserved.
+ #
+ #	This list is part of a project hosted at:
+ #	  github.com/kanyawtech/myanmar-karen-word-lists
+ #
+ #	--------------------------------------------------------------------------------
+ #	Copyright (c) 2013, LeRoy Benjamin Sharon
+ #	All rights reserved.
+ #
+ #	Redistribution and use in source and binary forms, with or without modification,
+ #	are permitted provided that the following conditions are met:
+ #
+ #	  Redistributions of source code must retain the above copyright notice, this
+ #	  list of conditions and the following disclaimer.
+ #
+ #	  Redistributions in binary form must reproduce the above copyright notice, this
+ #	  list of conditions and the following disclaimer in the documentation and/or
+ #	  other materials provided with the distribution.
+ #
+ #	  Neither the name Myanmar Karen Word Lists, nor the names of its
+ #	  contributors may be used to endorse or promote products derived from
+ #	  this software without specific prior written permission.
+ #
+ #	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ #	ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ #	WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ #	DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ #	ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ #	(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ #	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ #	ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ #	(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ #	SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ #	--------------------------------------------------------------------------------
+
+ +

5. Time Zone Database

+

ICU uses the public domain data and code derived from +Time Zone Database for its time zone support. The ownership of the TZ database is explained +in BCP 175: Procedure for Maintaining the Time Zone +Database section 7.

+ +

+7.  Database Ownership
+
+   The TZ database itself is not an IETF Contribution or an IETF
+   document.  Rather it is a pre-existing and regularly updated work
+   that is in the public domain, and is intended to remain in the public
+   domain.  Therefore, BCPs 78 [RFC5378] and 79 [RFC3979] do not apply
+   to the TZ Database or contributions that individuals make to it.
+   Should any claims be made and substantiated against the TZ Database,
+   the organization that is providing the IANA Considerations defined in
+   this RFC, under the memorandum of understanding with the IETF,
+   currently ICANN, may act in accordance with all competent court
+   orders.  No ownership claims will be made by ICANN or the IETF Trust
+   on the database or the code.  Any person making a contribution to the
+   database or code waives all rights to future claims in that
+   contribution or in the TZ Database.
+
+
+ + + + diff --git a/licenses/bin/invariant.MIT b/licenses/bin/invariant.MIT new file mode 100644 index 000000000000..188fb2b0bd8e --- /dev/null +++ b/licenses/bin/invariant.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013-present, Facebook, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/is-buffer.MIT b/licenses/bin/is-buffer.MIT new file mode 100644 index 000000000000..0c068ceecbd4 --- /dev/null +++ b/licenses/bin/is-buffer.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Feross Aboukhadijeh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/licenses/bin/is-callable.MIT b/licenses/bin/is-callable.MIT new file mode 100644 index 000000000000..b43df444e518 --- /dev/null +++ b/licenses/bin/is-callable.MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/licenses/bin/is-date-object.MIT b/licenses/bin/is-date-object.MIT new file mode 100644 index 000000000000..b43df444e518 --- /dev/null +++ b/licenses/bin/is-date-object.MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/licenses/bin/is-regex.MIT b/licenses/bin/is-regex.MIT new file mode 100644 index 000000000000..47b7b5078fce --- /dev/null +++ b/licenses/bin/is-regex.MIT @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/licenses/bin/is-symbol.MIT b/licenses/bin/is-symbol.MIT new file mode 100644 index 000000000000..b43df444e518 --- /dev/null +++ b/licenses/bin/is-symbol.MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/licenses/bin/isarray.MIT b/licenses/bin/isarray.MIT new file mode 100644 index 000000000000..2cdc8e4148cc --- /dev/null +++ b/licenses/bin/isarray.MIT @@ -0,0 +1,21 @@ +(MIT) + +Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/janino.BSD3 b/licenses/bin/janino.BSD3 new file mode 100644 index 000000000000..ef871e242621 --- /dev/null +++ b/licenses/bin/janino.BSD3 @@ -0,0 +1,31 @@ +Janino - An embedded Java[TM] compiler + +Copyright (c) 2001-2016, Arno Unkrig +Copyright (c) 2015-2016 TIBCO Software Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + 3. Neither the name of JANINO nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/javax-el.CDDL11 b/licenses/bin/javax-el.CDDL11 new file mode 100644 index 000000000000..82dfb5ccd433 --- /dev/null +++ b/licenses/bin/javax-el.CDDL11 @@ -0,0 +1,362 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. 
"Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. 
For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. 
You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. 
+ + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. 
In the event of termination under Sections 6.1 or 6.2 above,
+    all end user licenses that have been validly granted by You or any
+    distributor hereunder prior to termination (excluding licenses
+    granted to You by any distributor) shall survive termination.
+
+7. LIMITATION OF LIABILITY.
+
+    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
+    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
+    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
+    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
+    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
+    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
+    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
+    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
+    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
+    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
+    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
+    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
+    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
+    AND LIMITATION MAY NOT APPLY TO YOU.
+
+8. U.S. GOVERNMENT END USERS.
+
+    The Covered Software is a "commercial item," as that term is defined
+    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+    software" (as that term is defined at 48 C.F.R. §
+    252.227-7014(a)(1)) and "commercial computer software documentation"
+    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
+    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
+    (June 1995), all U.S. Government End Users acquire Covered Software
+    with only those rights set forth herein. This U.S. Government Rights
+    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
+    clause or provision that addresses Government rights in computer
+    software under this License.
+ +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. 
+ +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. diff --git a/licenses/bin/javax.CDDL11 b/licenses/bin/javax.CDDL11 new file mode 100644 index 000000000000..596a510633df --- /dev/null +++ b/licenses/bin/javax.CDDL11 @@ -0,0 +1,362 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. 
"Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. 
You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. 
+ + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. 
In the event of termination under Sections 6.1 or 6.2 above,
+    all end user licenses that have been validly granted by You or any
+    distributor hereunder prior to termination (excluding licenses
+    granted to You by any distributor) shall survive termination.
+
+7. LIMITATION OF LIABILITY.
+
+    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
+    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
+    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
+    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
+    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
+    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
+    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
+    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
+    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
+    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
+    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
+    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
+    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
+    AND LIMITATION MAY NOT APPLY TO YOU.
+
+8. U.S. GOVERNMENT END USERS.
+
+    The Covered Software is a "commercial item," as that term is defined
+    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+    software" (as that term is defined at 48 C.F.R. §
+    252.227-7014(a)(1)) and "commercial computer software documentation"
+    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
+    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
+    (June 1995), all U.S. Government End Users acquire Covered Software
+    with only those rights set forth herein. This U.S. Government Rights
+    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
+    clause or provision that addresses Government rights in computer
+    software under this License.
+ +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. 
+ +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. diff --git a/licenses/bin/javax.activation.CDDL11 b/licenses/bin/javax.activation.CDDL11 new file mode 100644 index 000000000000..596a510633df --- /dev/null +++ b/licenses/bin/javax.activation.CDDL11 @@ -0,0 +1,362 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. 
"Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. 
You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. 
+ + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. 
In the event of termination under Sections 6.1 or 6.2 above, + all end user licenses that have been validly granted by You or any + distributor hereunder prior to termination (excluding licenses + granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE + TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER + FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR + LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE + POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT + APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH + PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH + LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR + LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION + AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is defined + in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer + software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software documentation" + as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent + with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 + (June 1995), all U.S. Government End Users acquire Covered Software + with only those rights set forth herein. This U.S. Government Rights + clause is in lieu of, and supersedes, any other FAR, DFAR, or other + clause or provision that addresses Government rights in computer + software under this License. 
+ +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. 
+ +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. diff --git a/licenses/bin/javax.el-api.CDDL11 b/licenses/bin/javax.el-api.CDDL11 new file mode 100644 index 000000000000..82dfb5ccd433 --- /dev/null +++ b/licenses/bin/javax.el-api.CDDL11 @@ -0,0 +1,362 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. 
"Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. 
You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. 
+ + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. 
In the event of termination under Sections 6.1 or 6.2 above,
+    all end user licenses that have been validly granted by You or any
+    distributor hereunder prior to termination (excluding licenses
+    granted to You by any distributor) shall survive termination.
+
+7. LIMITATION OF LIABILITY.
+
+    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
+    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
+    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
+    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
+    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
+    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
+    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
+    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
+    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
+    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
+    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
+    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
+    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
+    AND LIMITATION MAY NOT APPLY TO YOU.
+
+8. U.S. GOVERNMENT END USERS.
+
+    The Covered Software is a "commercial item," as that term is defined
+    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+    software" (as that term is defined at 48 C.F.R. §
+    252.227-7014(a)(1)) and "commercial computer software documentation"
+    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
+    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
+    (June 1995), all U.S. Government End Users acquire Covered Software
+    with only those rights set forth herein. This U.S. Government Rights
+    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
+    clause or provision that addresses Government rights in computer
+    software under this License. 
+ +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. 
+ +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. diff --git a/licenses/bin/javax.servlet-api.CDDL11 b/licenses/bin/javax.servlet-api.CDDL11 new file mode 100644 index 000000000000..82dfb5ccd433 --- /dev/null +++ b/licenses/bin/javax.servlet-api.CDDL11 @@ -0,0 +1,362 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. 
"Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. 
You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. 
+ + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. 
In the event of termination under Sections 6.1 or 6.2 above,
+    all end user licenses that have been validly granted by You or any
+    distributor hereunder prior to termination (excluding licenses
+    granted to You by any distributor) shall survive termination.
+
+7. LIMITATION OF LIABILITY.
+
+    UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+    (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE
+    INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
+    COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE
+    TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR
+    CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT
+    LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
+    FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR
+    LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
+    POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT
+    APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH
+    PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH
+    LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
+    LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION
+    AND LIMITATION MAY NOT APPLY TO YOU.
+
+8. U.S. GOVERNMENT END USERS.
+
+    The Covered Software is a "commercial item," as that term is defined
+    in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+    software" (as that term is defined at 48 C.F.R. §
+    252.227-7014(a)(1)) and "commercial computer software documentation"
+    as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent
+    with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
+    (June 1995), all U.S. Government End Users acquire Covered Software
+    with only those rights set forth herein. This U.S. Government Rights
+    clause is in lieu of, and supersedes, any other FAR, DFAR, or other
+    clause or provision that addresses Government rights in computer
+    software under this License. 
+ +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. 
+ +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. diff --git a/licenses/bin/jcl-over-slf4j.IMT b/licenses/bin/jcl-over-slf4j.IMT new file mode 100644 index 000000000000..8fda22f4d72f --- /dev/null +++ b/licenses/bin/jcl-over-slf4j.IMT @@ -0,0 +1,21 @@ +Copyright (c) 2004-2014 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/licenses/bin/jcodings.MIT b/licenses/bin/jcodings.MIT new file mode 100644 index 000000000000..f85e365de02a --- /dev/null +++ b/licenses/bin/jcodings.MIT @@ -0,0 +1,17 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/jersey.CDDL11 b/licenses/bin/jersey.CDDL11 new file mode 100644 index 000000000000..82dfb5ccd433 --- /dev/null +++ b/licenses/bin/jersey.CDDL11 @@ -0,0 +1,362 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. 
"Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. 
For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. 
You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. 
+ + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. 
In the event of termination under Sections 6.1 or 6.2 above, + all end user licenses that have been validly granted by You or any + distributor hereunder prior to termination (excluding licenses + granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE + TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER + FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR + LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE + POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT + APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH + PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH + LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR + LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION + AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is defined + in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer + software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software documentation" + as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent + with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 + (June 1995), all U.S. Government End Users acquire Covered Software + with only those rights set forth herein. This U.S. Government Rights + clause is in lieu of, and supersedes, any other FAR, DFAR, or other + clause or provision that addresses Government rights in computer + software under this License. 
+ +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. 
+ +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. diff --git a/licenses/bin/jline.BSD3 b/licenses/bin/jline.BSD3 new file mode 100644 index 000000000000..246f54f73655 --- /dev/null +++ b/licenses/bin/jline.BSD3 @@ -0,0 +1,32 @@ +Copyright (c) 2002-2006, Marc Prud'hommeaux +All rights reserved. + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with +the distribution. + +Neither the name of JLine nor the names of its contributors +may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO +EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/joni.MIT b/licenses/bin/joni.MIT new file mode 100644 index 000000000000..9341f05eee9c --- /dev/null +++ b/licenses/bin/joni.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 JRuby Team + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/jopt-simple.MIT b/licenses/bin/jopt-simple.MIT new file mode 100644 index 000000000000..6796036beaa5 --- /dev/null +++ b/licenses/bin/jopt-simple.MIT @@ -0,0 +1,24 @@ +/* + The MIT License + + Copyright (c) 2004-2011 Paul R. Holser, Jr. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ diff --git a/licenses/bin/jsch.BSD3 b/licenses/bin/jsch.BSD3 new file mode 100644 index 000000000000..edd491dfbfbf --- /dev/null +++ b/licenses/bin/jsch.BSD3 @@ -0,0 +1,30 @@ +JSch 0.0.* was released under the GNU LGPL license. Later, we have switched +over to a BSD-style license. + +------------------------------------------------------------------------------ +Copyright (c) 2002-2015 Atsuhiko Yamanaka, JCraft,Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. + + 3. The names of the authors may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, +INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, +OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/jsr305.BSD3 b/licenses/bin/jsr305.BSD3 new file mode 100644 index 000000000000..842476092551 --- /dev/null +++ b/licenses/bin/jsr305.BSD3 @@ -0,0 +1,8 @@ +The JSR-305 reference implementation (lib/jsr305.jar) is +distributed under the terms of the New BSD license: + + http://www.opensource.org/licenses/bsd-license.php + +See the JSR-305 home page for more information: + + http://code.google.com/p/jsr-305/ diff --git a/licenses/bin/jsr311-api.CDDL11 b/licenses/bin/jsr311-api.CDDL11 new file mode 100644 index 000000000000..82dfb5ccd433 --- /dev/null +++ b/licenses/bin/jsr311-api.CDDL11 @@ -0,0 +1,362 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. 
"Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. 
"Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. 
+ + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. 
+ + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. + + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. 
+ + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. 
In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. + + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. 
+ + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. 
If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. In the event of termination under Sections 6.1 or 6.2 above, + all end user licenses that have been validly granted by You or any + distributor hereunder prior to termination (excluding licenses + granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. 
+ + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE + TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER + FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR + LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE + POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT + APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH + PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH + LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR + LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION + AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is defined + in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer + software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software documentation" + as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent + with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 + (June 1995), all U.S. Government End Users acquire Covered Software + with only those rights set forth herein. This U.S. Government Rights + clause is in lieu of, and supersedes, any other FAR, DFAR, or other + clause or provision that addresses Government rights in computer + software under this License. + +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. + +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). 
Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. diff --git a/licenses/bin/leveldbjni.BSD3 b/licenses/bin/leveldbjni.BSD3 new file mode 100644 index 000000000000..8edd375909b4 --- /dev/null +++ b/licenses/bin/leveldbjni.BSD3 @@ -0,0 +1,27 @@ +Copyright (c) 2011 FuseSource Corp. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of FuseSource Corp. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/licenses/bin/numeral.MIT b/licenses/bin/numeral.MIT new file mode 100644 index 000000000000..e373f8976a83 --- /dev/null +++ b/licenses/bin/numeral.MIT @@ -0,0 +1,22 @@ +Copyright (c) 2016 Adam Draper + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/licenses/bin/object-assign.MIT b/licenses/bin/object-assign.MIT new file mode 100644 index 000000000000..654d0bfe9434 --- /dev/null +++ b/licenses/bin/object-assign.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Sindre Sorhus (sindresorhus.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/licenses/bin/object-keys.MIT b/licenses/bin/object-keys.MIT new file mode 100644 index 000000000000..28553fdd0684 --- /dev/null +++ b/licenses/bin/object-keys.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (C) 2013 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/licenses/bin/os-browserify.MIT b/licenses/bin/os-browserify.MIT new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/licenses/bin/paranamer.BSD3 b/licenses/bin/paranamer.BSD3 new file mode 100644 index 000000000000..9eab87918636 --- /dev/null +++ b/licenses/bin/paranamer.BSD3 @@ -0,0 +1,29 @@ +[ ParaNamer used to be 'Pubic Domain', but since it includes a small piece of ASM it is now the same license as that: BSD ] + + Portions copyright (c) 2006-2018 Paul Hammant & ThoughtWorks Inc + Portions copyright (c) 2000-2007 INRIA, France Telecom + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/licenses/bin/path-to-regexp.MIT b/licenses/bin/path-to-regexp.MIT new file mode 100644 index 000000000000..983fbe8aec3f --- /dev/null +++ b/licenses/bin/path-to-regexp.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Blake Embrey (hello@blakeembrey.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/licenses/bin/postgresql.BSD3 b/licenses/bin/postgresql.BSD3 new file mode 100644 index 000000000000..fd416d2ec47e --- /dev/null +++ b/licenses/bin/postgresql.BSD3 @@ -0,0 +1,26 @@ +Copyright (c) 1997-2011, PostgreSQL Global Development Group +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +3. Neither the name of the PostgreSQL Global Development Group nor the names + of its contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/process.MIT b/licenses/bin/process.MIT new file mode 100644 index 000000000000..b8c1246cf49c --- /dev/null +++ b/licenses/bin/process.MIT @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2013 Roman Shtylman + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/bin/promise.MIT b/licenses/bin/promise.MIT new file mode 100644 index 000000000000..7a1f763640a9 --- /dev/null +++ b/licenses/bin/promise.MIT @@ -0,0 +1,19 @@ +Copyright (c) 2014 Forbes Lindesay + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/licenses/bin/prop-types.MIT b/licenses/bin/prop-types.MIT new file mode 100644 index 000000000000..188fb2b0bd8e --- /dev/null +++ b/licenses/bin/prop-types.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013-present, Facebook, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/protobuf-java.BSD3 b/licenses/bin/protobuf-java.BSD3 new file mode 100644 index 000000000000..f028c8232420 --- /dev/null +++ b/licenses/bin/protobuf-java.BSD3 @@ -0,0 +1,42 @@ +This license applies to all parts of Protocol Buffers except the following: + + - Atomicops support for generic gcc, located in + src/google/protobuf/stubs/atomicops_internals_generic_gcc.h. + This file is copyrighted by Red Hat Inc. + + - Atomicops support for AIX/POWER, located in + src/google/protobuf/stubs/atomicops_internals_power.h. + This file is copyrighted by Bloomberg Finance LP. + +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. 
diff --git a/licenses/bin/pure-render-decorator.MIT b/licenses/bin/pure-render-decorator.MIT new file mode 100644 index 000000000000..06754d3c037b --- /dev/null +++ b/licenses/bin/pure-render-decorator.MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Félix Girault + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/react-ace.MIT b/licenses/bin/react-ace.MIT new file mode 100644 index 000000000000..3010ee6d9234 --- /dev/null +++ b/licenses/bin/react-ace.MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 James Hrisho + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/licenses/bin/react-addons-css-transition-group.MIT b/licenses/bin/react-addons-css-transition-group.MIT new file mode 100644 index 000000000000..188fb2b0bd8e --- /dev/null +++ b/licenses/bin/react-addons-css-transition-group.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013-present, Facebook, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/react-dom.MIT b/licenses/bin/react-dom.MIT new file mode 100644 index 000000000000..b96dcb0480a0 --- /dev/null +++ b/licenses/bin/react-dom.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/react-is.MIT b/licenses/bin/react-is.MIT new file mode 100644 index 000000000000..b96dcb0480a0 --- /dev/null +++ b/licenses/bin/react-is.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/react-router-dom.MIT b/licenses/bin/react-router-dom.MIT new file mode 100644 index 000000000000..c79afb81e560 --- /dev/null +++ b/licenses/bin/react-router-dom.MIT @@ -0,0 +1,37 @@ +# react-router-dom + +DOM bindings for [React Router](https://reacttraining.com/react-router). + +## Installation + +Using [npm](https://www.npmjs.com/): + + $ npm install --save react-router-dom + +Then with a module bundler like [webpack](https://webpack.github.io/), use as you would anything else: + +```js +// using ES6 modules +import { BrowserRouter, Route, Link } from 'react-router-dom' + +// using CommonJS modules +const BrowserRouter = require('react-router-dom').BrowserRouter +const Route = require('react-router-dom').Route +const Link = require('react-router-dom').Link +``` + +The UMD build is also available on [unpkg](https://unpkg.com): + +```html +<script src="https://unpkg.com/react-router-dom/umd/react-router-dom.min.js"></script> +``` + +You can find the library on `window.ReactRouterDOM`. + +## Issues + +If you find a bug, please file an issue on [our issue tracker on GitHub](https://github.com/ReactTraining/react-router/issues). + +## Credits + +React Router is built and maintained by [React Training](https://reacttraining.com). diff --git a/licenses/bin/react-router.MIT b/licenses/bin/react-router.MIT new file mode 100644 index 000000000000..a46e3167fca3 --- /dev/null +++ b/licenses/bin/react-router.MIT @@ -0,0 +1,40 @@ +# react-router + +Declarative routing for [React](https://facebook.github.io/react). + +## Installation + +Using [npm](https://www.npmjs.com/): + + $ npm install --save react-router + +**Note:** This package provides the core routing functionality for React Router, but you might not want to install it directly. If you are writing an application that will run in the browser, you should instead install `react-router-dom`. Similarly, if you are writing a React Native application, you should instead install `react-router-native`. Both of those will install `react-router` as a dependency. 
+ + +Then with a module bundler like [webpack](https://webpack.github.io/), use as you would anything else: + +```js +// using ES6 modules +import { Router, Route, Switch } from 'react-router' + +// using CommonJS modules +var Router = require('react-router').Router +var Route = require('react-router').Route +var Switch = require('react-router').Switch +``` + +The UMD build is also available on [unpkg](https://unpkg.com): + +```html +<script src="https://unpkg.com/react-router/umd/react-router.min.js"></script> +``` + +You can find the library on `window.ReactRouter`. + +## Issues + +If you find a bug, please file an issue on [our issue tracker on GitHub](https://github.com/ReactTraining/react-router/issues). + +## Credits + +React Router is built and maintained by [React Training](https://reacttraining.com). diff --git a/licenses/bin/react-table.MIT b/licenses/bin/react-table.MIT new file mode 100644 index 000000000000..9fe1442eeb48 --- /dev/null +++ b/licenses/bin/react-table.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Tanner Linsley + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/react-transition-group.BSD3 b/licenses/bin/react-transition-group.BSD3 new file mode 100644 index 000000000000..8b268b61104c --- /dev/null +++ b/licenses/bin/react-transition-group.BSD3 @@ -0,0 +1,30 @@ +BSD 3-Clause License + +Copyright (c) 2016, React Community +Forked from React (https://github.com/facebook/react) Copyright 2013-present, Facebook, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/react.MIT b/licenses/bin/react.MIT new file mode 100644 index 000000000000..b96dcb0480a0 --- /dev/null +++ b/licenses/bin/react.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/reactive-streams.CC0 b/licenses/bin/reactive-streams.CC0 new file mode 100644 index 000000000000..696f2c0ec2d2 --- /dev/null +++ b/licenses/bin/reactive-streams.CC0 @@ -0,0 +1,8 @@ +Licensed under Public Domain (CC0) + +To the extent possible under law, the person who associated CC0 with +this code has waived all copyright and related or neighboring +rights to this code. + +You should have received a copy of the CC0 legalcode along with this +work. If not, see . diff --git a/licenses/bin/resolve-pathname.MIT b/licenses/bin/resolve-pathname.MIT new file mode 100644 index 000000000000..54ff40f6e24e --- /dev/null +++ b/licenses/bin/resolve-pathname.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Michael Jackson 2016-2018 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/rhino.MPL2 b/licenses/bin/rhino.MPL2 new file mode 100644 index 000000000000..cd6ebb83d755 --- /dev/null +++ b/licenses/bin/rhino.MPL2 @@ -0,0 +1,375 @@ +The majority of Rhino is licensed under the MPL 2.0: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/licenses/bin/scala-lang.BSD3 b/licenses/bin/scala-lang.BSD3 new file mode 100644 index 000000000000..7eb343f34fe3 --- /dev/null +++ b/licenses/bin/scala-lang.BSD3 @@ -0,0 +1,12 @@ +Copyright (c) 2002- EPFL +Copyright (c) 2011- Lightbend, Inc. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/licenses/bin/scheduler.MIT b/licenses/bin/scheduler.MIT new file mode 100644 index 000000000000..b96dcb0480a0 --- /dev/null +++ b/licenses/bin/scheduler.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/slf4j.MIT b/licenses/bin/slf4j.MIT new file mode 100644 index 000000000000..f5ecafa00743 --- /dev/null +++ b/licenses/bin/slf4j.MIT @@ -0,0 +1,21 @@ +Copyright (c) 2004-2008 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/licenses/bin/string-at.MIT b/licenses/bin/string-at.MIT new file mode 100644 index 000000000000..b43df444e518 --- /dev/null +++ b/licenses/bin/string-at.MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jordan Harband + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/licenses/bin/style-loader.MIT b/licenses/bin/style-loader.MIT new file mode 100644 index 000000000000..8c11fc7289b7 --- /dev/null +++ b/licenses/bin/style-loader.MIT @@ -0,0 +1,20 @@ +Copyright JS Foundation and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/bin/tesla-aether.EPL1 b/licenses/bin/tesla-aether.EPL1 new file mode 100644 index 000000000000..16cc69a52355 --- /dev/null +++ b/licenses/bin/tesla-aether.EPL1 @@ -0,0 +1,87 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. 
DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and + +b) in the case of each subsequent Contributor: + +i) changes to the Program, and + +ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. 
This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. + +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + +3. 
REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and + +b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. 
Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. 
NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. 
+ +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. 
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. diff --git a/licenses/bin/tether.MIT b/licenses/bin/tether.MIT new file mode 100644 index 000000000000..fabf1dc3dd72 --- /dev/null +++ b/licenses/bin/tether.MIT @@ -0,0 +1,8 @@ +Copyright (c) 2014-2017 HubSpot, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/licenses/bin/ua-parser-js.MIT b/licenses/bin/ua-parser-js.MIT new file mode 100644 index 000000000000..1f3ef511cf1b --- /dev/null +++ b/licenses/bin/ua-parser-js.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2012-2018 Faisal Salman <> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/bin/value-equal.MIT b/licenses/bin/value-equal.MIT new file mode 100644 index 000000000000..54ff40f6e24e --- /dev/null +++ b/licenses/bin/value-equal.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Michael Jackson 2016-2018 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/warning.MIT b/licenses/bin/warning.MIT new file mode 100644 index 000000000000..188fb2b0bd8e --- /dev/null +++ b/licenses/bin/warning.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013-present, Facebook, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/licenses/bin/webpack.MIT b/licenses/bin/webpack.MIT new file mode 100644 index 000000000000..8c11fc7289b7 --- /dev/null +++ b/licenses/bin/webpack.MIT @@ -0,0 +1,20 @@ +Copyright JS Foundation and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/bin/xmlenc.BSD3 b/licenses/bin/xmlenc.BSD3 new file mode 100644 index 000000000000..3a70c9bfcdad --- /dev/null +++ b/licenses/bin/xmlenc.BSD3 @@ -0,0 +1,27 @@ +Copyright 2003-2005, Ernst de Haan +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/zstandard.BSD3 b/licenses/bin/zstandard.BSD3 new file mode 100644 index 000000000000..a793a8028925 --- /dev/null +++ b/licenses/bin/zstandard.BSD3 @@ -0,0 +1,30 @@ +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/bin/zstd-jni.BSD2 b/licenses/bin/zstd-jni.BSD2 new file mode 100644 index 000000000000..32c6bbdd980d --- /dev/null +++ b/licenses/bin/zstd-jni.BSD2 @@ -0,0 +1,26 @@ +Zstd-jni: JNI bindings to Zstd Library + +Copyright (c) 2015-2016, Luben Karavelov/ All rights reserved. + +BSD License + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/src/datatables.BSD3 b/licenses/src/datatables.BSD3 new file mode 100644 index 000000000000..19a177bf7493 --- /dev/null +++ b/licenses/src/datatables.BSD3 @@ -0,0 +1,10 @@ +Copyright (c) 2008-2010, Allan Jardine +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the name of Allan Jardine nor SpryMedia UK may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/licenses/src/jquery-ui.MIT b/licenses/src/jquery-ui.MIT new file mode 100644 index 000000000000..741585591f0f --- /dev/null +++ b/licenses/src/jquery-ui.MIT @@ -0,0 +1,26 @@ +Copyright 2012 jQuery Foundation and other contributors, +http://jqueryui.com/ + +This software consists of voluntary contributions made by many +individuals (AUTHORS.txt, http://jqueryui.com/about) For exact +contribution history, see the revision history and logs, available +at http://jquery-ui.googlecode.com/svn/ + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/src/jquery.MIT b/licenses/src/jquery.MIT new file mode 100644 index 000000000000..163c7cbd29de --- /dev/null +++ b/licenses/src/jquery.MIT @@ -0,0 +1,10 @@ +The MIT License (MIT) + +Copyright (c) 2005, 2014 jQuery Foundation, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/licenses/src/underscore.MIT b/licenses/src/underscore.MIT new file mode 100644 index 000000000000..de86c6ca6b64 --- /dev/null +++ b/licenses/src/underscore.MIT @@ -0,0 +1,22 @@ +Copyright (c) 2011 Jeremy Ashkenas, DocumentCloud + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/pom.xml b/pom.xml index 4e2f85e173da..715ea9619a93 100644 --- a/pom.xml +++ b/pom.xml @@ -18,8 +18,7 @@ ~ under the License. --> - + 4.0.0 @@ -30,7 +29,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT pom Druid @@ -1422,7 +1421,7 @@ MIT MIT JQuery - + Copyright 2012 jQuery Foundation and other contributors; Licensed MIT jQuery Foundation, Inc. | jquery.org/license @@ -1431,7 +1430,7 @@ Underscore Underscore - + Underscore is freely distributable under the MIT license @@ -1439,7 +1438,7 @@ Allan Jardine Allan Jardine - + Copyright 2009 Allan Jardine. All Rights Reserved @@ -1447,7 +1446,7 @@ Allan Jardine Allan Jardine - + Copyright 2009 Allan Jardine. 
All Rights Reserved Copyright 2008-2011 Allan Jardine @@ -1473,6 +1472,7 @@ quickstart/tutorial/conf/** docker/*.conf target/** + licenses/** **/test/resources/** **/derby.log **/jvm.config @@ -1481,6 +1481,12 @@ **/*.json **/*.parq **/*.parquet + LICENSE + LICENSE.BINARY + NOTICE + NOTICE.BINARY + LABELS.md + .github/ISSUE_TEMPLATE/*.md diff --git a/processing/pom.xml b/processing/pom.xml index ccaf9c534295..ee8d7b0c1149 100644 --- a/processing/pom.xml +++ b/processing/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT @@ -113,7 +113,7 @@
org.checkerframework - checker + checker-qual ${checkerframework.version} diff --git a/processing/src/main/java/org/apache/druid/query/datasourcemetadata/DataSourceQueryQueryToolChest.java b/processing/src/main/java/org/apache/druid/query/datasourcemetadata/DataSourceQueryQueryToolChest.java index e1befab71ad3..7b951c83647a 100644 --- a/processing/src/main/java/org/apache/druid/query/datasourcemetadata/DataSourceQueryQueryToolChest.java +++ b/processing/src/main/java/org/apache/druid/query/datasourcemetadata/DataSourceQueryQueryToolChest.java @@ -22,9 +22,6 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.google.common.base.Function; import com.google.common.base.Functions; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; -import com.google.common.collect.Lists; import com.google.inject.Inject; import org.apache.druid.java.util.common.guava.Sequence; import org.apache.druid.java.util.common.guava.Sequences; @@ -42,6 +39,7 @@ import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** */ @@ -68,19 +66,9 @@ public List filterSegments(DataSourceMetadataQuery final T max = segments.get(segments.size() - 1); - return Lists.newArrayList( - Iterables.filter( - segments, - new Predicate() - { - @Override - public boolean apply(T input) - { - return max != null && input.getInterval().overlaps(max.getInterval()); - } - } - ) - ); + return segments.stream() + .filter(input -> max != null && input.getInterval().overlaps(max.getTrueInterval())) + .collect(Collectors.toList()); } @Override diff --git a/processing/src/main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java b/processing/src/main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java index 8db528560e40..15acc4ce7abc 100644 --- a/processing/src/main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java +++ b/processing/src/main/java/org/apache/druid/query/metadata/SegmentAnalyzer.java @@ -42,6 +42,7 @@ import 
org.apache.druid.segment.column.ColumnCapabilitiesImpl; import org.apache.druid.segment.column.ColumnHolder; import org.apache.druid.segment.column.ComplexColumn; +import org.apache.druid.segment.column.DictionaryEncodedColumn; import org.apache.druid.segment.column.ValueType; import org.apache.druid.segment.data.IndexedInts; import org.apache.druid.segment.serde.ComplexMetricSerde; @@ -194,30 +195,38 @@ private ColumnAnalysis analyzeStringColumn( final ColumnHolder columnHolder ) { - long size = 0; - Comparable min = null; Comparable max = null; - - if (!capabilities.hasBitmapIndexes()) { - return ColumnAnalysis.error("string_no_bitmap"); - } - - final BitmapIndex bitmapIndex = columnHolder.getBitmapIndex(); - final int cardinality = bitmapIndex.getCardinality(); - - if (analyzingSize()) { - for (int i = 0; i < cardinality; ++i) { - String value = bitmapIndex.getValue(i); - if (value != null) { - size += StringUtils.estimatedBinaryLengthAsUTF8(value) * bitmapIndex.getBitmap(bitmapIndex.getIndex(value)).size(); + long size = 0; + final int cardinality; + if (capabilities.hasBitmapIndexes()) { + final BitmapIndex bitmapIndex = columnHolder.getBitmapIndex(); + cardinality = bitmapIndex.getCardinality(); + + if (analyzingSize()) { + for (int i = 0; i < cardinality; ++i) { + String value = bitmapIndex.getValue(i); + if (value != null) { + size += StringUtils.estimatedBinaryLengthAsUTF8(value) * bitmapIndex.getBitmap(bitmapIndex.getIndex(value)) + .size(); + } } } - } - if (analyzingMinMax() && cardinality > 0) { - min = NullHandling.nullToEmptyIfNeeded(bitmapIndex.getValue(0)); - max = NullHandling.nullToEmptyIfNeeded(bitmapIndex.getValue(cardinality - 1)); + if (analyzingMinMax() && cardinality > 0) { + min = NullHandling.nullToEmptyIfNeeded(bitmapIndex.getValue(0)); + max = NullHandling.nullToEmptyIfNeeded(bitmapIndex.getValue(cardinality - 1)); + } + } else if (capabilities.isDictionaryEncoded()) { + // fallback if no bitmap index + DictionaryEncodedColumn theColumn 
= (DictionaryEncodedColumn) columnHolder.getColumn(); + cardinality = theColumn.getCardinality(); + if (analyzingMinMax() && cardinality > 0) { + min = NullHandling.nullToEmptyIfNeeded(theColumn.lookupName(0)); + max = NullHandling.nullToEmptyIfNeeded(theColumn.lookupName(cardinality - 1)); + } + } else { + cardinality = 0; } return new ColumnAnalysis( diff --git a/processing/src/main/java/org/apache/druid/query/timeboundary/TimeBoundaryQueryQueryToolChest.java b/processing/src/main/java/org/apache/druid/query/timeboundary/TimeBoundaryQueryQueryToolChest.java index 13d5521b2c2e..351903799f85 100644 --- a/processing/src/main/java/org/apache/druid/query/timeboundary/TimeBoundaryQueryQueryToolChest.java +++ b/processing/src/main/java/org/apache/druid/query/timeboundary/TimeBoundaryQueryQueryToolChest.java @@ -23,8 +23,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Function; import com.google.common.base.Functions; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.inject.Inject; import org.apache.druid.java.util.common.DateTimes; @@ -46,6 +44,7 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** */ @@ -85,20 +84,10 @@ public List filterSegments(TimeBoundaryQuery query final T min = query.isMaxTime() ? null : segments.get(0); final T max = query.isMinTime() ? 
null : segments.get(segments.size() - 1); - return Lists.newArrayList( - Iterables.filter( - segments, - new Predicate() - { - @Override - public boolean apply(T input) - { - return (min != null && input.getInterval().overlaps(min.getInterval())) || - (max != null && input.getInterval().overlaps(max.getInterval())); - } - } - ) - ); + return segments.stream() + .filter(input -> (min != null && input.getInterval().overlaps(min.getTrueInterval())) || + (max != null && input.getInterval().overlaps(max.getTrueInterval()))) + .collect(Collectors.toList()); } @Override diff --git a/processing/src/test/java/org/apache/druid/query/datasourcemetadata/DataSourceMetadataQueryTest.java b/processing/src/test/java/org/apache/druid/query/datasourcemetadata/DataSourceMetadataQueryTest.java index 2a23b547f660..3f20c25f87cd 100644 --- a/processing/src/test/java/org/apache/druid/query/datasourcemetadata/DataSourceMetadataQueryTest.java +++ b/processing/src/test/java/org/apache/druid/query/datasourcemetadata/DataSourceMetadataQueryTest.java @@ -164,6 +164,12 @@ public Interval getInterval() { return Intervals.of("2012-01-01/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -172,6 +178,12 @@ public Interval getInterval() { return Intervals.of("2012-01-01T01/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -180,6 +192,12 @@ public Interval getInterval() { return Intervals.of("2013-01-01/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -188,6 +206,12 @@ public Interval getInterval() { return Intervals.of("2013-01-01T01/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -196,6 +220,12 @@ public Interval getInterval() { return Intervals.of("2013-01-01T02/PT1H"); } + + @Override + public Interval 
getTrueInterval() + { + return getInterval(); + } } ) ); @@ -210,6 +240,12 @@ public Interval getInterval() { return Intervals.of("2013-01-01/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -218,6 +254,12 @@ public Interval getInterval() { return Intervals.of("2013-01-01T02/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } } ); @@ -226,6 +268,143 @@ public Interval getInterval() } } + @Test + public void testFilterOverlappingSegments() + { + final GenericQueryMetricsFactory queryMetricsFactory = DefaultGenericQueryMetricsFactory.instance(); + final DataSourceQueryQueryToolChest toolChest = new DataSourceQueryQueryToolChest(queryMetricsFactory); + final List segments = toolChest + .filterSegments( + null, + ImmutableList.of( + new LogicalSegment() + { + @Override + public Interval getInterval() + { + return Intervals.of("2015/2016-08-01"); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of("2015/2016-08-01"); + } + }, + new LogicalSegment() + { + @Override + public Interval getInterval() + { + return Intervals.of("2016-08-01/2017"); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of("2016-08-01/2017"); + } + }, + new LogicalSegment() + { + @Override + public Interval getInterval() + { + return Intervals.of("2017/2017-08-01"); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of("2017/2018"); + } + }, + new LogicalSegment() + { + + @Override + public Interval getInterval() + { + return Intervals.of("2017-08-01/2017-08-02"); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of("2017-08-01/2017-08-02"); + } + }, + new LogicalSegment() + { + @Override + public Interval getInterval() + { + return Intervals.of("2017-08-02/2018"); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of("2017/2018"); + } + } + ) + ); + + final 
List expected = ImmutableList.of( + new LogicalSegment() + { + @Override + public Interval getInterval() + { + return Intervals.of("2017/2017-08-01"); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of("2017/2018"); + } + }, + new LogicalSegment() + { + + @Override + public Interval getInterval() + { + return Intervals.of("2017-08-01/2017-08-02"); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of("2017-08-01/2017-08-02"); + } + }, + new LogicalSegment() + { + @Override + public Interval getInterval() + { + return Intervals.of("2017-08-02/2018"); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of("2017/2018"); + } + } + ); + + Assert.assertEquals(expected.size(), segments.size()); + + for (int i = 0; i < expected.size(); i++) { + Assert.assertEquals(expected.get(i).getInterval(), segments.get(i).getInterval()); + Assert.assertEquals(expected.get(i).getTrueInterval(), segments.get(i).getTrueInterval()); + } + } + @Test public void testResultSerialization() { diff --git a/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryQueryToolChestTest.java b/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryQueryToolChestTest.java index c841af498d02..f823937ef03b 100644 --- a/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryQueryToolChestTest.java +++ b/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryQueryToolChestTest.java @@ -39,6 +39,7 @@ import org.apache.druid.query.spec.LegacySegmentSpec; import org.apache.druid.segment.column.ValueType; import org.apache.druid.timeline.LogicalSegment; +import org.joda.time.Interval; import org.joda.time.Period; import org.junit.Assert; import org.junit.Test; @@ -292,7 +293,20 @@ public void testFilterSegments() "2000-01-09/P1D" ) .stream() - .map(interval -> (LogicalSegment) () -> Intervals.of(interval)) + .map(interval -> new LogicalSegment() 
+ { + @Override + public Interval getInterval() + { + return Intervals.of(interval); + } + + @Override + public Interval getTrueInterval() + { + return Intervals.of(interval); + } + }) .collect(Collectors.toList()) ); diff --git a/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryTest.java b/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryTest.java index c5afad436d6d..5c613acf658a 100644 --- a/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryTest.java +++ b/processing/src/test/java/org/apache/druid/query/metadata/SegmentMetadataQueryTest.java @@ -83,10 +83,16 @@ public class SegmentMetadataQueryTest public static QueryRunner makeMMappedQueryRunner( SegmentId segmentId, boolean rollup, + boolean bitmaps, QueryRunnerFactory factory ) { - QueryableIndex index = rollup ? TestIndex.getMMappedTestIndex() : TestIndex.getNoRollupMMappedTestIndex(); + QueryableIndex index; + if (bitmaps) { + index = rollup ? TestIndex.getMMappedTestIndex() : TestIndex.getNoRollupMMappedTestIndex(); + } else { + index = TestIndex.getNoBitmapMMappedTestIndex(); + } return QueryRunnerTestHelper.makeQueryRunner( factory, segmentId, @@ -99,10 +105,16 @@ public static QueryRunner makeMMappedQueryRunner( public static QueryRunner makeIncrementalIndexQueryRunner( SegmentId segmentId, boolean rollup, + boolean bitmaps, QueryRunnerFactory factory ) { - IncrementalIndex index = rollup ? TestIndex.getIncrementalTestIndex() : TestIndex.getNoRollupIncrementalTestIndex(); + IncrementalIndex index; + if (bitmaps) { + index = rollup ? 
TestIndex.getIncrementalTestIndex() : TestIndex.getNoRollupIncrementalTestIndex(); + } else { + index = TestIndex.getNoBitmapIncrementalTestIndex(); + } return QueryRunnerTestHelper.makeQueryRunner( factory, segmentId, @@ -121,17 +133,19 @@ public static QueryRunner makeIncrementalIndexQueryRunner( private final SegmentMetadataQuery testQuery; private final SegmentAnalysis expectedSegmentAnalysis1; private final SegmentAnalysis expectedSegmentAnalysis2; + private final boolean bitmaps; - @Parameterized.Parameters(name = "mmap1 = {0}, mmap2 = {1}, rollup1 = {2}, rollup2 = {3}, differentIds = {4}") + @Parameterized.Parameters(name = "mmap1 = {0}, mmap2 = {1}, rollup1 = {2}, rollup2 = {3}, differentIds = {4}, bitmaps={5}") public static Collection constructorFeeder() { return ImmutableList.of( - new Object[]{true, true, true, true, false}, - new Object[]{true, false, true, false, false}, - new Object[]{false, true, true, false, false}, - new Object[]{false, false, false, false, false}, - new Object[]{false, false, true, true, false}, - new Object[]{false, false, false, true, true} + new Object[]{true, true, true, true, false, true}, + new Object[]{true, false, true, false, false, true}, + new Object[]{false, true, true, false, false, true}, + new Object[]{false, false, false, false, false, true}, + new Object[]{false, false, true, true, false, true}, + new Object[]{false, false, false, true, true, true}, + new Object[]{true, true, false, false, false, false} ); } @@ -140,22 +154,24 @@ public SegmentMetadataQueryTest( boolean mmap2, boolean rollup1, boolean rollup2, - boolean differentIds + boolean differentIds, + boolean bitmaps ) { final SegmentId id1 = SegmentId.dummy(differentIds ? "testSegment1" : "testSegment"); final SegmentId id2 = SegmentId.dummy(differentIds ? "testSegment2" : "testSegment"); this.runner1 = mmap1 - ? makeMMappedQueryRunner(id1, rollup1, FACTORY) - : makeIncrementalIndexQueryRunner(id1, rollup1, FACTORY); + ? 
makeMMappedQueryRunner(id1, rollup1, bitmaps, FACTORY) + : makeIncrementalIndexQueryRunner(id1, rollup1, bitmaps, FACTORY); this.runner2 = mmap2 - ? makeMMappedQueryRunner(id2, rollup2, FACTORY) - : makeIncrementalIndexQueryRunner(id2, rollup2, FACTORY); + ? makeMMappedQueryRunner(id2, rollup2, bitmaps, FACTORY) + : makeIncrementalIndexQueryRunner(id2, rollup2, bitmaps, FACTORY); this.mmap1 = mmap1; this.mmap2 = mmap2; this.rollup1 = rollup1; this.rollup2 = rollup2; this.differentIds = differentIds; + this.bitmaps = bitmaps; testQuery = Druids.newSegmentMetadataQueryBuilder() .dataSource("testing") .intervals("2013/2014") @@ -169,6 +185,16 @@ public SegmentMetadataQueryTest( .merge(true) .build(); + int preferedSize1 = 0; + int placementSize2 = 0; + int overallSize1 = 119691; + int overallSize2 = 119691; + if (bitmaps) { + preferedSize1 = mmap1 ? 10881 : 10764; + placementSize2 = mmap2 ? 10881 : 0; + overallSize1 = mmap1 ? 167493 : 168188; + overallSize2 = mmap2 ? 167493 : 168188; + } expectedSegmentAnalysis1 = new SegmentAnalysis( id1.toString(), ImmutableList.of(Intervals.of("2011-01-12T00:00:00.000Z/2011-04-15T00:00:00.001Z")), @@ -187,7 +213,7 @@ public SegmentMetadataQueryTest( new ColumnAnalysis( ValueType.STRING.toString(), false, - mmap1 ? 10881 : 10764, + preferedSize1, 1, "preferred", "preferred", @@ -203,7 +229,7 @@ public SegmentMetadataQueryTest( null, null ) - ), mmap1 ? 167493 : 168188, + ), overallSize1, 1209, null, null, @@ -228,7 +254,7 @@ public SegmentMetadataQueryTest( new ColumnAnalysis( ValueType.STRING.toString(), false, - mmap2 ? 10881 : 0, + placementSize2, 1, null, null, @@ -245,7 +271,7 @@ public SegmentMetadataQueryTest( null ) // null_column will be included only for incremental index, which makes a little bigger result than expected - ), mmap2 ? 
167493 : 168188, + ), overallSize2, 1209, null, null, @@ -470,10 +496,16 @@ public void testSegmentMetadataQueryWithComplexColumnMerge() @Test public void testSegmentMetadataQueryWithDefaultAnalysisMerge() { + int size1 = 0; + int size2 = 0; + if (bitmaps) { + size1 = mmap1 ? 10881 : 10764; + size2 = mmap2 ? 10881 : 10764; + } ColumnAnalysis analysis = new ColumnAnalysis( ValueType.STRING.toString(), false, - (mmap1 ? 10881 : 10764) + (mmap2 ? 10881 : 10764), + size1 + size2, 1, "preferred", "preferred", @@ -485,10 +517,16 @@ public void testSegmentMetadataQueryWithDefaultAnalysisMerge() @Test public void testSegmentMetadataQueryWithDefaultAnalysisMerge2() { + int size1 = 0; + int size2 = 0; + if (bitmaps) { + size1 = mmap1 ? 6882 : 6808; + size2 = mmap2 ? 6882 : 6808; + } ColumnAnalysis analysis = new ColumnAnalysis( ValueType.STRING.toString(), false, - (mmap1 ? 6882 : 6808) + (mmap2 ? 6882 : 6808), + size1 + size2, 3, "spot", "upfront", @@ -500,10 +538,16 @@ public void testSegmentMetadataQueryWithDefaultAnalysisMerge2() @Test public void testSegmentMetadataQueryWithDefaultAnalysisMerge3() { + int size1 = 0; + int size2 = 0; + if (bitmaps) { + size1 = mmap1 ? 9765 : 9660; + size2 = mmap2 ? 9765 : 9660; + } ColumnAnalysis analysis = new ColumnAnalysis( ValueType.STRING.toString(), false, - (mmap1 ? 9765 : 9660) + (mmap2 ? 
9765 : 9660), + size1 + size2, 9, "automotive", "travel", @@ -923,6 +967,12 @@ public Interval getInterval() { return Intervals.of("2012-01-01/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -931,6 +981,12 @@ public Interval getInterval() { return Intervals.of("2012-01-01T01/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -939,6 +995,12 @@ public Interval getInterval() { return Intervals.of("2013-01-05/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -947,6 +1009,12 @@ public Interval getInterval() { return Intervals.of("2013-05-20/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -955,6 +1023,12 @@ public Interval getInterval() { return Intervals.of("2014-01-05/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -963,6 +1037,12 @@ public Interval getInterval() { return Intervals.of("2014-02-05/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -971,6 +1051,12 @@ public Interval getInterval() { return Intervals.of("2015-01-19T01/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -979,6 +1065,12 @@ public Interval getInterval() { return Intervals.of("2015-01-20T02/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } } ); @@ -998,6 +1090,12 @@ public Interval getInterval() { return Intervals.of("2015-01-19T01/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -1006,6 +1104,12 @@ public Interval getInterval() { return Intervals.of("2015-01-20T02/PT1H"); } + + @Override + public 
Interval getTrueInterval() + { + return getInterval(); + } } ); @@ -1031,6 +1135,12 @@ public Interval getInterval() { return Intervals.of("2013-05-20/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -1039,6 +1149,12 @@ public Interval getInterval() { return Intervals.of("2014-01-05/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -1047,6 +1163,12 @@ public Interval getInterval() { return Intervals.of("2014-02-05/P1D"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -1055,6 +1177,12 @@ public Interval getInterval() { return Intervals.of("2015-01-19T01/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } }, new LogicalSegment() { @@ -1063,6 +1191,12 @@ public Interval getInterval() { return Intervals.of("2015-01-20T02/PT1H"); } + + @Override + public Interval getTrueInterval() + { + return getInterval(); + } } ); diff --git a/processing/src/test/java/org/apache/druid/query/timeboundary/TimeBoundaryQueryQueryToolChestTest.java b/processing/src/test/java/org/apache/druid/query/timeboundary/TimeBoundaryQueryQueryToolChestTest.java index b62fb283c7f8..6ab886fe084a 100644 --- a/processing/src/test/java/org/apache/druid/query/timeboundary/TimeBoundaryQueryQueryToolChestTest.java +++ b/processing/src/test/java/org/apache/druid/query/timeboundary/TimeBoundaryQueryQueryToolChestTest.java @@ -73,6 +73,11 @@ public class TimeBoundaryQueryQueryToolChestTest .build(); private static LogicalSegment createLogicalSegment(final Interval interval) + { + return createLogicalSegment(interval, interval); + } + + private static LogicalSegment createLogicalSegment(final Interval interval, final Interval trueInterval) { return new LogicalSegment() { @@ -81,6 +86,12 @@ public Interval getInterval() { return interval; } + + @Override + public 
Interval getTrueInterval() + { + return trueInterval; + } }; } @@ -116,6 +127,35 @@ public void testFilterSegments() } } + @Test + public void testFilterOverlapingSegments() + { + final List actual = new TimeBoundaryQueryQueryToolChest().filterSegments( + TIME_BOUNDARY_QUERY, + Arrays.asList( + createLogicalSegment(Intervals.of("2015/2016-08-01")), + createLogicalSegment(Intervals.of("2016-08-01/2017")), + createLogicalSegment(Intervals.of("2017/2017-08-01"), Intervals.of("2017/2018")), + createLogicalSegment(Intervals.of("2017-08-01/2017-08-02")), + createLogicalSegment(Intervals.of("2017-08-02/2018"), Intervals.of("2017/2018")) + ) + ); + + final List expected = Arrays.asList( + createLogicalSegment(Intervals.of("2015/2016-08-01")), + createLogicalSegment(Intervals.of("2017/2017-08-01"), Intervals.of("2017/2018")), + createLogicalSegment(Intervals.of("2017-08-01/2017-08-02")), + createLogicalSegment(Intervals.of("2017-08-02/2018"), Intervals.of("2017/2018")) + ); + + Assert.assertEquals(expected.size(), actual.size()); + + for (int i = 0; i < actual.size(); i++) { + Assert.assertEquals(expected.get(i).getInterval(), actual.get(i).getInterval()); + Assert.assertEquals(expected.get(i).getTrueInterval(), actual.get(i).getTrueInterval()); + } + } + @Test public void testMaxTimeFilterSegments() { @@ -145,6 +185,62 @@ public void testMaxTimeFilterSegments() } } + @Test + public void testMaxTimeFilterOverlapingSegments() + { + final List actual = new TimeBoundaryQueryQueryToolChest().filterSegments( + MAXTIME_BOUNDARY_QUERY, + Arrays.asList( + createLogicalSegment(Intervals.of("2015/2016-08-01")), + createLogicalSegment(Intervals.of("2016-08-01/2017")), + createLogicalSegment(Intervals.of("2017/2017-08-01"), Intervals.of("2017/2018")), + createLogicalSegment(Intervals.of("2017-08-01/2017-08-02")), + createLogicalSegment(Intervals.of("2017-08-02/2018"), Intervals.of("2017/2018")) + ) + ); + + final List expected = Arrays.asList( + 
createLogicalSegment(Intervals.of("2017/2017-08-01"), Intervals.of("2017/2018")), + createLogicalSegment(Intervals.of("2017-08-01/2017-08-02")), + createLogicalSegment(Intervals.of("2017-08-02/2018"), Intervals.of("2017/2018")) + ); + + Assert.assertEquals(expected.size(), actual.size()); + + for (int i = 0; i < actual.size(); i++) { + Assert.assertEquals(expected.get(i).getInterval(), actual.get(i).getInterval()); + Assert.assertEquals(expected.get(i).getTrueInterval(), actual.get(i).getTrueInterval()); + } + } + + @Test + public void testMinTimeFilterOverlapingSegments() + { + final List actual = new TimeBoundaryQueryQueryToolChest().filterSegments( + MINTIME_BOUNDARY_QUERY, + Arrays.asList( + createLogicalSegment(Intervals.of("2017/2017-08-01"), Intervals.of("2017/2018")), + createLogicalSegment(Intervals.of("2017-08-01/2017-08-02")), + createLogicalSegment(Intervals.of("2017-08-02/2018"), Intervals.of("2017/2018")), + createLogicalSegment(Intervals.of("2018/2018-08-01")), + createLogicalSegment(Intervals.of("2018-08-01/2019")) + ) + ); + + final List expected = Arrays.asList( + createLogicalSegment(Intervals.of("2017/2017-08-01"), Intervals.of("2017/2018")), + createLogicalSegment(Intervals.of("2017-08-01/2017-08-02")), + createLogicalSegment(Intervals.of("2017-08-02/2018"), Intervals.of("2017/2018")) + ); + + Assert.assertEquals(expected.size(), actual.size()); + + for (int i = 0; i < actual.size(); i++) { + Assert.assertEquals(expected.get(i).getInterval(), actual.get(i).getInterval()); + Assert.assertEquals(expected.get(i).getTrueInterval(), actual.get(i).getTrueInterval()); + } + } + @Test public void testMinTimeFilterSegments() { @@ -192,6 +288,7 @@ public void testFilteredFilterSegments() Assert.assertEquals(7, segments.size()); } + @Test public void testCacheStrategy() throws Exception { diff --git a/processing/src/test/java/org/apache/druid/segment/TestIndex.java b/processing/src/test/java/org/apache/druid/segment/TestIndex.java index 
72b52873386f..87d8abf11586 100644 --- a/processing/src/test/java/org/apache/druid/segment/TestIndex.java +++ b/processing/src/test/java/org/apache/druid/segment/TestIndex.java @@ -20,6 +20,7 @@ package org.apache.druid.segment; import com.google.common.base.Supplier; +import com.google.common.base.Suppliers; import com.google.common.base.Throwables; import com.google.common.io.CharSource; import com.google.common.io.LineProcessor; @@ -111,12 +112,31 @@ public class TestIndex new StringDimensionSchema("null_column") ); + public static final List DIMENSION_SCHEMAS_NO_BITMAP = Arrays.asList( + new StringDimensionSchema("market", null, false), + new StringDimensionSchema("quality", null, false), + new LongDimensionSchema("qualityLong"), + new FloatDimensionSchema("qualityFloat"), + new DoubleDimensionSchema("qualityDouble"), + new StringDimensionSchema("qualityNumericString", null, false), + new StringDimensionSchema("placement", null, false), + new StringDimensionSchema("placementish", null, false), + new StringDimensionSchema("partial_null_column", null, false), + new StringDimensionSchema("null_column", null, false) + ); + public static final DimensionsSpec DIMENSIONS_SPEC = new DimensionsSpec( DIMENSION_SCHEMAS, null, null ); + public static final DimensionsSpec DIMENSIONS_SPEC_NO_BITMAPS = new DimensionsSpec( + DIMENSION_SCHEMAS_NO_BITMAP, + null, + null + ); + public static final String[] DOUBLE_METRICS = new String[]{"index", "indexMin", "indexMaxPlusTen"}; public static final String[] FLOAT_METRICS = new String[]{"indexFloat", "indexMinFloat", "indexMaxFloat"}; private static final Logger log = new Logger(TestIndex.class); @@ -147,107 +167,95 @@ public class TestIndex } } - private static IncrementalIndex realtimeIndex = null; - private static IncrementalIndex noRollupRealtimeIndex = null; - private static QueryableIndex mmappedIndex = null; - private static QueryableIndex noRollupMmappedIndex = null; - private static QueryableIndex mergedRealtime = null; + 
private static Supplier realtimeIndex = Suppliers.memoize( + () -> makeRealtimeIndex("druid.sample.numeric.tsv") + ); + private static Supplier noRollupRealtimeIndex = Suppliers.memoize( + () -> makeRealtimeIndex("druid.sample.numeric.tsv", false) + ); + private static Supplier noBitmapRealtimeIndex = Suppliers.memoize( + () -> makeRealtimeIndex("druid.sample.numeric.tsv", false, false) + ); + private static Supplier mmappedIndex = Suppliers.memoize( + () -> persistRealtimeAndLoadMMapped(realtimeIndex.get()) + ); + private static Supplier noRollupMmappedIndex = Suppliers.memoize( + () -> persistRealtimeAndLoadMMapped(noRollupRealtimeIndex.get()) + ); + private static Supplier noBitmapMmappedIndex = Suppliers.memoize( + () -> persistRealtimeAndLoadMMapped(noBitmapRealtimeIndex.get()) + ); + private static Supplier mergedRealtime = Suppliers.memoize(() -> { + try { + IncrementalIndex top = makeRealtimeIndex("druid.sample.numeric.tsv.top"); + IncrementalIndex bottom = makeRealtimeIndex("druid.sample.numeric.tsv.bottom"); + + File tmpFile = File.createTempFile("yay", "who"); + tmpFile.delete(); + + File topFile = new File(tmpFile, "top"); + File bottomFile = new File(tmpFile, "bottom"); + File mergedFile = new File(tmpFile, "merged"); + + topFile.mkdirs(); + topFile.deleteOnExit(); + bottomFile.mkdirs(); + bottomFile.deleteOnExit(); + mergedFile.mkdirs(); + mergedFile.deleteOnExit(); + + INDEX_MERGER.persist(top, DATA_INTERVAL, topFile, indexSpec, null); + INDEX_MERGER.persist(bottom, DATA_INTERVAL, bottomFile, indexSpec, null); + + return INDEX_IO.loadIndex( + INDEX_MERGER.mergeQueryableIndex( + Arrays.asList(INDEX_IO.loadIndex(topFile), INDEX_IO.loadIndex(bottomFile)), + true, + METRIC_AGGS, + mergedFile, + indexSpec, + null + ) + ); + } + catch (IOException e) { + throw Throwables.propagate(e); + } + }); public static IncrementalIndex getIncrementalTestIndex() { - synchronized (log) { - if (realtimeIndex != null) { - return realtimeIndex; - } - } - - return 
realtimeIndex = makeRealtimeIndex("druid.sample.numeric.tsv"); + return realtimeIndex.get(); } public static IncrementalIndex getNoRollupIncrementalTestIndex() { - synchronized (log) { - if (noRollupRealtimeIndex != null) { - return noRollupRealtimeIndex; - } - } + return noRollupRealtimeIndex.get(); + } - return noRollupRealtimeIndex = makeRealtimeIndex("druid.sample.numeric.tsv", false); + public static IncrementalIndex getNoBitmapIncrementalTestIndex() + { + return noBitmapRealtimeIndex.get(); } public static QueryableIndex getMMappedTestIndex() { - synchronized (log) { - if (mmappedIndex != null) { - return mmappedIndex; - } - } - - IncrementalIndex incrementalIndex = getIncrementalTestIndex(); - mmappedIndex = persistRealtimeAndLoadMMapped(incrementalIndex); - - return mmappedIndex; + return mmappedIndex.get(); } public static QueryableIndex getNoRollupMMappedTestIndex() { - synchronized (log) { - if (noRollupMmappedIndex != null) { - return noRollupMmappedIndex; - } - } - - IncrementalIndex incrementalIndex = getNoRollupIncrementalTestIndex(); - noRollupMmappedIndex = persistRealtimeAndLoadMMapped(incrementalIndex); + return noRollupMmappedIndex.get(); + } - return noRollupMmappedIndex; + public static QueryableIndex getNoBitmapMMappedTestIndex() + { + return noBitmapMmappedIndex.get(); } public static QueryableIndex mergedRealtimeIndex() { - synchronized (log) { - if (mergedRealtime != null) { - return mergedRealtime; - } - - try { - IncrementalIndex top = makeRealtimeIndex("druid.sample.numeric.tsv.top"); - IncrementalIndex bottom = makeRealtimeIndex("druid.sample.numeric.tsv.bottom"); - - File tmpFile = File.createTempFile("yay", "who"); - tmpFile.delete(); - - File topFile = new File(tmpFile, "top"); - File bottomFile = new File(tmpFile, "bottom"); - File mergedFile = new File(tmpFile, "merged"); - - topFile.mkdirs(); - topFile.deleteOnExit(); - bottomFile.mkdirs(); - bottomFile.deleteOnExit(); - mergedFile.mkdirs(); - mergedFile.deleteOnExit(); - - 
INDEX_MERGER.persist(top, DATA_INTERVAL, topFile, indexSpec, null); - INDEX_MERGER.persist(bottom, DATA_INTERVAL, bottomFile, indexSpec, null); - - mergedRealtime = INDEX_IO.loadIndex( - INDEX_MERGER.mergeQueryableIndex( - Arrays.asList(INDEX_IO.loadIndex(topFile), INDEX_IO.loadIndex(bottomFile)), - true, - METRIC_AGGS, - mergedFile, - indexSpec, - null - ) - ); - - return mergedRealtime; - } - catch (IOException e) { - throw Throwables.propagate(e); - } - } + return mergedRealtime.get(); } public static IncrementalIndex makeRealtimeIndex(final String resourceFilename) @@ -256,6 +264,11 @@ public static IncrementalIndex makeRealtimeIndex(final String resourceFilename) } public static IncrementalIndex makeRealtimeIndex(final String resourceFilename, boolean rollup) + { + return makeRealtimeIndex(resourceFilename, rollup, true); + } + + public static IncrementalIndex makeRealtimeIndex(final String resourceFilename, boolean rollup, boolean bitmap) { final URL resource = TestIndex.class.getClassLoader().getResource(resourceFilename); if (resource == null) { @@ -263,20 +276,20 @@ public static IncrementalIndex makeRealtimeIndex(final String resourceFilename, } log.info("Realtime loading index file[%s]", resource); CharSource stream = Resources.asByteSource(resource).asCharSource(StandardCharsets.UTF_8); - return makeRealtimeIndex(stream, rollup); + return makeRealtimeIndex(stream, rollup, bitmap); } public static IncrementalIndex makeRealtimeIndex(final CharSource source) { - return makeRealtimeIndex(source, true); + return makeRealtimeIndex(source, true, true); } - public static IncrementalIndex makeRealtimeIndex(final CharSource source, boolean rollup) + public static IncrementalIndex makeRealtimeIndex(final CharSource source, boolean rollup, boolean bitmap) { final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder() .withMinTimestamp(DateTimes.of("2011-01-12T00:00:00.000Z").getMillis()) .withTimestampSpec(new TimestampSpec("ds", "auto", null)) - 
.withDimensionsSpec(DIMENSIONS_SPEC) + .withDimensionsSpec(bitmap ? DIMENSIONS_SPEC : DIMENSIONS_SPEC_NO_BITMAPS) .withVirtualColumns(VIRTUAL_COLUMNS) .withMetrics(METRIC_AGGS) .withRollup(rollup) diff --git a/server/pom.xml b/server/pom.xml index 9bdafbb75801..e676622e4a50 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT diff --git a/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java b/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java index de034af5dfc2..14bf3395add3 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java @@ -28,6 +28,8 @@ import javax.annotation.Nonnull; import javax.annotation.Nullable; +import javax.validation.constraints.Max; +import javax.validation.constraints.Min; import java.util.Collection; import java.util.HashSet; import java.util.Objects; @@ -56,8 +58,8 @@ public class CoordinatorDynamicConfig private final boolean emitBalancingStats; private final boolean killAllDataSources; private final Set killableDataSources; - private final Set historicalNodesInMaintenance; - private final int nodesInMaintenancePriority; + private final Set decommissioningNodes; + private final int decommissioningMaxPercentOfMaxSegmentsToMove; // The pending segments of the dataSources in this list are not killed. 
private final Set protectedPendingSegmentDatasources; @@ -88,8 +90,8 @@ public CoordinatorDynamicConfig( @JsonProperty("killAllDataSources") boolean killAllDataSources, @JsonProperty("killPendingSegmentsSkipList") Object protectedPendingSegmentDatasources, @JsonProperty("maxSegmentsInNodeLoadingQueue") int maxSegmentsInNodeLoadingQueue, - @JsonProperty("historicalNodesInMaintenance") Object historicalNodesInMaintenance, - @JsonProperty("nodesInMaintenancePriority") int nodesInMaintenancePriority + @JsonProperty("decommissioningNodes") Object decommissioningNodes, + @JsonProperty("decommissioningMaxPercentOfMaxSegmentsToMove") int decommissioningMaxPercentOfMaxSegmentsToMove ) { this.millisToWaitBeforeDeleting = millisToWaitBeforeDeleting; @@ -104,12 +106,12 @@ public CoordinatorDynamicConfig( this.killableDataSources = parseJsonStringOrArray(killableDataSources); this.protectedPendingSegmentDatasources = parseJsonStringOrArray(protectedPendingSegmentDatasources); this.maxSegmentsInNodeLoadingQueue = maxSegmentsInNodeLoadingQueue; - this.historicalNodesInMaintenance = parseJsonStringOrArray(historicalNodesInMaintenance); + this.decommissioningNodes = parseJsonStringOrArray(decommissioningNodes); Preconditions.checkArgument( - nodesInMaintenancePriority >= 0 && nodesInMaintenancePriority <= 10, - "nodesInMaintenancePriority should be in range [0, 10]" + decommissioningMaxPercentOfMaxSegmentsToMove >= 0 && decommissioningMaxPercentOfMaxSegmentsToMove <= 100, + "decommissioningMaxPercentOfMaxSegmentsToMove should be in range [0, 100]" ); - this.nodesInMaintenancePriority = nodesInMaintenancePriority; + this.decommissioningMaxPercentOfMaxSegmentsToMove = decommissioningMaxPercentOfMaxSegmentsToMove; if (this.killAllDataSources && !this.killableDataSources.isEmpty()) { throw new IAE("can't have killAllDataSources and non-empty killDataSourceWhitelist"); @@ -231,32 +233,37 @@ public int getMaxSegmentsInNodeLoadingQueue() } /** - * Historical nodes list in maintenance 
mode. Coordinator doesn't assign new segments on those nodes and moves - * segments from those nodes according to a specified priority. + * List of historical servers to 'decommission'. Coordinator will not assign new segments to 'decommissioning' servers, + * and segments will be moved away from them to be placed on non-decommissioning servers at the maximum rate specified by + * {@link CoordinatorDynamicConfig#getDecommissioningMaxPercentOfMaxSegmentsToMove}. * * @return list of host:port entries */ @JsonProperty - public Set getHistoricalNodesInMaintenance() + public Set getDecommissioningNodes() { - return historicalNodesInMaintenance; + return decommissioningNodes; + } /** - * Priority of segments from servers in maintenance. Coordinator takes ceil(maxSegmentsToMove * (priority / 10)) - * from servers in maitenance during balancing phase, i.e.: - * 0 - no segments from servers in maintenance will be processed during balancing - * 5 - 50% segments from servers in maintenance - * 10 - 100% segments from servers in maintenance - * By leveraging the priority an operator can prevent general nodes from overload or decrease maitenance time - * instead. + * The percent of {@link CoordinatorDynamicConfig#getMaxSegmentsToMove()} that determines the maximum number of + * segments that may be moved away from 'decommissioning' servers (specified by + * {@link CoordinatorDynamicConfig#getDecommissioningNodes()}) to non-decommissioning servers during one Coordinator + * balancer run. If this value is 0, segments will neither be moved from or to 'decommissioning' servers, effectively + * putting them in a sort of "maintenance" mode that will not participate in balancing or assignment by load rules. + * Decommissioning can also become stalled if there are no available active servers to place the segments. By + * adjusting this value, an operator can prevent active servers from overload by prioritizing balancing, or + * decrease decommissioning time instead. 
* - * @return number in range [0, 10] + * @return number in range [0, 100] */ + @Min(0) + @Max(100) @JsonProperty - public int getNodesInMaintenancePriority() + public int getDecommissioningMaxPercentOfMaxSegmentsToMove() { - return nodesInMaintenancePriority; + return decommissioningMaxPercentOfMaxSegmentsToMove; } @Override @@ -275,8 +282,8 @@ public String toString() ", killDataSourceWhitelist=" + killableDataSources + ", protectedPendingSegmentDatasources=" + protectedPendingSegmentDatasources + ", maxSegmentsInNodeLoadingQueue=" + maxSegmentsInNodeLoadingQueue + - ", historicalNodesInMaintenance=" + historicalNodesInMaintenance + - ", nodesInMaintenancePriority=" + nodesInMaintenancePriority + + ", decommissioningNodes=" + decommissioningNodes + + ", decommissioningMaxPercentOfMaxSegmentsToMove=" + decommissioningMaxPercentOfMaxSegmentsToMove + '}'; } @@ -328,10 +335,10 @@ public boolean equals(Object o) if (!Objects.equals(protectedPendingSegmentDatasources, that.protectedPendingSegmentDatasources)) { return false; } - if (!Objects.equals(historicalNodesInMaintenance, that.historicalNodesInMaintenance)) { + if (!Objects.equals(decommissioningNodes, that.decommissioningNodes)) { return false; } - return nodesInMaintenancePriority == that.nodesInMaintenancePriority; + return decommissioningMaxPercentOfMaxSegmentsToMove == that.decommissioningMaxPercentOfMaxSegmentsToMove; } @Override @@ -350,8 +357,8 @@ public int hashCode() maxSegmentsInNodeLoadingQueue, killableDataSources, protectedPendingSegmentDatasources, - historicalNodesInMaintenance, - nodesInMaintenancePriority + decommissioningNodes, + decommissioningMaxPercentOfMaxSegmentsToMove ); } @@ -372,7 +379,7 @@ public static class Builder private static final boolean DEFAULT_EMIT_BALANCING_STATS = false; private static final boolean DEFAULT_KILL_ALL_DATA_SOURCES = false; private static final int DEFAULT_MAX_SEGMENTS_IN_NODE_LOADING_QUEUE = 0; - private static final int 
DEFAULT_MAINTENANCE_MODE_SEGMENTS_PRIORITY = 7; + private static final int DEFAULT_DECOMMISSIONING_MAX_SEGMENTS_TO_MOVE_PERCENT = 70; private Long millisToWaitBeforeDeleting; private Long mergeBytesLimit; @@ -386,8 +393,8 @@ public static class Builder private Boolean killAllDataSources; private Object killPendingSegmentsSkipList; private Integer maxSegmentsInNodeLoadingQueue; - private Object maintenanceList; - private Integer maintenanceModeSegmentsPriority; + private Object decommissioningNodes; + private Integer decommissioningMaxPercentOfMaxSegmentsToMove; public Builder() { @@ -407,8 +414,8 @@ public Builder( @JsonProperty("killAllDataSources") @Nullable Boolean killAllDataSources, @JsonProperty("killPendingSegmentsSkipList") @Nullable Object killPendingSegmentsSkipList, @JsonProperty("maxSegmentsInNodeLoadingQueue") @Nullable Integer maxSegmentsInNodeLoadingQueue, - @JsonProperty("historicalNodesInMaintenance") @Nullable Object maintenanceList, - @JsonProperty("nodesInMaintenancePriority") @Nullable Integer maintenanceModeSegmentsPriority + @JsonProperty("decommissioningNodes") @Nullable Object decommissioningNodes, + @JsonProperty("decommissioningMaxPercentOfMaxSegmentsToMove") @Nullable Integer decommissioningMaxPercentOfMaxSegmentsToMove ) { this.millisToWaitBeforeDeleting = millisToWaitBeforeDeleting; @@ -423,8 +430,8 @@ public Builder( this.killableDataSources = killableDataSources; this.killPendingSegmentsSkipList = killPendingSegmentsSkipList; this.maxSegmentsInNodeLoadingQueue = maxSegmentsInNodeLoadingQueue; - this.maintenanceList = maintenanceList; - this.maintenanceModeSegmentsPriority = maintenanceModeSegmentsPriority; + this.decommissioningNodes = decommissioningNodes; + this.decommissioningMaxPercentOfMaxSegmentsToMove = decommissioningMaxPercentOfMaxSegmentsToMove; } public Builder withMillisToWaitBeforeDeleting(long millisToWaitBeforeDeleting) @@ -493,15 +500,15 @@ public Builder withMaxSegmentsInNodeLoadingQueue(int 
maxSegmentsInNodeLoadingQue return this; } - public Builder withMaintenanceList(Set list) + public Builder withDecommissioningNodes(Set decommissioning) { - this.maintenanceList = list; + this.decommissioningNodes = decommissioning; return this; } - public Builder withMaintenanceModeSegmentsPriority(Integer priority) + public Builder withDecommissioningMaxPercentOfMaxSegmentsToMove(Integer percent) { - this.maintenanceModeSegmentsPriority = priority; + this.decommissioningMaxPercentOfMaxSegmentsToMove = percent; return this; } @@ -522,10 +529,10 @@ public CoordinatorDynamicConfig build() maxSegmentsInNodeLoadingQueue == null ? DEFAULT_MAX_SEGMENTS_IN_NODE_LOADING_QUEUE : maxSegmentsInNodeLoadingQueue, - maintenanceList, - maintenanceModeSegmentsPriority == null - ? DEFAULT_MAINTENANCE_MODE_SEGMENTS_PRIORITY - : maintenanceModeSegmentsPriority + decommissioningNodes, + decommissioningMaxPercentOfMaxSegmentsToMove == null + ? DEFAULT_DECOMMISSIONING_MAX_SEGMENTS_TO_MOVE_PERCENT + : decommissioningMaxPercentOfMaxSegmentsToMove ); } @@ -548,10 +555,10 @@ public CoordinatorDynamicConfig build(CoordinatorDynamicConfig defaults) maxSegmentsInNodeLoadingQueue == null ? defaults.getMaxSegmentsInNodeLoadingQueue() : maxSegmentsInNodeLoadingQueue, - maintenanceList == null ? defaults.getHistoricalNodesInMaintenance() : maintenanceList, - maintenanceModeSegmentsPriority == null - ? defaults.getNodesInMaintenancePriority() - : maintenanceModeSegmentsPriority + decommissioningNodes == null ? defaults.getDecommissioningNodes() : decommissioningNodes, + decommissioningMaxPercentOfMaxSegmentsToMove == null + ? 
defaults.getDecommissioningMaxPercentOfMaxSegmentsToMove() + : decommissioningMaxPercentOfMaxSegmentsToMove ); } } diff --git a/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java b/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java index b92effc8651f..c20ae0c5d514 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java @@ -694,7 +694,7 @@ public CoordinatorHistoricalManagerRunnable(final int startingLeaderCounter) } // Find all historical servers, group them by subType and sort by ascending usage - Set nodesInMaintenance = params.getCoordinatorDynamicConfig().getHistoricalNodesInMaintenance(); + Set decommissioningServers = params.getCoordinatorDynamicConfig().getDecommissioningNodes(); final DruidCluster cluster = new DruidCluster(); for (ImmutableDruidServer server : servers) { if (!loadManagementPeons.containsKey(server.getName())) { @@ -709,7 +709,7 @@ public CoordinatorHistoricalManagerRunnable(final int startingLeaderCounter) new ServerHolder( server, loadManagementPeons.get(server.getName()), - nodesInMaintenance.contains(server.getHost()) + decommissioningServers.contains(server.getHost()) ) ); } diff --git a/server/src/main/java/org/apache/druid/server/coordinator/ServerHolder.java b/server/src/main/java/org/apache/druid/server/coordinator/ServerHolder.java index c7d7a86c825c..ba96566a4dfd 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/ServerHolder.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/ServerHolder.java @@ -32,18 +32,18 @@ public class ServerHolder implements Comparable private static final Logger log = new Logger(ServerHolder.class); private final ImmutableDruidServer server; private final LoadQueuePeon peon; - private final boolean inMaintenance; + private final boolean isDecommissioning; public ServerHolder(ImmutableDruidServer 
server, LoadQueuePeon peon) { this(server, peon, false); } - public ServerHolder(ImmutableDruidServer server, LoadQueuePeon peon, boolean inMaintenance) + public ServerHolder(ImmutableDruidServer server, LoadQueuePeon peon, boolean isDecommissioning) { this.server = server; this.peon = peon; - this.inMaintenance = inMaintenance; + this.isDecommissioning = isDecommissioning; } public ImmutableDruidServer getServer() @@ -82,14 +82,15 @@ public double getPercentUsed() } /** - * Historical nodes can be placed in maintenance mode, which instructs Coordinator to move segments from them - * according to a specified priority. The mechanism allows to drain segments from nodes which are planned for - * replacement. - * @return true if the node is in maitenance mode + * Historical nodes can be 'decommissioned', which instructs Coordinator to move segments from them according to + * the percent of move operations diverted from normal balancer moves for this purpose by + * {@link CoordinatorDynamicConfig#getDecommissioningMaxPercentOfMaxSegmentsToMove()}. The mechanism allows draining + * segments from nodes which are planned for replacement. + * @return true if the node is decommissioning */ - public boolean isInMaintenance() + public boolean isDecommissioning() { - return inMaintenance; + return isDecommissioning; } public long getAvailableSize() diff --git a/server/src/main/java/org/apache/druid/server/coordinator/helper/DruidCoordinatorBalancer.java b/server/src/main/java/org/apache/druid/server/coordinator/helper/DruidCoordinatorBalancer.java index 10499ac807a4..cf8d7253191f 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/helper/DruidCoordinatorBalancer.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/helper/DruidCoordinatorBalancer.java @@ -95,37 +95,41 @@ private void balanceTier( { if (params.getAvailableSegments().size() == 0) { - log.info("Metadata segments are not available. 
Cannot balance."); + log.warn("Metadata segments are not available. Cannot balance."); + // suppress emit zero stats return; } currentlyMovingSegments.computeIfAbsent(tier, t -> new ConcurrentHashMap<>()); if (!currentlyMovingSegments.get(tier).isEmpty()) { reduceLifetimes(tier); - log.info("[%s]: Still waiting on %,d segments to be moved", tier, currentlyMovingSegments.get(tier).size()); + log.info( + "[%s]: Still waiting on %,d segments to be moved. Skipping balance.", + tier, + currentlyMovingSegments.get(tier).size() + ); + // suppress emit zero stats return; } /* - Take as much segments from maintenance servers as priority allows and find the best location for them on - available servers. After that, balance segments within available servers pool. + Take as many segments from decommissioning servers as decommissioningMaxPercentOfMaxSegmentsToMove allows and find + the best location for them on active servers. After that, balance segments within active servers pool. */ Map> partitions = - servers.stream().collect(Collectors.partitioningBy(ServerHolder::isInMaintenance)); - final List maintenanceServers = partitions.get(true); - final List availableServers = partitions.get(false); + servers.stream().collect(Collectors.partitioningBy(ServerHolder::isDecommissioning)); + final List decommissioningServers = partitions.get(true); + final List activeServers = partitions.get(false); log.info( - "Found %d servers in maintenance, %d available servers servers", - maintenanceServers.size(), - availableServers.size() + "Found %d active servers, %d decommissioning servers", + activeServers.size(), + decommissioningServers.size() ); - if (maintenanceServers.isEmpty()) { - if (availableServers.size() <= 1) { - log.info("[%s]: %d available servers servers found. Cannot balance.", tier, availableServers.size()); - } - } else if (availableServers.isEmpty()) { - log.info("[%s]: no available servers servers found during maintenance. 
Cannot balance.", tier); + if ((decommissioningServers.isEmpty() && activeServers.size() <= 1) || activeServers.isEmpty()) { + log.warn("[%s]: insufficient active servers. Cannot balance.", tier); + // suppress emit zero stats + return; } int numSegments = 0; @@ -134,23 +138,30 @@ private void balanceTier( } if (numSegments == 0) { - log.info("No segments found. Cannot balance."); + log.info("No segments found. Cannot balance."); + // suppress emit zero stats return; } final int maxSegmentsToMove = Math.min(params.getCoordinatorDynamicConfig().getMaxSegmentsToMove(), numSegments); - int priority = params.getCoordinatorDynamicConfig().getNodesInMaintenancePriority(); - int maxMaintenanceSegmentsToMove = (int) Math.ceil(maxSegmentsToMove * priority / 10.0); - log.info("Processing %d segments from servers in maintenance mode", maxMaintenanceSegmentsToMove); - Pair maintenanceResult = - balanceServers(params, maintenanceServers, availableServers, maxMaintenanceSegmentsToMove); - int maxGeneralSegmentsToMove = maxSegmentsToMove - maintenanceResult.lhs; - log.info("Processing %d segments from servers in general mode", maxGeneralSegmentsToMove); + int decommissioningMaxPercentOfMaxSegmentsToMove = + params.getCoordinatorDynamicConfig().getDecommissioningMaxPercentOfMaxSegmentsToMove(); + int maxSegmentsToMoveFromDecommissioningNodes = + (int) Math.ceil(maxSegmentsToMove * (decommissioningMaxPercentOfMaxSegmentsToMove / 100.0)); + log.info( + "Processing %d segments for moving from decommissioning servers", + maxSegmentsToMoveFromDecommissioningNodes + ); + Pair decommissioningResult = + balanceServers(params, decommissioningServers, activeServers, maxSegmentsToMoveFromDecommissioningNodes); + + int maxGeneralSegmentsToMove = maxSegmentsToMove - decommissioningResult.lhs; + log.info("Processing %d segments for balancing between active servers", maxGeneralSegmentsToMove); Pair generalResult = - balanceServers(params, availableServers, availableServers, 
maxGeneralSegmentsToMove); + balanceServers(params, activeServers, activeServers, maxGeneralSegmentsToMove); - int moved = generalResult.lhs + maintenanceResult.lhs; - int unmoved = generalResult.rhs + maintenanceResult.rhs; + int moved = generalResult.lhs + decommissioningResult.lhs; + int unmoved = generalResult.rhs + decommissioningResult.rhs; if (unmoved == maxSegmentsToMove) { // Cluster should be alive and constantly adjusting log.info("No good moves found in tier [%s]", tier); diff --git a/server/src/main/java/org/apache/druid/server/coordinator/rules/BroadcastDistributionRule.java b/server/src/main/java/org/apache/druid/server/coordinator/rules/BroadcastDistributionRule.java index b28f569e55b4..658171236adc 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/rules/BroadcastDistributionRule.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/rules/BroadcastDistributionRule.java @@ -46,7 +46,7 @@ public CoordinatorStats run(DruidCoordinator coordinator, DruidCoordinatorRuntim } else { params.getDruidCluster().getAllServers().forEach( eachHolder -> { - if (!eachHolder.isInMaintenance() + if (!eachHolder.isDecommissioning() && colocatedDataSources.stream() .anyMatch(source -> eachHolder.getServer().getDataSource(source) != null)) { loadServerHolders.add(eachHolder); diff --git a/server/src/main/java/org/apache/druid/server/coordinator/rules/LoadRule.java b/server/src/main/java/org/apache/druid/server/coordinator/rules/LoadRule.java index 3de93cfdaf99..1de3479fe30e 100644 --- a/server/src/main/java/org/apache/druid/server/coordinator/rules/LoadRule.java +++ b/server/src/main/java/org/apache/druid/server/coordinator/rules/LoadRule.java @@ -153,8 +153,8 @@ private static List getFilteredHolders( log.makeAlert("Tier[%s] has no servers! 
Check your cluster configuration!", tier).emit(); return Collections.emptyList(); } - Predicate isNotInMaintenance = s -> !s.isInMaintenance(); - return queue.stream().filter(isNotInMaintenance.and(predicate)).collect(Collectors.toList()); + Predicate isActive = s -> !s.isDecommissioning(); + return queue.stream().filter(isActive.and(predicate)).collect(Collectors.toList()); } /** @@ -385,14 +385,14 @@ private static int dropForTier( Map> holders = holdersInTier.stream() .filter(s -> s.isServingSegment(segment)) .collect(Collectors.partitioningBy( - ServerHolder::isInMaintenance, + ServerHolder::isDecommissioning, Collectors.toCollection(TreeSet::new) )); - TreeSet maintenanceServers = holders.get(true); - TreeSet availableServers = holders.get(false); - int left = dropSegmentFromServers(balancerStrategy, segment, maintenanceServers, numToDrop); + TreeSet decommissioningServers = holders.get(true); + TreeSet activeServers = holders.get(false); + int left = dropSegmentFromServers(balancerStrategy, segment, decommissioningServers, numToDrop); if (left > 0) { - left = dropSegmentFromServers(balancerStrategy, segment, availableServers, left); + left = dropSegmentFromServers(balancerStrategy, segment, activeServers, left); } if (left != 0) { log.warn("Wtf, holder was null? 
I have no servers serving [%s]?", segment.getId()); diff --git a/server/src/main/java/org/apache/druid/server/http/LookupCoordinatorResource.java b/server/src/main/java/org/apache/druid/server/http/LookupCoordinatorResource.java index 90e56941ce7d..b79acea71612 100644 --- a/server/src/main/java/org/apache/druid/server/http/LookupCoordinatorResource.java +++ b/server/src/main/java/org/apache/druid/server/http/LookupCoordinatorResource.java @@ -100,7 +100,8 @@ public Response getTiers( if (discover) { return Response.ok().entity(lookupCoordinatorManager.discoverTiers()).build(); } - final Map> knownLookups = lookupCoordinatorManager.getKnownLookups(); + final Map> knownLookups = lookupCoordinatorManager + .getKnownLookups(); if (knownLookups == null) { return Response.status(Response.Status.NOT_FOUND).build(); } else { @@ -113,6 +114,26 @@ public Response getTiers( } } + @GET + @Produces({MediaType.APPLICATION_JSON}) + @Path("/config/all") + public Response getAllLookupSpecs() + { + try { + final Map> knownLookups = lookupCoordinatorManager + .getKnownLookups(); + if (knownLookups == null) { + return Response.status(Response.Status.NOT_FOUND).build(); + } else { + return Response.ok().entity(knownLookups).build(); + } + } + catch (Exception ex) { + LOG.error(ex, "Error getting lookups status"); + return Response.serverError().entity(ServletResourceUtils.sanitizeException(ex)).build(); + } + } + @POST @Path("/config") @Produces({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE}) @@ -314,14 +335,16 @@ public Response getAllLookupsStatus( ) { try { - Map> configuredLookups = lookupCoordinatorManager.getKnownLookups(); + Map> configuredLookups = lookupCoordinatorManager + .getKnownLookups(); if (configuredLookups == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.jsonize("No lookups found")) .build(); } - Map> lookupsStateOnNodes = lookupCoordinatorManager.getLastKnownLookupsStateOnNodes(); + Map> 
lookupsStateOnNodes = lookupCoordinatorManager + .getLastKnownLookupsStateOnNodes(); Map> result = new HashMap<>(); @@ -362,7 +385,8 @@ public Response getLookupStatusForTier( ) { try { - Map> configuredLookups = lookupCoordinatorManager.getKnownLookups(); + Map> configuredLookups = lookupCoordinatorManager + .getKnownLookups(); if (configuredLookups == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.jsonize("No lookups found")) @@ -380,7 +404,8 @@ public Response getLookupStatusForTier( Map lookupStatusMap = new HashMap<>(); Collection hosts = lookupCoordinatorManager.discoverNodesInTier(tier); - Map> lookupsStateOnNodes = lookupCoordinatorManager.getLastKnownLookupsStateOnNodes(); + Map> lookupsStateOnNodes = lookupCoordinatorManager + .getLastKnownLookupsStateOnNodes(); for (Map.Entry lookupsEntry : tierLookups.entrySet()) { lookupStatusMap.put( @@ -407,7 +432,8 @@ public Response getSpecificLookupStatus( ) { try { - Map> configuredLookups = lookupCoordinatorManager.getKnownLookups(); + Map> configuredLookups = lookupCoordinatorManager + .getKnownLookups(); if (configuredLookups == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.jsonize("No lookups found")) @@ -486,7 +512,8 @@ public Response getAllNodesStatus( if (discover) { tiers = lookupCoordinatorManager.discoverTiers(); } else { - Map> configuredLookups = lookupCoordinatorManager.getKnownLookups(); + Map> configuredLookups = lookupCoordinatorManager + .getKnownLookups(); if (configuredLookups == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.jsonize("No lookups configured.")) @@ -495,7 +522,8 @@ public Response getAllNodesStatus( tiers = configuredLookups.keySet(); } - Map> lookupsStateOnHosts = lookupCoordinatorManager.getLastKnownLookupsStateOnNodes(); + Map> lookupsStateOnHosts = lookupCoordinatorManager + .getLastKnownLookupsStateOnNodes(); Map>> result = new HashMap<>(); @@ -531,7 
+559,8 @@ public Response getNodesStatusInTier( ) { try { - Map> lookupsStateOnHosts = lookupCoordinatorManager.getLastKnownLookupsStateOnNodes(); + Map> lookupsStateOnHosts = lookupCoordinatorManager + .getLastKnownLookupsStateOnNodes(); Map> tierNodesStatus = new HashMap<>(); @@ -563,7 +592,8 @@ public Response getSpecificNodeStatus( ) { try { - Map> lookupsStateOnHosts = lookupCoordinatorManager.getLastKnownLookupsStateOnNodes(); + Map> lookupsStateOnHosts = lookupCoordinatorManager + .getLastKnownLookupsStateOnNodes(); LookupsState lookupsState = lookupsStateOnHosts.get(hostAndPort); if (lookupsState == null) { diff --git a/server/src/test/java/org/apache/druid/server/coordinator/DruidCoordinatorBalancerTest.java b/server/src/test/java/org/apache/druid/server/coordinator/DruidCoordinatorBalancerTest.java index 67521e397e00..dbd3048e5396 100644 --- a/server/src/test/java/org/apache/druid/server/coordinator/DruidCoordinatorBalancerTest.java +++ b/server/src/test/java/org/apache/druid/server/coordinator/DruidCoordinatorBalancerTest.java @@ -201,14 +201,14 @@ public void testMoveToEmptyServerBalancer() /** * Server 1 has 2 segments. - * Server 2 (maintenance) has 2 segments. + * Server 2 (decommissioning) has 2 segments. * Server 3 is empty. - * Maintenance has priority 7. + * Decommissioning percent is 60. * Max segments to move is 3. * 2 (of 2) segments should be moved from Server 2 and 1 (of 2) from Server 1. 
*/ @Test - public void testMoveMaintenancePriority() + public void testMoveDecommissioningMaxPercentOfMaxSegmentsToMove() { mockDruidServer(druidServer1, "1", "normal", 30L, 100L, Arrays.asList(segment1, segment2)); mockDruidServer(druidServer2, "2", "normal", 30L, 100L, Arrays.asList(segment3, segment4)); @@ -239,8 +239,8 @@ public void testMoveMaintenancePriority() .withDynamicConfigs( CoordinatorDynamicConfig.builder() .withMaxSegmentsToMove(3) - .withMaintenanceModeSegmentsPriority(6) - .build() // ceil(3 * 0.6) = 2 segments from servers in maintenance + .withDecommissioningMaxPercentOfMaxSegmentsToMove(60) + .build() // ceil(3 * 0.6) = 2 segments from decommissioning servers ) .withBalancerStrategy(strategy) .build(); @@ -251,28 +251,28 @@ public void testMoveMaintenancePriority() } @Test - public void testZeroMaintenancePriority() + public void testZeroDecommissioningMaxPercentOfMaxSegmentsToMove() { - DruidCoordinatorRuntimeParams params = setupParamsForMaintenancePriority(0); + DruidCoordinatorRuntimeParams params = setupParamsForDecommissioningMaxPercentOfMaxSegmentsToMove(0); params = new DruidCoordinatorBalancerTester(coordinator).run(params); Assert.assertEquals(1L, params.getCoordinatorStats().getTieredStat("movedCount", "normal")); Assert.assertThat(peon3.getSegmentsToLoad(), is(equalTo(ImmutableSet.of(segment1)))); } @Test - public void testMaxMaintenancePriority() + public void testMaxDecommissioningMaxPercentOfMaxSegmentsToMove() { - DruidCoordinatorRuntimeParams params = setupParamsForMaintenancePriority(10); + DruidCoordinatorRuntimeParams params = setupParamsForDecommissioningMaxPercentOfMaxSegmentsToMove(10); params = new DruidCoordinatorBalancerTester(coordinator).run(params); Assert.assertEquals(1L, params.getCoordinatorStats().getTieredStat("movedCount", "normal")); Assert.assertThat(peon3.getSegmentsToLoad(), is(equalTo(ImmutableSet.of(segment2)))); } /** - * Should balance segments as usual (ignoring priority) with empty maintenanceList. 
+ * Should balance segments as usual (ignoring percent) with empty decommissioningNodes. */ @Test - public void testMoveMaintenancePriorityWithNoMaintenance() + public void testMoveDecommissioningMaxPercentOfMaxSegmentsToMoveWithNoDecommissioning() { mockDruidServer(druidServer1, "1", "normal", 30L, 100L, Arrays.asList(segment1, segment2)); mockDruidServer(druidServer2, "2", "normal", 0L, 100L, Arrays.asList(segment3, segment4)); @@ -300,7 +300,7 @@ public void testMoveMaintenancePriorityWithNoMaintenance() ImmutableList.of(false, false, false) ) .withDynamicConfigs( - CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(3).withMaintenanceModeSegmentsPriority(9).build() + CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(3).withDecommissioningMaxPercentOfMaxSegmentsToMove(9).build() ) .withBalancerStrategy(strategy) .build(); @@ -311,10 +311,10 @@ public void testMoveMaintenancePriorityWithNoMaintenance() } /** - * Shouldn't move segments to a server in maintenance mode. + * Shouldn't move segments to a decommissioning server. 
*/ @Test - public void testMoveToServerInMaintenance() + public void testMoveToDecommissioningServer() { mockDruidServer(druidServer1, "1", "normal", 30L, 100L, segments); mockDruidServer(druidServer2, "2", "normal", 0L, 100L, Collections.emptyList()); @@ -347,7 +347,7 @@ public void testMoveToServerInMaintenance() } @Test - public void testMoveFromServerInMaintenance() + public void testMoveFromDecommissioningServer() { mockDruidServer(druidServer1, "1", "normal", 30L, 100L, segments); mockDruidServer(druidServer2, "2", "normal", 0L, 100L, Collections.emptyList()); @@ -512,7 +512,7 @@ private DruidCoordinatorRuntimeParams.Builder defaultRuntimeParamsBuilder( private DruidCoordinatorRuntimeParams.Builder defaultRuntimeParamsBuilder( List druidServers, List peons, - List maintenance + List decommissioning ) { return DruidCoordinatorRuntimeParams @@ -524,7 +524,7 @@ private DruidCoordinatorRuntimeParams.Builder defaultRuntimeParamsBuilder( "normal", IntStream .range(0, druidServers.size()) - .mapToObj(i -> new ServerHolder(druidServers.get(i), peons.get(i), maintenance.get(i))) + .mapToObj(i -> new ServerHolder(druidServers.get(i), peons.get(i), decommissioning.get(i))) .collect(Collectors.toSet()) ) ) @@ -622,7 +622,7 @@ public void emitStats(String tier, CoordinatorStats stats, List se } } - private DruidCoordinatorRuntimeParams setupParamsForMaintenancePriority(int priority) + private DruidCoordinatorRuntimeParams setupParamsForDecommissioningMaxPercentOfMaxSegmentsToMove(int percent) { mockDruidServer(druidServer1, "1", "normal", 30L, 100L, Arrays.asList(segment1, segment3)); mockDruidServer(druidServer2, "2", "normal", 30L, 100L, Arrays.asList(segment2, segment3)); @@ -632,7 +632,7 @@ private DruidCoordinatorRuntimeParams setupParamsForMaintenancePriority(int prio mockCoordinator(coordinator); - // either maintenance servers list or general ones (ie servers list is [2] or [1, 3]) + // either decommissioning servers list or acitve ones (ie servers list is [2] or 
[1, 3]) BalancerStrategy strategy = EasyMock.createMock(BalancerStrategy.class); EasyMock.expect(strategy.pickSegmentToMove(ImmutableList.of(new ServerHolder(druidServer2, peon2, true)))) .andReturn(new BalancerSegmentHolder(druidServer2, segment2)); @@ -651,7 +651,7 @@ private DruidCoordinatorRuntimeParams setupParamsForMaintenancePriority(int prio .withDynamicConfigs( CoordinatorDynamicConfig.builder() .withMaxSegmentsToMove(1) - .withMaintenanceModeSegmentsPriority(priority) + .withDecommissioningMaxPercentOfMaxSegmentsToMove(percent) .build() ) .withBalancerStrategy(strategy) diff --git a/server/src/test/java/org/apache/druid/server/coordinator/rules/BroadcastDistributionRuleTest.java b/server/src/test/java/org/apache/druid/server/coordinator/rules/BroadcastDistributionRuleTest.java index 359fb6895cce..83398d7f6dc4 100644 --- a/server/src/test/java/org/apache/druid/server/coordinator/rules/BroadcastDistributionRuleTest.java +++ b/server/src/test/java/org/apache/druid/server/coordinator/rules/BroadcastDistributionRuleTest.java @@ -58,9 +58,9 @@ public class BroadcastDistributionRuleTest private final List largeSegments2 = new ArrayList<>(); private DataSegment smallSegment; private DruidCluster secondCluster; - private ServerHolder generalServer; - private ServerHolder maintenanceServer2; - private ServerHolder maintenanceServer1; + private ServerHolder activeServer; + private ServerHolder decommissioningServer1; + private ServerHolder decommissioningServer2; @Before public void setUp() @@ -200,9 +200,9 @@ public void setUp() ) ); - generalServer = new ServerHolder( + activeServer = new ServerHolder( new DruidServer( - "general", + "active", "host1", null, 100, @@ -214,9 +214,9 @@ public void setUp() new LoadQueuePeonTester() ); - maintenanceServer1 = new ServerHolder( + decommissioningServer1 = new ServerHolder( new DruidServer( - "maintenance1", + "decommissioning1", "host2", null, 100, @@ -229,9 +229,9 @@ public void setUp() true ); - maintenanceServer2 = new 
ServerHolder( + decommissioningServer2 = new ServerHolder( new DruidServer( - "maintenance2", + "decommissioning2", "host3", null, 100, @@ -267,9 +267,9 @@ public void setUp() ImmutableMap.of( "tier1", Stream.of( - generalServer, - maintenanceServer1, - maintenanceServer2 + activeServer, + decommissioningServer1, + decommissioningServer2 ).collect(Collectors.toCollection(() -> new TreeSet<>(Collections.reverseOrder()))) ) ); @@ -315,19 +315,19 @@ public void testBroadcastToSingleDataSource() /** * Servers: - * name | segments - * -------------+-------------- - * general | large segment - * maintenance1 | small segment - * maintenance2 | large segment + * name | segments + * -----------------+-------------- + * active | large segment + * decommissioning1 | small segment + * decommissioning2 | large segment * * After running the rule for the small segment: - * general | large & small segments - * maintenance1 | - * maintenance2 | large segment + * active | large & small segments + * decommissioning1 | + * decommissionint2 | large segment */ @Test - public void testBroadcastWithMaintenance() + public void testBroadcastDecommissioning() { final ForeverBroadcastDistributionRule rule = new ForeverBroadcastDistributionRule(ImmutableList.of("large_source")); @@ -348,9 +348,9 @@ public void testBroadcastWithMaintenance() assertEquals(1L, stats.getGlobalStat(LoadRule.ASSIGNED_COUNT)); assertEquals(false, stats.hasPerTierStats()); - assertEquals(1, generalServer.getPeon().getSegmentsToLoad().size()); - assertEquals(1, maintenanceServer1.getPeon().getSegmentsToDrop().size()); - assertEquals(0, maintenanceServer2.getPeon().getSegmentsToLoad().size()); + assertEquals(1, activeServer.getPeon().getSegmentsToLoad().size()); + assertEquals(1, decommissioningServer1.getPeon().getSegmentsToDrop().size()); + assertEquals(0, decommissioningServer2.getPeon().getSegmentsToLoad().size()); } @Test diff --git a/server/src/test/java/org/apache/druid/server/coordinator/rules/LoadRuleTest.java 
b/server/src/test/java/org/apache/druid/server/coordinator/rules/LoadRuleTest.java index 9a9bcb170a75..a8793b2a6474 100644 --- a/server/src/test/java/org/apache/druid/server/coordinator/rules/LoadRuleTest.java +++ b/server/src/test/java/org/apache/druid/server/coordinator/rules/LoadRuleTest.java @@ -687,11 +687,11 @@ public void testMaxLoadingQueueSize() } /** - * 2 servers in different tiers, the first is in maitenance mode. - * Should not load a segment to the server in maintenance mode. + * 2 servers in different tiers, the first is decommissioning. + * Should not load a segment to the server that is decommissioning */ @Test - public void testLoadDuringMaitenance() + public void testLoadDecommissioning() { final LoadQueuePeon mockPeon1 = createEmptyPeon(); final LoadQueuePeon mockPeon2 = createOneCallPeonMock(); @@ -737,11 +737,11 @@ public void testLoadDuringMaitenance() } /** - * 2 tiers, 2 servers each, 1 server of the second tier is in maintenance. - * Should not load a segment to the server in maintenance mode. + * 2 tiers, 2 servers each, 1 server of the second tier is decommissioning. + * Should not load a segment to the server that is decommssioning. */ @Test - public void testLoadReplicaDuringMaitenance() + public void testLoadReplicaDuringDecommissioning() { EasyMock.expect(throttler.canCreateReplicant(EasyMock.anyString())).andReturn(true).anyTimes(); @@ -796,11 +796,11 @@ public void testLoadReplicaDuringMaitenance() } /** - * 2 servers with a segment, one server in maintenance mode. + * 2 servers with a segment, one server decommissioning. * Should drop a segment from both. */ @Test - public void testDropDuringMaintenance() + public void testDropDuringDecommissioning() { final LoadQueuePeon mockPeon = createEmptyPeon(); mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject()); @@ -859,12 +859,12 @@ public void testDropDuringMaintenance() /** * 3 servers hosting 3 replicas of the segment. - * 1 servers is in maitenance. 
+ * 1 servers is decommissioning. * 1 replica is redundant. - * Should drop from the server in maintenance. + * Should drop from the decommissioning server. */ @Test - public void testRedundantReplicaDropDuringMaintenance() + public void testRedundantReplicaDropDuringDecommissioning() { final LoadQueuePeon mockPeon1 = new LoadQueuePeonTester(); final LoadQueuePeon mockPeon2 = new LoadQueuePeonTester(); @@ -1019,12 +1019,12 @@ private static LoadQueuePeon createOneCallPeonMock() return mockPeon2; } - private static ServerHolder createServerHolder(String tier, LoadQueuePeon mockPeon1, boolean maintenance) + private static ServerHolder createServerHolder(String tier, LoadQueuePeon mockPeon1, boolean isDecommissioning) { return new ServerHolder( createServer(tier).toImmutableDruidServer(), mockPeon1, - maintenance + isDecommissioning ); } } diff --git a/server/src/test/java/org/apache/druid/server/http/CoordinatorDynamicConfigTest.java b/server/src/test/java/org/apache/druid/server/http/CoordinatorDynamicConfigTest.java index e0979252012c..af97906f9cc7 100644 --- a/server/src/test/java/org/apache/druid/server/http/CoordinatorDynamicConfigTest.java +++ b/server/src/test/java/org/apache/druid/server/http/CoordinatorDynamicConfigTest.java @@ -50,8 +50,8 @@ public void testSerde() throws Exception + " \"emitBalancingStats\": true,\n" + " \"killDataSourceWhitelist\": [\"test1\",\"test2\"],\n" + " \"maxSegmentsInNodeLoadingQueue\": 1,\n" - + " \"historicalNodesInMaintenance\": [\"host1\", \"host2\"],\n" - + " \"nodesInMaintenancePriority\": 9\n" + + " \"decommissioningNodes\": [\"host1\", \"host2\"],\n" + + " \"decommissioningMaxPercentOfMaxSegmentsToMove\": 9\n" + "}\n"; CoordinatorDynamicConfig actual = mapper.readValue( @@ -63,19 +63,19 @@ public void testSerde() throws Exception ), CoordinatorDynamicConfig.class ); - ImmutableSet maintenance = ImmutableSet.of("host1", "host2"); + ImmutableSet decommissioning = ImmutableSet.of("host1", "host2"); ImmutableSet whitelist = 
ImmutableSet.of("test1", "test2"); - assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, whitelist, false, 1, maintenance, 9); + assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, whitelist, false, 1, decommissioning, 9); - actual = CoordinatorDynamicConfig.builder().withMaintenanceList(ImmutableSet.of("host1")).build(actual); + actual = CoordinatorDynamicConfig.builder().withDecommissioningNodes(ImmutableSet.of("host1")).build(actual); assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, whitelist, false, 1, ImmutableSet.of("host1"), 9); - actual = CoordinatorDynamicConfig.builder().withMaintenanceModeSegmentsPriority(5).build(actual); + actual = CoordinatorDynamicConfig.builder().withDecommissioningMaxPercentOfMaxSegmentsToMove(5).build(actual); assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, whitelist, false, 1, ImmutableSet.of("host1"), 5); } @Test - public void testMaintenanceParametersBackwardCompatibility() throws Exception + public void testDecommissioningParametersBackwardCompatibility() throws Exception { String jsonStr = "{\n" + " \"millisToWaitBeforeDeleting\": 1,\n" @@ -99,14 +99,14 @@ public void testMaintenanceParametersBackwardCompatibility() throws Exception ), CoordinatorDynamicConfig.class ); - ImmutableSet maintenance = ImmutableSet.of(); + ImmutableSet decommissioning = ImmutableSet.of(); ImmutableSet whitelist = ImmutableSet.of("test1", "test2"); - assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, whitelist, false, 1, maintenance, 0); + assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, whitelist, false, 1, decommissioning, 0); - actual = CoordinatorDynamicConfig.builder().withMaintenanceList(ImmutableSet.of("host1")).build(actual); + actual = CoordinatorDynamicConfig.builder().withDecommissioningNodes(ImmutableSet.of("host1")).build(actual); assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, whitelist, false, 1, ImmutableSet.of("host1"), 0); - actual = CoordinatorDynamicConfig.builder().withMaintenanceModeSegmentsPriority(5).build(actual); + actual = 
CoordinatorDynamicConfig.builder().withDecommissioningMaxPercentOfMaxSegmentsToMove(5).build(actual); assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, whitelist, false, 1, ImmutableSet.of("host1"), 5); } @@ -217,7 +217,7 @@ public void testBuilderDefaults() { CoordinatorDynamicConfig defaultConfig = CoordinatorDynamicConfig.builder().build(); ImmutableSet emptyList = ImmutableSet.of(); - assertConfig(defaultConfig, 900000, 524288000, 100, 5, 15, 10, 1, false, emptyList, false, 0, emptyList, 7); + assertConfig(defaultConfig, 900000, 524288000, 100, 5, 15, 10, 1, false, emptyList, false, 0, emptyList, 70); } @Test @@ -257,8 +257,8 @@ private void assertConfig( Set expectedKillableDatasources, boolean expectedKillAllDataSources, int expectedMaxSegmentsInNodeLoadingQueue, - Set maintenanceList, - int maintenancePriority + Set decommissioning, + int decommissioningMaxPercentOfMaxSegmentsToMove ) { Assert.assertEquals(expectedMillisToWaitBeforeDeleting, config.getMillisToWaitBeforeDeleting()); @@ -272,7 +272,7 @@ private void assertConfig( Assert.assertEquals(expectedKillableDatasources, config.getKillableDataSources()); Assert.assertEquals(expectedKillAllDataSources, config.isKillAllDataSources()); Assert.assertEquals(expectedMaxSegmentsInNodeLoadingQueue, config.getMaxSegmentsInNodeLoadingQueue()); - Assert.assertEquals(maintenanceList, config.getHistoricalNodesInMaintenance()); - Assert.assertEquals(maintenancePriority, config.getNodesInMaintenancePriority()); + Assert.assertEquals(decommissioning, config.getDecommissioningNodes()); + Assert.assertEquals(decommissioningMaxPercentOfMaxSegmentsToMove, config.getDecommissioningMaxPercentOfMaxSegmentsToMove()); } } diff --git a/server/src/test/java/org/apache/druid/server/http/LookupCoordinatorResourceTest.java b/server/src/test/java/org/apache/druid/server/http/LookupCoordinatorResourceTest.java index 621479b8a2e4..17f3d46e11ed 100644 --- 
a/server/src/test/java/org/apache/druid/server/http/LookupCoordinatorResourceTest.java +++ b/server/src/test/java/org/apache/druid/server/http/LookupCoordinatorResourceTest.java @@ -39,6 +39,7 @@ import javax.servlet.http.HttpServletRequest; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; @@ -1078,4 +1079,68 @@ public void testGetSpecificNodeStatus() EasyMock.verify(lookupCoordinatorManager); } + + @Test + public void testGetAllLookupSpecs() + { + final Map> lookups = ImmutableMap.of( + "tier1", + ImmutableMap.of( + "lookup1", + new LookupExtractorFactoryMapContainer( + "v0", + ImmutableMap.of("k1", "v2") + ), + "lookup2", + new LookupExtractorFactoryMapContainer( + "v1", + ImmutableMap.of("k", "v") + ) + ), + "tier2", + ImmutableMap.of( + "lookup1", + new LookupExtractorFactoryMapContainer( + "v0", + ImmutableMap.of("k1", "v2") + ) + ) + ); + final LookupCoordinatorManager lookupCoordinatorManager = EasyMock.createStrictMock( + LookupCoordinatorManager.class + ); + EasyMock.expect(lookupCoordinatorManager.getKnownLookups()) + .andReturn(lookups) + .once(); + EasyMock.replay(lookupCoordinatorManager); + final LookupCoordinatorResource lookupCoordinatorResource = new LookupCoordinatorResource( + lookupCoordinatorManager, + mapper, + mapper + ); + final Response response = lookupCoordinatorResource.getAllLookupSpecs(); + Assert.assertEquals(Status.OK.getStatusCode(), response.getStatus()); + Assert.assertEquals(lookups, response.getEntity()); + EasyMock.verify(lookupCoordinatorManager); + } + + @Test + public void testGetEmptyAllLookupSpecs() + { + final LookupCoordinatorManager lookupCoordinatorManager = EasyMock.createStrictMock( + LookupCoordinatorManager.class + ); + EasyMock.expect(lookupCoordinatorManager.getKnownLookups()) + .andReturn(null) + .once(); + EasyMock.replay(lookupCoordinatorManager); + final 
LookupCoordinatorResource lookupCoordinatorResource = new LookupCoordinatorResource( + lookupCoordinatorManager, + mapper, + mapper + ); + final Response response = lookupCoordinatorResource.getAllLookupSpecs(); + Assert.assertEquals(Status.NOT_FOUND.getStatusCode(), response.getStatus()); + EasyMock.verify(lookupCoordinatorManager); + } } diff --git a/services/pom.xml b/services/pom.xml index 1220492fdd18..7eac4dfca89a 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -27,7 +27,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT @@ -118,7 +118,7 @@ - + diff --git a/services/src/main/java/org/apache/druid/cli/RouterJettyServerInitializer.java b/services/src/main/java/org/apache/druid/cli/RouterJettyServerInitializer.java index 57b6cbbd2e78..d76fe83e4487 100644 --- a/services/src/main/java/org/apache/druid/cli/RouterJettyServerInitializer.java +++ b/services/src/main/java/org/apache/druid/cli/RouterJettyServerInitializer.java @@ -66,6 +66,7 @@ public class RouterJettyServerInitializer implements JettyServerInitializer "/", "/coordinator-console/*", "/public/*", + "/assets/*", "/old-console/*", "/pages/*", "/unified-console.html", diff --git a/sql/pom.xml b/sql/pom.xml index 57c0d5f04875..bea2de61d5cb 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -18,8 +18,7 @@ ~ under the License. 
--> - + 4.0.0 druid-sql @@ -29,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java index 3efbd8d0074e..4aaa95e12a63 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/DruidSchema.java @@ -19,6 +19,8 @@ package org.apache.druid.sql.calcite.schema; +import com.amazonaws.annotation.GuardedBy; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Predicates; import com.google.common.collect.FluentIterable; @@ -62,7 +64,6 @@ import org.apache.druid.sql.calcite.view.DruidViewMacro; import org.apache.druid.sql.calcite.view.ViewManager; import org.apache.druid.timeline.DataSegment; -import org.apache.druid.timeline.SegmentId; import java.io.IOException; import java.util.Comparator; @@ -94,8 +95,9 @@ public class DruidSchema extends AbstractSchema private static final EmittingLogger log = new EmittingLogger(DruidSchema.class); private static final int MAX_SEGMENTS_PER_QUERY = 15000; - private static final long IS_PUBLISHED = 0; - private static final long IS_AVAILABLE = 1; + private static final long DEFAULT_IS_PUBLISHED = 0; + private static final long DEFAULT_IS_AVAILABLE = 1; + private static final long DEFAULT_NUM_ROWS = 0; private final QueryLifecycleFactory queryLifecycleFactory; private final PlannerConfig config; @@ -106,12 +108,12 @@ public class DruidSchema extends AbstractSchema // For awaitInitialization. 
private final CountDownLatch initialized = new CountDownLatch(1); - // Protects access to segmentSignatures, mutableSegments, segmentsNeedingRefresh, lastRefresh, isServerViewInitialized + // Protects access to segmentSignatures, mutableSegments, segmentsNeedingRefresh, lastRefresh, isServerViewInitialized, segmentMetadata private final Object lock = new Object(); // DataSource -> Segment -> SegmentMetadataHolder(contains RowSignature) for that segment. // Use TreeMap for segments so they are merged in deterministic order, from older to newer. - // This data structure need to be accessed in a thread-safe way since SystemSchema accesses it + @GuardedBy("lock") private final Map> segmentMetadataInfo = new HashMap<>(); private int totalSegments = 0; @@ -350,7 +352,8 @@ protected Multimap getFunctionMultim return builder.build(); } - private void addSegment(final DruidServerMetadata server, final DataSegment segment) + @VisibleForTesting + void addSegment(final DruidServerMetadata server, final DataSegment segment) { synchronized (lock) { final Map knownSegments = segmentMetadataInfo.get(segment.getDataSource()); @@ -359,16 +362,18 @@ private void addSegment(final DruidServerMetadata server, final DataSegment segm // segmentReplicatable is used to determine if segments are served by realtime servers or not final long isRealtime = server.segmentReplicatable() ? 0 : 1; - final Map> serverSegmentMap = ImmutableMap.of( + final Set servers = ImmutableSet.of(server.getName()); + holder = SegmentMetadataHolder.builder( segment.getId(), - ImmutableSet.of(server.getName()) - ); - - holder = SegmentMetadataHolder - .builder(segment.getId(), IS_PUBLISHED, IS_AVAILABLE, isRealtime, serverSegmentMap) - .build(); + DEFAULT_IS_PUBLISHED, + DEFAULT_IS_AVAILABLE, + isRealtime, + servers, + null, + DEFAULT_NUM_ROWS + ).build(); // Unknown segment. 
- setSegmentSignature(segment, holder); + setSegmentMetadataHolder(segment, holder); segmentsNeedingRefresh.add(segment); if (!server.segmentReplicatable()) { log.debug("Added new mutable segment[%s].", segment.getId()); @@ -377,14 +382,14 @@ private void addSegment(final DruidServerMetadata server, final DataSegment segm log.debug("Added new immutable segment[%s].", segment.getId()); } } else { - final Map> segmentServerMap = holder.getReplicas(); + final Set segmentServers = holder.getReplicas(); final ImmutableSet servers = new ImmutableSet.Builder() - .addAll(segmentServerMap.get(segment.getId())) + .addAll(segmentServers) .add(server.getName()) .build(); final SegmentMetadataHolder holderWithNumReplicas = SegmentMetadataHolder .from(holder) - .withReplicas(ImmutableMap.of(segment.getId(), servers)) + .withReplicas(servers) .build(); knownSegments.put(segment, holderWithNumReplicas); if (server.segmentReplicatable()) { @@ -402,7 +407,8 @@ private void addSegment(final DruidServerMetadata server, final DataSegment segm } } - private void removeSegment(final DataSegment segment) + @VisibleForTesting + void removeSegment(final DataSegment segment) { synchronized (lock) { log.debug("Segment[%s] is gone.", segment.getId()); @@ -433,13 +439,13 @@ private void removeServerSegment(final DruidServerMetadata server, final DataSeg log.debug("Segment[%s] is gone from server[%s]", segment.getId(), server.getName()); final Map knownSegments = segmentMetadataInfo.get(segment.getDataSource()); final SegmentMetadataHolder holder = knownSegments.get(segment); - final Map> segmentServerMap = holder.getReplicas(); - final ImmutableSet servers = FluentIterable.from(segmentServerMap.get(segment.getId())) + final Set segmentServers = holder.getReplicas(); + final ImmutableSet servers = FluentIterable.from(segmentServers) .filter(Predicates.not(Predicates.equalTo(server.getName()))) .toSet(); final SegmentMetadataHolder holderWithNumReplicas = SegmentMetadataHolder .from(holder) - 
.withReplicas(ImmutableMap.of(segment.getId(), servers)) + .withReplicas(servers) .build(); knownSegments.put(segment, holderWithNumReplicas); lock.notifyAll(); @@ -450,7 +456,8 @@ private void removeServerSegment(final DruidServerMetadata server, final DataSeg * Attempt to refresh "segmentSignatures" for a set of segments. Returns the set of segments actually refreshed, * which may be a subset of the asked-for set. */ - private Set refreshSegments(final Set segments) throws IOException + @VisibleForTesting + Set refreshSegments(final Set segments) throws IOException { final Set retVal = new HashSet<>(); @@ -506,15 +513,26 @@ private Set refreshSegmentsForDataSource(final String dataSource, f log.debug("Segment[%s] has signature[%s].", segment.getId(), rowSignature); final Map dataSourceSegments = segmentMetadataInfo.get(segment.getDataSource()); - SegmentMetadataHolder holder = dataSourceSegments.get(segment); - SegmentMetadataHolder updatedHolder = SegmentMetadataHolder - .from(holder) - .withRowSignature(rowSignature) - .withNumRows(analysis.getNumRows()) - .build(); - dataSourceSegments.put(segment, updatedHolder); - setSegmentSignature(segment, updatedHolder); - retVal.add(segment); + if (dataSourceSegments == null) { + log.warn("No segment map found with datasource[%s], skipping refresh", segment.getDataSource()); + } else { + SegmentMetadataHolder holder = dataSourceSegments.get(segment); + if (holder == null) { + log.warn( + "No segment[%s] found, skipping refresh", + segment.getId() + ); + } else { + SegmentMetadataHolder updatedHolder = SegmentMetadataHolder + .from(holder) + .withRowSignature(rowSignature) + .withNumRows(analysis.getNumRows()) + .build(); + dataSourceSegments.put(segment, updatedHolder); + setSegmentMetadataHolder(segment, updatedHolder); + retVal.add(segment); + } + } } } @@ -536,7 +554,8 @@ private Set refreshSegmentsForDataSource(final String dataSource, f return retVal; } - private void setSegmentSignature(final DataSegment segment, 
final SegmentMetadataHolder segmentMetadataHolder) + @VisibleForTesting + void setSegmentMetadataHolder(final DataSegment segment, final SegmentMetadataHolder segmentMetadataHolder) { synchronized (lock) { TreeMap dataSourceSegments = segmentMetadataInfo.computeIfAbsent( @@ -628,7 +647,7 @@ private static RowSignature analysisToRowSignature(final SegmentAnalysis analysi return rowSignatureBuilder.build(); } - public Map getSegmentMetadata() + Map getSegmentMetadata() { final Map segmentMetadata = new HashMap<>(); synchronized (lock) { diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SegmentMetadataHolder.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SegmentMetadataHolder.java index f2d5ab313b5c..38ff92858ec0 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SegmentMetadataHolder.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SegmentMetadataHolder.java @@ -23,7 +23,6 @@ import org.apache.druid.timeline.SegmentId; import javax.annotation.Nullable; -import java.util.Map; import java.util.Set; /** @@ -36,15 +35,25 @@ public static Builder builder( long isPublished, long isAvailable, long isRealtime, - Map> segmentServerMap + Set segmentServers, + RowSignature rowSignature, + long numRows ) { - return new Builder(segmentId, isPublished, isAvailable, isRealtime, segmentServerMap); + return new Builder(segmentId, isPublished, isAvailable, isRealtime, segmentServers, rowSignature, numRows); } public static Builder from(SegmentMetadataHolder h) { - return new Builder(h.getSegmentId(), h.isPublished(), h.isAvailable(), h.isRealtime(), h.getReplicas()); + return new Builder( + h.getSegmentId(), + h.isPublished(), + h.isAvailable(), + h.isRealtime(), + h.getReplicas(), + h.getRowSignature(), + h.getNumRows() + ); } private final SegmentId segmentId; @@ -54,8 +63,8 @@ public static Builder from(SegmentMetadataHolder h) private final long isPublished; private final long isAvailable; private final long 
isRealtime; - //segmentId -> set of servers that contain the segment - private final Map> segmentServerMap; + // set of servers that contain the segment + private final Set segmentServers; private final long numRows; @Nullable private final RowSignature rowSignature; @@ -66,7 +75,7 @@ private SegmentMetadataHolder(Builder builder) this.isPublished = builder.isPublished; this.isAvailable = builder.isAvailable; this.isRealtime = builder.isRealtime; - this.segmentServerMap = builder.segmentServerMap; + this.segmentServers = builder.segmentServers; this.numRows = builder.numRows; this.segmentId = builder.segmentId; } @@ -91,14 +100,14 @@ public SegmentId getSegmentId() return segmentId; } - public Map> getReplicas() + public Set getReplicas() { - return segmentServerMap; + return segmentServers; } - public long getNumReplicas(SegmentId segmentId) + public long getNumReplicas() { - return segmentServerMap.get(segmentId).size(); + return segmentServers.size(); } public long getNumRows() @@ -119,7 +128,7 @@ public static class Builder private final long isAvailable; private final long isRealtime; - private Map> segmentServerMap; + private Set segmentServers; @Nullable private RowSignature rowSignature; private long numRows; @@ -129,14 +138,18 @@ private Builder( long isPublished, long isAvailable, long isRealtime, - Map> segmentServerMap + Set servers, + RowSignature rowSignature, + long numRows ) { this.segmentId = segmentId; this.isPublished = isPublished; this.isAvailable = isAvailable; this.isRealtime = isRealtime; - this.segmentServerMap = segmentServerMap; + this.segmentServers = servers; + this.rowSignature = rowSignature; + this.numRows = numRows; } public Builder withRowSignature(RowSignature rowSignature) @@ -151,9 +164,9 @@ public Builder withNumRows(long numRows) return this; } - public Builder withReplicas(Map> segmentServerMap) + public Builder withReplicas(Set servers) { - this.segmentServerMap = segmentServerMap; + this.segmentServers = servers; return 
this; } diff --git a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java index d0599f861903..e895113f8505 100644 --- a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java +++ b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SystemSchema.java @@ -224,7 +224,7 @@ public Enumerable scan(DataContext root) Maps.newHashMapWithExpectedSize(druidSchema.getTotalSegments()); for (SegmentMetadataHolder h : availableSegmentMetadata.values()) { PartialSegmentData partialSegmentData = - new PartialSegmentData(h.isAvailable(), h.isRealtime(), h.getNumReplicas(h.getSegmentId()), h.getNumRows()); + new PartialSegmentData(h.isAvailable(), h.isRealtime(), h.getNumReplicas(), h.getNumRows()); partialSegmentDataMap.put(h.getSegmentId(), partialSegmentData); } diff --git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidSchemaTest.java index 1b4e00819bb6..e707c40aeb5c 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/DruidSchemaTest.java @@ -27,6 +27,8 @@ import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.schema.Table; import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.druid.client.ImmutableDruidServer; +import org.apache.druid.client.TimelineServerView; import org.apache.druid.data.input.InputRow; import org.apache.druid.java.util.common.Intervals; import org.apache.druid.java.util.common.Pair; @@ -40,6 +42,7 @@ import org.apache.druid.segment.QueryableIndex; import org.apache.druid.segment.incremental.IncrementalIndexSchema; import org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory; +import org.apache.druid.server.coordination.DruidServerMetadata; import org.apache.druid.server.security.NoopEscalator; import 
org.apache.druid.sql.calcite.planner.PlannerConfig; import org.apache.druid.sql.calcite.table.DruidTable; @@ -63,6 +66,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Set; public class DruidSchemaTest extends CalciteTestBase { @@ -83,6 +87,8 @@ public class DruidSchemaTest extends CalciteTestBase private static QueryRunnerFactoryConglomerate conglomerate; private static Closer resourceCloser; + private List druidServers; + @BeforeClass public static void setUpClass() { @@ -162,10 +168,12 @@ public void setUp() throws Exception index2 ); + final TimelineServerView serverView = new TestServerInventoryView(walker.getSegments()); + druidServers = serverView.getDruidServers(); schema = new DruidSchema( CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), - new TestServerInventoryView(walker.getSegments()), + serverView, PLANNER_CONFIG_DEFAULT, new NoopViewManager(), new NoopEscalator() @@ -237,4 +245,100 @@ public void testGetTableMapFoo2() Assert.assertEquals("m1", fields.get(2).getName()); Assert.assertEquals(SqlTypeName.BIGINT, fields.get(2).getType().getSqlTypeName()); } + + /** + * This tests that {@link SegmentMetadataHolder#getNumRows()} is correct in case + * of multiple replicas i.e. 
when {@link DruidSchema#addSegment(DruidServerMetadata, DataSegment)} + * is called more than once for same segment + */ + @Test + public void testSegmentMetadataHolderNumRows() + { + Map segmentsMetadata = schema.getSegmentMetadata(); + final Set segments = segmentsMetadata.keySet(); + Assert.assertEquals(3, segments.size()); + // find the only segment with datasource "foo2" + final DataSegment existingSegment = segments.stream() + .filter(segment -> segment.getDataSource().equals("foo2")) + .findFirst() + .orElse(null); + Assert.assertNotNull(existingSegment); + final SegmentMetadataHolder existingHolder = segmentsMetadata.get(existingSegment); + // update SegmentMetadataHolder of existingSegment with numRows=5 + SegmentMetadataHolder updatedHolder = SegmentMetadataHolder.from(existingHolder).withNumRows(5).build(); + schema.setSegmentMetadataHolder(existingSegment, updatedHolder); + // find a druidServer holding existingSegment + final Pair pair = druidServers.stream() + .flatMap(druidServer -> druidServer.getSegments() + .stream() + .filter(segment -> segment + .equals( + existingSegment)) + .map(segment -> Pair + .of( + druidServer, + segment + ))) + .findAny() + .orElse(null); + Assert.assertNotNull(pair); + final ImmutableDruidServer server = pair.lhs; + Assert.assertNotNull(server); + final DruidServerMetadata druidServerMetadata = server.getMetadata(); + // invoke DruidSchema#addSegment on existingSegment + schema.addSegment(druidServerMetadata, existingSegment); + segmentsMetadata = schema.getSegmentMetadata(); + // get the only segment with datasource "foo2" + final DataSegment currentSegment = segments.stream() + .filter(segment -> segment.getDataSource().equals("foo2")) + .findFirst() + .orElse(null); + final SegmentMetadataHolder currentHolder = segmentsMetadata.get(currentSegment); + Assert.assertEquals(updatedHolder.getSegmentId(), currentHolder.getSegmentId()); + Assert.assertEquals(updatedHolder.getNumRows(), currentHolder.getNumRows()); + // 
numreplicas do not change here since we addSegment with the same server which was serving existingSegment before + Assert.assertEquals(updatedHolder.getNumReplicas(), currentHolder.getNumReplicas()); + Assert.assertEquals(updatedHolder.isAvailable(), currentHolder.isAvailable()); + Assert.assertEquals(updatedHolder.isPublished(), currentHolder.isPublished()); + } + + @Test + public void testNullDatasource() throws IOException + { + Map segmentMetadatas = schema.getSegmentMetadata(); + Set segments = segmentMetadatas.keySet(); + Assert.assertEquals(segments.size(), 3); + // segments contains two segments with datasource "foo" and one with datasource "foo2" + // let's remove the only segment with datasource "foo2" + final DataSegment segmentToRemove = segments.stream() + .filter(segment -> segment.getDataSource().equals("foo2")) + .findFirst() + .orElse(null); + Assert.assertFalse(segmentToRemove == null); + schema.removeSegment(segmentToRemove); + schema.refreshSegments(segments); // can cause NPE without dataSourceSegments null check in DruidSchema#refreshSegmentsForDataSource + segmentMetadatas = schema.getSegmentMetadata(); + segments = segmentMetadatas.keySet(); + Assert.assertEquals(segments.size(), 2); + } + + @Test + public void testNullSegmentMetadataHolder() throws IOException + { + Map segmentMetadatas = schema.getSegmentMetadata(); + Set segments = segmentMetadatas.keySet(); + Assert.assertEquals(segments.size(), 3); + // remove one of the segments with datasource "foo" + final DataSegment segmentToRemove = segments.stream() + .filter(segment -> segment.getDataSource().equals("foo")) + .findFirst() + .orElse(null); + Assert.assertFalse(segmentToRemove == null); + schema.removeSegment(segmentToRemove); + schema.refreshSegments(segments); // can cause NPE without holder null check in SegmentMetadataHolder#from + segmentMetadatas = schema.getSegmentMetadata(); + segments = segmentMetadatas.keySet(); + Assert.assertEquals(segments.size(), 2); + } + } diff 
--git a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java index 7d8cdaad5729..d354f141d9af 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SystemSchemaTest.java @@ -118,6 +118,11 @@ public class SystemSchemaTest extends CalciteTestBase CalciteTests.createRow(ImmutableMap.of("t", "2001-01-03", "m1", "6.0")) ); + private static final List ROWS3 = ImmutableList.of( + CalciteTests.createRow(ImmutableMap.of("t", "2001-01-01", "m1", "7.0", "dim3", ImmutableList.of("x"))), + CalciteTests.createRow(ImmutableMap.of("t", "2001-01-02", "m1", "8.0", "dim3", ImmutableList.of("xyz"))) + ); + private SystemSchema schema; private SpecificSegmentsQuerySegmentWalker walker; private DruidLeaderClient client; @@ -204,11 +209,22 @@ public Authorizer getAuthorizer(String name) ) .rows(ROWS2) .buildMMappedIndex(); + final QueryableIndex index3 = IndexBuilder.create() + .tmpDir(new File(tmpDir, "3")) + .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance()) + .schema( + new IncrementalIndexSchema.Builder() + .withMetrics(new LongSumAggregatorFactory("m1", "m1")) + .withRollup(false) + .build() + ) + .rows(ROWS3) + .buildMMappedIndex(); walker = new SpecificSegmentsQuerySegmentWalker(conglomerate) .add(segment1, index1) .add(segment2, index2) - .add(segment3, index2); + .add(segment3, index3); druidSchema = new DruidSchema( CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), @@ -469,7 +485,7 @@ public Object get(String name) 100L, 2L, //partition_num 1L, //num_replicas - 3L, //numRows + 2L, //numRows 0L, //is_published 1L, //is_available 0L //is_realtime @@ -481,7 +497,7 @@ public Object get(String name) 100L, 0L, //partition_num 1L, //num_replicas - 0L, //numRows = 3 + 0L, //numRows 0L, //is_published 1L, //is_available 1L //is_realtime diff 
--git a/sql/src/test/java/org/apache/druid/sql/calcite/util/TestServerInventoryView.java b/sql/src/test/java/org/apache/druid/sql/calcite/util/TestServerInventoryView.java index 6718b1bd1f80..2dcc56959837 100644 --- a/sql/src/test/java/org/apache/druid/sql/calcite/util/TestServerInventoryView.java +++ b/sql/src/test/java/org/apache/druid/sql/calcite/util/TestServerInventoryView.java @@ -20,7 +20,9 @@ package org.apache.druid.sql.calcite.util; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.druid.client.DruidServer; +import org.apache.druid.client.ImmutableDruidDataSource; import org.apache.druid.client.ImmutableDruidServer; import org.apache.druid.client.TimelineServerView; import org.apache.druid.client.selector.ServerSelector; @@ -33,6 +35,7 @@ import javax.annotation.Nullable; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.Executor; @@ -83,7 +86,14 @@ public TimelineLookup getTimeline(DataSource dataSource) @Override public List getDruidServers() { - throw new UnsupportedOperationException(); + final ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource("DUMMY", Collections.emptyMap(), segments); + final ImmutableDruidServer server = new ImmutableDruidServer( + DUMMY_SERVER, + 0L, + ImmutableMap.of("src", dataSource), + 1 + ); + return ImmutableList.of(server); } @Override diff --git a/web-console/.gitignore b/web-console/.gitignore index 540af4c32577..defba1913d1e 100644 --- a/web-console/.gitignore +++ b/web-console/.gitignore @@ -2,6 +2,7 @@ node/ node_modules/ resources/ public/ +assets/ lib/*.css coordinator-console/ @@ -11,3 +12,4 @@ index.html .tscache tscommand-*.tmp.txt +licenses.json \ No newline at end of file diff --git a/web-console/package-lock.json b/web-console/package-lock.json index 6bf0577c2f1c..8c5035ac60de 100644 --- a/web-console/package-lock.json +++ b/web-console/package-lock.json @@ -14,22 +14,22 
@@ } }, "@babel/core": { - "version": "7.2.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.2.2.tgz", - "integrity": "sha512-59vB0RWt09cAct5EIe58+NzGP4TFSD3Bz//2/ELy3ZeTeKF6VTD1AXlH8BGGbCX0PuobZBsIzO7IAI9PH67eKw==", + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.3.4.tgz", + "integrity": "sha512-jRsuseXBo9pN197KnDwhhaaBzyZr2oIcLHHTt2oDdQrej5Qp57dCCJafWx5ivU8/alEYDpssYqv1MUqcxwQlrA==", "dev": true, "requires": { "@babel/code-frame": "^7.0.0", - "@babel/generator": "^7.2.2", + "@babel/generator": "^7.3.4", "@babel/helpers": "^7.2.0", - "@babel/parser": "^7.2.2", + "@babel/parser": "^7.3.4", "@babel/template": "^7.2.2", - "@babel/traverse": "^7.2.2", - "@babel/types": "^7.2.2", + "@babel/traverse": "^7.3.4", + "@babel/types": "^7.3.4", "convert-source-map": "^1.1.0", "debug": "^4.1.0", "json5": "^2.1.0", - "lodash": "^4.17.10", + "lodash": "^4.17.11", "resolve": "^1.3.2", "semver": "^5.4.1", "source-map": "^0.5.0" @@ -53,12 +53,6 @@ "minimist": "^1.2.0" } }, - "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", - "dev": true - }, "source-map": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", @@ -68,14 +62,14 @@ } }, "@babel/generator": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.3.0.tgz", - "integrity": "sha512-dZTwMvTgWfhmibq4V9X+LMf6Bgl7zAodRn9PvcPdhlzFMbvUutx74dbEv7Atz3ToeEpevYEJtAwfxq/bDCzHWg==", + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.3.4.tgz", + "integrity": "sha512-8EXhHRFqlVVWXPezBW5keTiQi/rJMQTg/Y9uVCEZ0CAF3PKtCCaVRnp64Ii1ujhkoDhhF1fVsImoN4yJ2uz4Wg==", "dev": true, "requires": { - "@babel/types": "^7.3.0", + "@babel/types": "^7.3.4", "jsesc": "^2.5.1", - "lodash": "^4.17.10", + "lodash": "^4.17.11", 
"source-map": "^0.5.0", "trim-right": "^1.0.1" }, @@ -152,9 +146,9 @@ } }, "@babel/parser": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.3.1.tgz", - "integrity": "sha512-ATz6yX/L8LEnC3dtLQnIx4ydcPxhLcoy9Vl6re00zb2w5lG6itY6Vhnr1KFRPq/FHNsgl/gh2mjNN20f9iJTTA==", + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.3.4.tgz", + "integrity": "sha512-tXZCqWtlOOP4wgCp6RjRvLmfuhnqTLy9VHwRochJBCP2nDm27JnnuFEnXFASVyQNHk36jD1tAammsCEEqgscIQ==", "dev": true }, "@babel/plugin-syntax-object-rest-spread": { @@ -167,9 +161,9 @@ } }, "@babel/runtime": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.3.1.tgz", - "integrity": "sha512-7jGW8ppV0ant637pIqAcFfQDDH1orEPGJb8aXfUozuCU3QqX7rX4DA8iwrbPrR1hcH0FTTHz47yQnk+bl5xHQA==", + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.3.4.tgz", + "integrity": "sha512-IvfvnMdSaLBateu0jfsYIpZTxAc2cKEXEMiezGGN75QcBcecDUKd3PgLAncT0oOgxKy8dd8hrJKj9MfzgfZd6g==", "requires": { "regenerator-runtime": "^0.12.0" } @@ -186,20 +180,20 @@ } }, "@babel/traverse": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.2.3.tgz", - "integrity": "sha512-Z31oUD/fJvEWVR0lNZtfgvVt512ForCTNKYcJBGbPb1QZfve4WGH8Wsy7+Mev33/45fhP/hwQtvgusNdcCMgSw==", + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.3.4.tgz", + "integrity": "sha512-TvTHKp6471OYEcE/91uWmhR6PrrYywQntCHSaZ8CM8Vmp+pjAusal4nGB2WCCQd0rvI7nOMKn9GnbcvTUz3/ZQ==", "dev": true, "requires": { "@babel/code-frame": "^7.0.0", - "@babel/generator": "^7.2.2", + "@babel/generator": "^7.3.4", "@babel/helper-function-name": "^7.1.0", "@babel/helper-split-export-declaration": "^7.0.0", - "@babel/parser": "^7.2.3", - "@babel/types": "^7.2.2", + "@babel/parser": "^7.3.4", + "@babel/types": "^7.3.4", "debug": "^4.1.0", "globals": "^11.1.0", - "lodash": "^4.17.10" + "lodash": 
"^4.17.11" }, "dependencies": { "debug": { @@ -210,50 +204,30 @@ "requires": { "ms": "^2.1.1" } - }, - "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", - "dev": true } } }, "@babel/types": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.3.0.tgz", - "integrity": "sha512-QkFPw68QqWU1/RVPyBe8SO7lXbPfjtqAxRYQKpFpaB8yMq7X2qAqfwK5LKoQufEkSmO5NQ70O6Kc3Afk03RwXw==", + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.3.4.tgz", + "integrity": "sha512-WEkp8MsLftM7O/ty580wAmZzN1nDmCACc5+jFzUt+GUFNNIi3LdRlueYz0YIlmJhlZx1QYDMZL5vdWCL0fNjFQ==", "dev": true, "requires": { "esutils": "^2.0.2", - "lodash": "^4.17.10", + "lodash": "^4.17.11", "to-fast-properties": "^2.0.0" } }, "@blueprintjs/core": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/@blueprintjs/core/-/core-3.12.0.tgz", - "integrity": "sha512-nZGVzgel8YjFye14MU39iMLHTx7iBxG/vPrl432q6pJ7PDuk0M2vJK/eH/0pWISzhTK+/t78mpt3WhUelsvkQg==", - "requires": { - "@blueprintjs/icons": "^3.5.1", - "@types/dom4": "^2.0.0", - "classnames": "^2.2", - "dom4": "^2.0.1", - "normalize.css": "^8.0.0", - "popper.js": "^1.14.1", - "react-popper": "^1.0.0", - "react-transition-group": "^2.2.1", - "resize-observer-polyfill": "^1.5.0", - "tslib": "^1.9.0" - } - }, - "@blueprintjs/icons": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/@blueprintjs/icons/-/icons-3.5.1.tgz", - "integrity": "sha512-sognhg9kAViMCrd2sZJ9KAo0vSm7Co/cBHGLSxblPodV4cI7xyzFi1cavPx+d1D3Q72ATOM46T0N7dggVac/Tw==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@blueprintjs/core/-/core-1.0.1.tgz", + "integrity": "sha1-gfv9/g6gK7kLgLG+MaAec+tiWAY=", "requires": { "classnames": "^2.2", - "tslib": "^1.9.0" + "dom4": "^1.8", + "normalize.css": "4.1.1", + "pure-render-decorator": "~1.1.1", + "tether": 
"^1.2" } }, "@csstools/convert-colors": { @@ -285,16 +259,11 @@ "dev": true }, "@types/d3-array": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-1.2.4.tgz", - "integrity": "sha512-3r1fOAAb+SGfcOGXty/LGvoP0ovMec4UtGNUyHOSzYyvSGpmt+eNMxLowol/3HryusevznSfcHZebEShXMwsZA==", + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-1.2.6.tgz", + "integrity": "sha512-/EcY/15X5tnwkMT2txpjiLUNJj5xHA2vGHOXI8NTYGhETK914RRLQLjNm6EpAI1D2IY5vh3CzuLODnqBAwKjPA==", "dev": true }, - "@types/dom4": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@types/dom4/-/dom4-2.0.1.tgz", - "integrity": "sha512-kSkVAvWmMZiCYtvqjqQEwOmvKwcH+V4uiv3qPQ8pAh1Xl39xggGEo8gHUqV4waYGHezdFw0rKBR8Jt0CrQSDZA==" - }, "@types/history": { "version": "4.7.2", "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.2.tgz", @@ -302,41 +271,42 @@ "dev": true }, "@types/hjson": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@types/hjson/-/hjson-2.4.0.tgz", - "integrity": "sha512-U1/DhtNB1DeIjJjusD3MwAnX1AeAmxlTrBK+R+hvJ07VBDeNgbQI0lb8rLCMXWRH30ok+x6U31ZoEYgwztJkKA==" + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@types/hjson/-/hjson-2.4.1.tgz", + "integrity": "sha512-yXq/C73UHM8GQc6RYJnUXUgxudr2Q9227Iawhkp03YCnfJJTc+6LJnnVLx+UR/Dvw6imO5Q3vpGNmR9IRBI0JQ==", + "dev": true }, "@types/jest": { - "version": "23.3.13", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-23.3.13.tgz", - "integrity": "sha512-ePl4l+7dLLmCucIwgQHAgjiepY++qcI6nb8eAwGNkB6OxmTe3Z9rQU3rSpomqu42PCCnlThZbOoxsf+qylJsLA==", + "version": "23.3.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-23.3.14.tgz", + "integrity": "sha512-Q5hTcfdudEL2yOmluA1zaSyPbzWPmJ3XfSWeP3RyoYvS9hnje1ZyagrZOuQ6+1nQC1Gw+7gap3pLNL3xL6UBug==", "dev": true }, "@types/lodash": { - "version": "4.14.120", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.120.tgz", - "integrity": 
"sha512-jQ21kQ120mo+IrDs1nFNVm/AsdFxIx2+vZ347DbogHJPd/JzKNMOqU6HCYin1W6v8l5R9XSO2/e9cxmn7HAnVw==", + "version": "4.14.121", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.121.tgz", + "integrity": "sha512-ORj7IBWj13iYufXt/VXrCNMbUuCTJfhzme5kx9U/UtcIPdJYuvPDUAlHlbNhz/8lKCLy9XGIZnGrqXOtQbPGoQ==", "dev": true }, "@types/lodash.debounce": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@types/lodash.debounce/-/lodash.debounce-4.0.4.tgz", - "integrity": "sha512-W3oJCQXSCmOE9uIqOdrUWT08YNSXyqXed8JhxJKCe4SH40yxz5HSdtStN1ZQYkvT7S/tae8PA34Y0TO5C7Z8Ng==", + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/@types/lodash.debounce/-/lodash.debounce-4.0.5.tgz", + "integrity": "sha512-f7x1/7U8xNDCYgO0UEB9bRkYDxmOl3OAFZS5l4PvTa6gtURzy1Mxv2f7f1+WBSGgOGhl5jia+Hw027H1f+S90Q==", "dev": true, "requires": { "@types/lodash": "*" } }, "@types/mocha": { - "version": "5.2.5", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-5.2.5.tgz", - "integrity": "sha512-lAVp+Kj54ui/vLUFxsJTMtWvZraZxum3w3Nwkble2dNuV5VnPA+Mi2oGX9XYJAaIvZi3tn3cbjS/qcJXRb6Bww==", + "version": "5.2.6", + "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-5.2.6.tgz", + "integrity": "sha512-1axi39YdtBI7z957vdqXI4Ac25e7YihYQtJa+Clnxg1zTJEaIRbndt71O3sP4GAMgiAm0pY26/b9BrY4MR/PMw==", "dev": true }, "@types/node": { - "version": "10.12.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.12.18.tgz", - "integrity": "sha512-fh+pAqt4xRzPfqA6eh3Z2y6fyZavRIumvjhaCL753+TVkGKGhpPeyrJG2JftD0T9q4GF00KjefsQ+PQNDdWQaQ==", + "version": "10.12.27", + "resolved": "https://registry.npmjs.org/@types/node/-/node-10.12.27.tgz", + "integrity": "sha512-e9wgeY6gaY21on3ve0xAjgBVjGDWq/xUteK0ujsE53bUoxycMkqfnkUgMt6ffZtykZ5X12Mg3T7Pw4TRCObDKg==", "dev": true }, "@types/numeral": { @@ -346,15 +316,15 @@ "dev": true }, "@types/prop-types": { - "version": "15.5.8", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.5.8.tgz", - "integrity": 
"sha512-3AQoUxQcQtLHsK25wtTWIoIpgYjH3vSDroZOUr7PpCHw/jLY1RB9z9E8dBT/OSmwStVgkRNvdh+ZHNiomRieaw==", + "version": "15.5.9", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.5.9.tgz", + "integrity": "sha512-Nha5b+jmBI271jdTMwrHiNXM+DvThjHOfyZtMX9kj/c/LUj2xiLHsG/1L3tJ8DjAoQN48cHwUwtqBotjyXaSdQ==", "dev": true }, "@types/react": { - "version": "16.7.21", - "resolved": "https://registry.npmjs.org/@types/react/-/react-16.7.21.tgz", - "integrity": "sha512-8BPxwygC83LgaIjOVVLrzB4mpP2u1ih01fbfy76L3h9OgKN+fNyMVPXj/0mGpWnxImjiM/2lqb3YOeT2Ca+NYQ==", + "version": "16.8.5", + "resolved": "https://registry.npmjs.org/@types/react/-/react-16.8.5.tgz", + "integrity": "sha512-8LRySaaSJVLNZb2dbOGvGmzn88cbAfrgDpuWy+6lLgQ0OJFgHHvyuaCX4/7ikqJlpmCPf4uazJAZcfTQRdJqdQ==", "dev": true, "requires": { "@types/prop-types": "*", @@ -362,18 +332,18 @@ } }, "@types/react-dom": { - "version": "16.0.11", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-16.0.11.tgz", - "integrity": "sha512-x6zUx9/42B5Kl2Vl9HlopV8JF64wLpX3c+Pst9kc1HgzrsH+mkehe/zmHMQTplIrR48H2gpU7ZqurQolYu8XBA==", + "version": "16.8.2", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-16.8.2.tgz", + "integrity": "sha512-MX7n1wq3G/De15RGAAqnmidzhr2Y9O/ClxPxyqaNg96pGyeXUYPSvujgzEVpLo9oIP4Wn1UETl+rxTN02KEpBw==", "dev": true, "requires": { "@types/react": "*" } }, "@types/react-router": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-4.4.3.tgz", - "integrity": "sha512-8GmjakEBFNCLJbpg9jtDp1EDvFP0VkIPPKBpVwmB3Q+9whFoHu8rluMUXUE5SoGkEQvVOtgJzWmUsJojNpFMQQ==", + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-4.4.4.tgz", + "integrity": "sha512-TZVfpT6nvUv/lbho/nRtckEtgkhspOQr3qxrnpXixwgQRKKyg5PvDfNKc8Uend/p/Pi70614VCmC0NPAKWF+0g==", "dev": true, "requires": { "@types/history": "*", @@ -392,184 +362,188 @@ } }, "@types/react-table": { - "version": "6.7.21", - "resolved": 
"https://registry.npmjs.org/@types/react-table/-/react-table-6.7.21.tgz", - "integrity": "sha512-XiYCcn/CBajrj18vLA3kO79AHr5yZTCJe2kl87ZNTRxLO14y9D0IGeGZ3xLsqhfYrJSkkVzAJV8v+bQ4nuKCRQ==", + "version": "6.7.22", + "resolved": "https://registry.npmjs.org/@types/react-table/-/react-table-6.7.22.tgz", + "integrity": "sha512-gFW1QLTMmcPKUVsb2YCF9m6FwwTelVRehb8hjJRluM9KKJl5ANA0jSYZz4zN9fVFsMn11BoYO43a/3jKi2XH/w==", "dev": true, "requires": { "@types/react": "*" } }, "@webassemblyjs/ast": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.7.11.tgz", - "integrity": "sha512-ZEzy4vjvTzScC+SH8RBssQUawpaInUdMTYwYYLh54/s8TuT0gBLuyUnppKsVyZEi876VmmStKsUs28UxPgdvrA==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.8.3.tgz", + "integrity": "sha512-xy3m06+Iu4D32+6soz6zLnwznigXJRuFNTovBX2M4GqVqLb0dnyWLbPnpcXvUSdEN+9DVyDeaq2jyH1eIL2LZQ==", "dev": true, "requires": { - "@webassemblyjs/helper-module-context": "1.7.11", - "@webassemblyjs/helper-wasm-bytecode": "1.7.11", - "@webassemblyjs/wast-parser": "1.7.11" + "@webassemblyjs/helper-module-context": "1.8.3", + "@webassemblyjs/helper-wasm-bytecode": "1.8.3", + "@webassemblyjs/wast-parser": "1.8.3" } }, "@webassemblyjs/floating-point-hex-parser": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.7.11.tgz", - "integrity": "sha512-zY8dSNyYcgzNRNT666/zOoAyImshm3ycKdoLsyDw/Bwo6+/uktb7p4xyApuef1dwEBo/U/SYQzbGBvV+nru2Xg==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.8.3.tgz", + "integrity": "sha512-vq1TISG4sts4f0lDwMUM0f3kpe0on+G3YyV5P0IySHFeaLKRYZ++n2fCFfG4TcCMYkqFeTUYFxm75L3ddlk2xA==", "dev": true }, "@webassemblyjs/helper-api-error": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.7.11.tgz", - "integrity": 
"sha512-7r1qXLmiglC+wPNkGuXCvkmalyEstKVwcueZRP2GNC2PAvxbLYwLLPr14rcdJaE4UtHxQKfFkuDFuv91ipqvXg==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.8.3.tgz", + "integrity": "sha512-BmWEynI4FnZbjk8CaYZXwcv9a6gIiu+rllRRouQUo73hglanXD3AGFJE7Q4JZCoVE0p5/jeX6kf5eKa3D4JxwQ==", "dev": true }, "@webassemblyjs/helper-buffer": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.7.11.tgz", - "integrity": "sha512-MynuervdylPPh3ix+mKZloTcL06P8tenNH3sx6s0qE8SLR6DdwnfgA7Hc9NSYeob2jrW5Vql6GVlsQzKQCa13w==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.8.3.tgz", + "integrity": "sha512-iVIMhWnNHoFB94+/2l7LpswfCsXeMRnWfExKtqsZ/E2NxZyUx9nTeKK/MEMKTQNEpyfznIUX06OchBHQ+VKi/Q==", "dev": true }, "@webassemblyjs/helper-code-frame": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.7.11.tgz", - "integrity": "sha512-T8ESC9KMXFTXA5urJcyor5cn6qWeZ4/zLPyWeEXZ03hj/x9weSokGNkVCdnhSabKGYWxElSdgJ+sFa9G/RdHNw==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.8.3.tgz", + "integrity": "sha512-K1UxoJML7GKr1QXR+BG7eXqQkvu+eEeTjlSl5wUFQ6W6vaOc5OwSxTcb3oE9x/3+w4NHhrIKD4JXXCZmLdL2cg==", "dev": true, "requires": { - "@webassemblyjs/wast-printer": "1.7.11" + "@webassemblyjs/wast-printer": "1.8.3" } }, "@webassemblyjs/helper-fsm": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.7.11.tgz", - "integrity": "sha512-nsAQWNP1+8Z6tkzdYlXT0kxfa2Z1tRTARd8wYnc/e3Zv3VydVVnaeePgqUzFrpkGUyhUUxOl5ML7f1NuT+gC0A==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.8.3.tgz", + "integrity": 
"sha512-387zipfrGyO77/qm7/SDUiZBjQ5KGk4qkrVIyuoubmRNIiqn3g+6ijY8BhnlGqsCCQX5bYKOnttJobT5xoyviA==", "dev": true }, "@webassemblyjs/helper-module-context": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.7.11.tgz", - "integrity": "sha512-JxfD5DX8Ygq4PvXDucq0M+sbUFA7BJAv/GGl9ITovqE+idGX+J3QSzJYz+LwQmL7fC3Rs+utvWoJxDb6pmC0qg==", - "dev": true + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.8.3.tgz", + "integrity": "sha512-lPLFdQfaRssfnGEJit5Sk785kbBPPPK4ZS6rR5W/8hlUO/5v3F+rN8XuUcMj/Ny9iZiyKhhuinWGTUuYL4VKeQ==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.3", + "mamacro": "^0.0.3" + } }, "@webassemblyjs/helper-wasm-bytecode": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.7.11.tgz", - "integrity": "sha512-cMXeVS9rhoXsI9LLL4tJxBgVD/KMOKXuFqYb5oCJ/opScWpkCMEz9EJtkonaNcnLv2R3K5jIeS4TRj/drde1JQ==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.8.3.tgz", + "integrity": "sha512-R1nJW7bjyJLjsJQR5t3K/9LJ0QWuZezl8fGa49DZq4IVaejgvkbNlKEQxLYTC579zgT4IIIVHb5JA59uBPHXyw==", "dev": true }, "@webassemblyjs/helper-wasm-section": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.7.11.tgz", - "integrity": "sha512-8ZRY5iZbZdtNFE5UFunB8mmBEAbSI3guwbrsCl4fWdfRiAcvqQpeqd5KHhSWLL5wuxo53zcaGZDBU64qgn4I4Q==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.8.3.tgz", + "integrity": "sha512-P6F7D61SJY73Yz+fs49Q3+OzlYAZP86OfSpaSY448KzUy65NdfzDmo2NPVte+Rw4562MxEAacvq/mnDuvRWOcg==", "dev": true, "requires": { - "@webassemblyjs/ast": "1.7.11", - "@webassemblyjs/helper-buffer": "1.7.11", - 
"@webassemblyjs/helper-wasm-bytecode": "1.7.11", - "@webassemblyjs/wasm-gen": "1.7.11" + "@webassemblyjs/ast": "1.8.3", + "@webassemblyjs/helper-buffer": "1.8.3", + "@webassemblyjs/helper-wasm-bytecode": "1.8.3", + "@webassemblyjs/wasm-gen": "1.8.3" } }, "@webassemblyjs/ieee754": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.7.11.tgz", - "integrity": "sha512-Mmqx/cS68K1tSrvRLtaV/Lp3NZWzXtOHUW2IvDvl2sihAwJh4ACE0eL6A8FvMyDG9abes3saB6dMimLOs+HMoQ==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.8.3.tgz", + "integrity": "sha512-UD4HuLU99hjIvWz1pD68b52qsepWQlYCxDYVFJQfHh3BHyeAyAlBJ+QzLR1nnS5J6hAzjki3I3AoJeobNNSZlg==", "dev": true, "requires": { "@xtuc/ieee754": "^1.2.0" } }, "@webassemblyjs/leb128": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.7.11.tgz", - "integrity": "sha512-vuGmgZjjp3zjcerQg+JA+tGOncOnJLWVkt8Aze5eWQLwTQGNgVLcyOTqgSCxWTR4J42ijHbBxnuRaL1Rv7XMdw==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.8.3.tgz", + "integrity": "sha512-XXd3s1BmkC1gpGABuCRLqCGOD6D2L+Ma2BpwpjrQEHeQATKWAQtxAyU9Z14/z8Ryx6IG+L4/NDkIGHrccEhRUg==", "dev": true, "requires": { - "@xtuc/long": "4.2.1" + "@xtuc/long": "4.2.2" } }, "@webassemblyjs/utf8": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.7.11.tgz", - "integrity": "sha512-C6GFkc7aErQIAH+BMrIdVSmW+6HSe20wg57HEC1uqJP8E/xpMjXqQUxkQw07MhNDSDcGpxI9G5JSNOQCqJk4sA==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.8.3.tgz", + "integrity": "sha512-Wv/WH9Zo5h5ZMyfCNpUrjFsLZ3X1amdfEuwdb7MLdG3cPAjRS6yc6ElULlpjLiiBTuzvmLhr3ENsuGyJ3wyCgg==", "dev": true }, "@webassemblyjs/wasm-edit": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.7.11.tgz", - "integrity": 
"sha512-FUd97guNGsCZQgeTPKdgxJhBXkUbMTY6hFPf2Y4OedXd48H97J+sOY2Ltaq6WGVpIH8o/TGOVNiVz/SbpEMJGg==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.8.3.tgz", + "integrity": "sha512-nB19eUx3Yhi1Vvv3yev5r+bqQixZprMtaoCs1brg9Efyl8Hto3tGaUoZ0Yb4Umn/gQCyoEGFfUxPLp1/8+Jvnw==", "dev": true, "requires": { - "@webassemblyjs/ast": "1.7.11", - "@webassemblyjs/helper-buffer": "1.7.11", - "@webassemblyjs/helper-wasm-bytecode": "1.7.11", - "@webassemblyjs/helper-wasm-section": "1.7.11", - "@webassemblyjs/wasm-gen": "1.7.11", - "@webassemblyjs/wasm-opt": "1.7.11", - "@webassemblyjs/wasm-parser": "1.7.11", - "@webassemblyjs/wast-printer": "1.7.11" + "@webassemblyjs/ast": "1.8.3", + "@webassemblyjs/helper-buffer": "1.8.3", + "@webassemblyjs/helper-wasm-bytecode": "1.8.3", + "@webassemblyjs/helper-wasm-section": "1.8.3", + "@webassemblyjs/wasm-gen": "1.8.3", + "@webassemblyjs/wasm-opt": "1.8.3", + "@webassemblyjs/wasm-parser": "1.8.3", + "@webassemblyjs/wast-printer": "1.8.3" } }, "@webassemblyjs/wasm-gen": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.7.11.tgz", - "integrity": "sha512-U/KDYp7fgAZX5KPfq4NOupK/BmhDc5Kjy2GIqstMhvvdJRcER/kUsMThpWeRP8BMn4LXaKhSTggIJPOeYHwISA==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.8.3.tgz", + "integrity": "sha512-sDNmu2nLBJZ/huSzlJvd9IK8B1EjCsOl7VeMV9VJPmxKYgTJ47lbkSP+KAXMgZWGcArxmcrznqm7FrAPQ7vVGg==", "dev": true, "requires": { - "@webassemblyjs/ast": "1.7.11", - "@webassemblyjs/helper-wasm-bytecode": "1.7.11", - "@webassemblyjs/ieee754": "1.7.11", - "@webassemblyjs/leb128": "1.7.11", - "@webassemblyjs/utf8": "1.7.11" + "@webassemblyjs/ast": "1.8.3", + "@webassemblyjs/helper-wasm-bytecode": "1.8.3", + "@webassemblyjs/ieee754": "1.8.3", + "@webassemblyjs/leb128": "1.8.3", + "@webassemblyjs/utf8": "1.8.3" } }, "@webassemblyjs/wasm-opt": { - "version": "1.7.11", - "resolved": 
"https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.7.11.tgz", - "integrity": "sha512-XynkOwQyiRidh0GLua7SkeHvAPXQV/RxsUeERILmAInZegApOUAIJfRuPYe2F7RcjOC9tW3Cb9juPvAC/sCqvg==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.8.3.tgz", + "integrity": "sha512-j8lmQVFR+FR4/645VNgV4R/Jz8i50eaPAj93GZyd3EIJondVshE/D9pivpSDIXyaZt+IkCodlzOoZUE4LnQbeA==", "dev": true, "requires": { - "@webassemblyjs/ast": "1.7.11", - "@webassemblyjs/helper-buffer": "1.7.11", - "@webassemblyjs/wasm-gen": "1.7.11", - "@webassemblyjs/wasm-parser": "1.7.11" + "@webassemblyjs/ast": "1.8.3", + "@webassemblyjs/helper-buffer": "1.8.3", + "@webassemblyjs/wasm-gen": "1.8.3", + "@webassemblyjs/wasm-parser": "1.8.3" } }, "@webassemblyjs/wasm-parser": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.7.11.tgz", - "integrity": "sha512-6lmXRTrrZjYD8Ng8xRyvyXQJYUQKYSXhJqXOBLw24rdiXsHAOlvw5PhesjdcaMadU/pyPQOJ5dHreMjBxwnQKg==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.8.3.tgz", + "integrity": "sha512-NBI3SNNtRoy4T/KBsRZCAWUzE9lI94RH2nneLwa1KKIrt/2zzcTavWg6oY05ArCbb/PZDk3OUi63CD1RYtN65w==", "dev": true, "requires": { - "@webassemblyjs/ast": "1.7.11", - "@webassemblyjs/helper-api-error": "1.7.11", - "@webassemblyjs/helper-wasm-bytecode": "1.7.11", - "@webassemblyjs/ieee754": "1.7.11", - "@webassemblyjs/leb128": "1.7.11", - "@webassemblyjs/utf8": "1.7.11" + "@webassemblyjs/ast": "1.8.3", + "@webassemblyjs/helper-api-error": "1.8.3", + "@webassemblyjs/helper-wasm-bytecode": "1.8.3", + "@webassemblyjs/ieee754": "1.8.3", + "@webassemblyjs/leb128": "1.8.3", + "@webassemblyjs/utf8": "1.8.3" } }, "@webassemblyjs/wast-parser": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.7.11.tgz", - "integrity": 
"sha512-lEyVCg2np15tS+dm7+JJTNhNWq9yTZvi3qEhAIIOaofcYlUp0UR5/tVqOwa/gXYr3gjwSZqw+/lS9dscyLelbQ==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.8.3.tgz", + "integrity": "sha512-gZPst4CNcmGtKC1eYQmgCx6gwQvxk4h/nPjfPBbRoD+Raw3Hs+BS3yhrfgyRKtlYP+BJ8LcY9iFODEQofl2qbg==", "dev": true, "requires": { - "@webassemblyjs/ast": "1.7.11", - "@webassemblyjs/floating-point-hex-parser": "1.7.11", - "@webassemblyjs/helper-api-error": "1.7.11", - "@webassemblyjs/helper-code-frame": "1.7.11", - "@webassemblyjs/helper-fsm": "1.7.11", - "@xtuc/long": "4.2.1" + "@webassemblyjs/ast": "1.8.3", + "@webassemblyjs/floating-point-hex-parser": "1.8.3", + "@webassemblyjs/helper-api-error": "1.8.3", + "@webassemblyjs/helper-code-frame": "1.8.3", + "@webassemblyjs/helper-fsm": "1.8.3", + "@xtuc/long": "4.2.2" } }, "@webassemblyjs/wast-printer": { - "version": "1.7.11", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.7.11.tgz", - "integrity": "sha512-m5vkAsuJ32QpkdkDOUPGSltrg8Cuk3KBx4YrmAGQwCZPRdUHXxG4phIOuuycLemHFr74sWL9Wthqss4fzdzSwg==", + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.8.3.tgz", + "integrity": "sha512-DTA6kpXuHK4PHu16yAD9QVuT1WZQRT7079oIFFmFSjqjLWGXS909I/7kiLTn931mcj7wGsaUNungjwNQ2lGQ3Q==", "dev": true, "requires": { - "@webassemblyjs/ast": "1.7.11", - "@webassemblyjs/wast-parser": "1.7.11", - "@xtuc/long": "4.2.1" + "@webassemblyjs/ast": "1.8.3", + "@webassemblyjs/wast-parser": "1.8.3", + "@xtuc/long": "4.2.2" } }, "@xtuc/ieee754": { @@ -579,9 +553,9 @@ "dev": true }, "@xtuc/long": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.1.tgz", - "integrity": "sha512-FZdkNBDqBRHKQ2MEbSC17xnPFOhZxeJ2YGSfr2BKf3sujG49Qe3bB+rGCwQfIaA7WHnGeGkSijX4FuBCdrzW/g==", + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": 
"sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", "dev": true }, "abab": { @@ -629,9 +603,9 @@ }, "dependencies": { "acorn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.0.5.tgz", - "integrity": "sha512-i33Zgp3XWtmZBMNvCr4azvOFeWVw1Rk6p3hfi3LUDvIFraOMywb1kAtrbi+med14m4Xfpqm3zRZMT+c0FNE7kg==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.1.0.tgz", + "integrity": "sha512-MW/FjM+IvU9CgBzjO3UIPCE2pyEwUsoFl+VGdczOPEdxfGFjuKny/gN54mOuX7Qxmb9Rg9MCn2oKiSUeW+pjrw==", "dev": true } } @@ -643,9 +617,9 @@ "dev": true }, "ajv": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.7.0.tgz", - "integrity": "sha512-RZXPviBTtfmtka9n9sy1N5M5b82CbxWIR6HIis4s3WQTXDJamc/0gpCWNGz6EWdWp4DOfjzJfhz/AS9zVPjjWg==", + "version": "6.9.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.9.2.tgz", + "integrity": "sha512-4UFy0/LgDo7Oa/+wOAlj44tp9K78u38E5/359eSrqEp1Z5PdVfimCcs7SluXMP755RUQu6d2b4AvF0R1C9RZjg==", "dev": true, "requires": { "fast-deep-equal": "^2.0.1", @@ -661,9 +635,9 @@ "dev": true }, "ajv-keywords": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.3.0.tgz", - "integrity": "sha512-CMzN9S62ZOO4sA/mJZIO4S++ZM7KFWzH3PPWkveLhy4OZ9i1/VatgwWMD46w/XbGCBy7Ye0gCk+Za6mmyfKK7g==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.4.0.tgz", + "integrity": "sha512-aUjdRFISbuFOl0EIZc+9e4FfZp0bDZgAdOOf30bJmw8VM9v84SHyVyxDfbWxpGYbdZD/9XoKxfHVNmxPkhwyGw==", "dev": true }, "amdefine": { @@ -679,9 +653,9 @@ "dev": true }, "ansi-escapes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.1.0.tgz", - "integrity": "sha512-UgAb8H9D41AQnu/PbWlCofQVcnV4Gs2bBJi9eZPxfU/hgglFh3SMDMENRIqdr7H6XFnXdoknctFByVsCOotTVw==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", + 
"integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", "dev": true }, "ansi-html": { @@ -897,12 +871,12 @@ "dev": true }, "async": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.1.tgz", - "integrity": "sha512-fNEiL2+AZt6AlAw/29Cr0UDe4sRAHCpEHh54WMz+Bb7QfNcFw4h3loofyJpLeQs4Yx7yuqu/2dLgM5hKOs6HlQ==", + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.2.tgz", + "integrity": "sha512-H1qVYh1MYhEEFLsP97cVKqCGo7KfCyTt6uEWqsTBr9SO84oK9Uwbyd/yCW+6rKJLHksBNUVWZDAjfS+Ccx0Bbg==", "dev": true, "requires": { - "lodash": "^4.17.10" + "lodash": "^4.17.11" } }, "async-each": { @@ -936,16 +910,16 @@ "dev": true }, "autoprefixer": { - "version": "9.4.6", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.4.6.tgz", - "integrity": "sha512-Yp51mevbOEdxDUy5WjiKtpQaecqYq9OqZSL04rSoCiry7Tc5I9FEyo3bfxiTJc1DfHeKwSFCUYbBAiOQ2VGfiw==", + "version": "9.4.9", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.4.9.tgz", + "integrity": "sha512-OyUl7KvbGBoFQbGQu51hMywz1aaVeud/6uX8r1R1DNcqFvqGUUy6+BDHnAZE8s5t5JyEObaSw+O1DpAdjAmLuw==", "dev": true, "requires": { - "browserslist": "^4.4.1", - "caniuse-lite": "^1.0.30000929", + "browserslist": "^4.4.2", + "caniuse-lite": "^1.0.30000939", "normalize-range": "^0.1.2", "num2fraction": "^1.2.2", - "postcss": "^7.0.13", + "postcss": "^7.0.14", "postcss-value-parser": "^3.3.1" } }, @@ -971,19 +945,21 @@ } }, "babel-jest": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-24.0.0.tgz", - "integrity": "sha512-YGKRbZUjoRmNIAyG7x4wYxUyHvHPFpYXj6Mx1A5cslhaQOUgP/+LF3wtFgMuOQkIpjbVNBufmOnVY0QVwB5v9Q==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-24.1.0.tgz", + "integrity": "sha512-MLcagnVrO9ybQGLEfZUqnOzv36iQzU7Bj4elm39vCukumLVSfoX+tRy3/jW7lUKc7XdpRmB/jech6L/UCsSZjw==", "dev": true, "requires": { 
"babel-plugin-istanbul": "^5.1.0", - "babel-preset-jest": "^24.0.0" + "babel-preset-jest": "^24.1.0", + "chalk": "^2.4.2", + "slash": "^2.0.0" } }, "babel-plugin-istanbul": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-5.1.0.tgz", - "integrity": "sha512-CLoXPRSUWiR8yao8bShqZUIC6qLfZVVY3X1wj+QPNXu0wfmrRRfarh1LYy+dYMVI+bDj0ghy3tuqFFRFZmL1Nw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-5.1.1.tgz", + "integrity": "sha512-RNNVv2lsHAXJQsEJ5jonQwrJVWK8AcZpG1oxhnjCUaAjL7xahYLANhPUZbzEQHjKy1NMYUwn+0NPKQc8iSY4xQ==", "dev": true, "requires": { "find-up": "^3.0.0", @@ -992,19 +968,19 @@ } }, "babel-plugin-jest-hoist": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-24.0.0.tgz", - "integrity": "sha512-ipefE7YWNyRNVaV/MonUb/I5nef53ZRFR74P9meMGmJxqt8s1BJmfhw11YeIMbcjXN4fxtWUaskZZe8yreXE1Q==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-24.1.0.tgz", + "integrity": "sha512-gljYrZz8w1b6fJzKcsfKsipSru2DU2DmQ39aB6nV3xQ0DDv3zpIzKGortA5gknrhNnPN8DweaEgrnZdmbGmhnw==", "dev": true }, "babel-preset-jest": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-24.0.0.tgz", - "integrity": "sha512-ECMMOLvNDCmsn3geBa3JkwzylcfpThMpAdfreONQm8EmXcs4tXUpXZDQPxiIMg7nMobTuAC2zDGIKrbrBXW2Vg==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-24.1.0.tgz", + "integrity": "sha512-FfNLDxFWsNX9lUmtwY7NheGlANnagvxq8LZdl5PKnVG3umP+S/g0XbVBfwtA4Ai3Ri/IMkWabBz3Tyk9wdspcw==", "dev": true, "requires": { "@babel/plugin-syntax-object-rest-spread": "^7.0.0", - "babel-plugin-jest-hoist": "^24.0.0" + "babel-plugin-jest-hoist": "^24.1.0" } }, "balanced-match": { @@ -1096,9 +1072,9 @@ "dev": true }, "binary-extensions": { - "version": "1.12.0", 
- "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.12.0.tgz", - "integrity": "sha512-DYWGk01lDcxeS/K9IHPGWfT8PsJmbXRtRd2Sx72Tnb8pcYZQFF1oSDb8hJtS1vhp212q1Rzi5dUf9+nq0o9UIg==", + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.0.tgz", + "integrity": "sha512-EgmjVLMn22z7eGGv3kcnHwSnJXmFHjISTY9E/S5lIcTD3Oxw05QTcBLNkJFzcb3cNueUdF/IN4U+d78V0zO8Hw==", "dev": true }, "block-stream": { @@ -1157,6 +1133,12 @@ "requires": { "safer-buffer": ">= 2.1.2 < 3" } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true } } }, @@ -1174,6 +1156,11 @@ "multicast-dns-service-types": "^1.1.0" } }, + "brace": { + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/brace/-/brace-0.11.1.tgz", + "integrity": "sha1-SJb8ydVE7vRfS7dmDbMg07N5/lg=" + }, "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", @@ -1320,14 +1307,14 @@ } }, "browserslist": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.4.1.tgz", - "integrity": "sha512-pEBxEXg7JwaakBXjATYw/D1YZh4QUSCX/Mnd/wnqSRPPSi1U39iDhDoKGoBUcraKdxDlrYqJxSI5nNvD+dWP2A==", + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.4.2.tgz", + "integrity": "sha512-ISS/AIAiHERJ3d45Fz0AVYKkgcy+F/eJHzKEvv1j0wwKGKD9T3BrwKr/5g45L+Y4XIK5PlTqefHciRFcfE1Jxg==", "dev": true, "requires": { - "caniuse-lite": "^1.0.30000929", - "electron-to-chromium": "^1.3.103", - "node-releases": "^1.1.3" + "caniuse-lite": "^1.0.30000939", + "electron-to-chromium": "^1.3.113", + "node-releases": "^1.1.8" } }, "bs-logger": { @@ -1385,12 +1372,6 @@ "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=", "dev": true }, - "builtin-modules": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/builtin-modules/-/builtin-modules-1.1.1.tgz", - "integrity": "sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8=", - "dev": true - }, "builtin-status-codes": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", @@ -1496,9 +1477,9 @@ } }, "caniuse-lite": { - "version": "1.0.30000932", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000932.tgz", - "integrity": "sha512-4bghJFItvzz8m0T3lLZbacmEY9X1Z2AtIzTr7s7byqZIOumASfr4ynDx7rtm0J85nDmx8vsgR6vnaSoeU8Oh0A==", + "version": "1.0.30000939", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000939.tgz", + "integrity": "sha512-oXB23ImDJOgQpGjRv1tCtzAvJr4/OvrHi5SO2vUgB0g0xpdZZoA/BxfImiWfdwoYdUTtQrPsXsvYU/dmCSM8gg==", "dev": true }, "capture-exit": { @@ -1516,6 +1497,11 @@ "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=", "dev": true }, + "chain-function": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/chain-function/-/chain-function-1.0.1.tgz", + "integrity": "sha512-SxltgMwL9uCko5/ZCLiyG2B7R9fY4pDZUw7hJ4MhirdjBLosoDqkWABi3XMucddHdLiFJMb7PD2MZifZriuMTg==" + }, "chalk": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", @@ -1539,24 +1525,31 @@ } }, "chokidar": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.0.4.tgz", - "integrity": "sha512-z9n7yt9rOvIJrMhvDtDictKrkFHeihkNl6uWMmZlmL6tJtX9Cs+87oK+teBx+JIgzvbX3yZHT3eF8vpbDxHJXQ==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.2.tgz", + "integrity": "sha512-IwXUx0FXc5ibYmPC2XeEj5mpXoV66sR+t3jqu2NS2GYwCktt3KF1/Qqjws/NkegajBA4RbZ5+DDwlOiJsxDHEg==", "dev": true, "requires": { "anymatch": "^2.0.0", - "async-each": "^1.0.0", - "braces": "^2.3.0", - "fsevents": "^1.2.2", + "async-each": "^1.0.1", + "braces": "^2.3.2", + "fsevents": "^1.2.7", "glob-parent": "^3.1.0", - "inherits": "^2.0.1", + "inherits": "^2.0.3", 
"is-binary-path": "^1.0.0", "is-glob": "^4.0.0", - "lodash.debounce": "^4.0.8", - "normalize-path": "^2.1.1", + "normalize-path": "^3.0.0", "path-is-absolute": "^1.0.0", - "readdirp": "^2.0.0", - "upath": "^1.0.5" + "readdirp": "^2.2.1", + "upath": "^1.1.0" + }, + "dependencies": { + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true + } } }, "chownr": { @@ -1729,12 +1722,12 @@ "dev": true }, "compressible": { - "version": "2.0.15", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.15.tgz", - "integrity": "sha512-4aE67DL33dSW9gw4CI2H/yTxqHLNcxp0yS6jB+4h+wr3e43+1z7vm0HU9qXOH8j+qjKuL8+UtkOxYQSMq60Ylw==", + "version": "2.0.16", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.16.tgz", + "integrity": "sha512-JQfEOdnI7dASwCuSPWIeVYwc/zMsu/+tRhoUvEfXz2gxOA2DNjmG5vhtFdBlhWPPGo+RdT9S3tgc/uH5qgDiiA==", "dev": true, "requires": { - "mime-db": ">= 1.36.0 < 2" + "mime-db": ">= 1.38.0 < 2" } }, "compression": { @@ -1760,6 +1753,12 @@ "requires": { "ms": "2.0.0" } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true } } }, @@ -1921,15 +1920,6 @@ "sha.js": "^2.4.8" } }, - "create-react-context": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/create-react-context/-/create-react-context-0.2.2.tgz", - "integrity": "sha512-KkpaLARMhsTsgp0d2NA/R94F/eDLbhXERdIq3LvX2biCAXcDvHYoOqHfWCHf1+OLj+HKBotLG3KqaOOf+C1C+A==", - "requires": { - "fbjs": "^0.8.0", - "gud": "^1.0.0" - } - }, "cross-spawn": { "version": "6.0.5", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", @@ -2038,24 +2028,24 @@ "dev": true }, "cssom": { - "version": "0.3.4", - "resolved": 
"https://registry.npmjs.org/cssom/-/cssom-0.3.4.tgz", - "integrity": "sha512-+7prCSORpXNeR4/fUP3rL+TzqtiFfhMvTd7uEqMdgPvLPt4+uzFUeufx5RHjGTACCargg/DiEt/moMQmvnfkog==", + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.6.tgz", + "integrity": "sha512-DtUeseGk9/GBW0hl0vVPpU22iHL6YB5BUX7ml1hB+GMpo0NX5G4voX3kdWiMSEguFtcW3Vh3djqNF4aIe6ne0A==", "dev": true }, "cssstyle": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-1.1.1.tgz", - "integrity": "sha512-364AI1l/M5TYcFH83JnOH/pSqgaNnKmYgKrm0didZMGKWjQB60dymwWy1rKUgL3J1ffdq9xVi2yGLHdSjjSNog==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-1.2.1.tgz", + "integrity": "sha512-7DYm8qe+gPx/h77QlCyFmX80+fGaE/6A/Ekl0zaszYOubvySO2saYFdQ78P29D0UsULxFKCetDGNaNRUdSF+2A==", "dev": true, "requires": { "cssom": "0.3.x" } }, "csstype": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-2.6.1.tgz", - "integrity": "sha512-wv7IRqCGsL7WGKB8gPvrl+++HlFM9kxAM6jL1EXNPNTshEJYilMkbfS2SnuHha77uosp/YVK0wAp2jmlBzn1tg==", + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-2.6.2.tgz", + "integrity": "sha512-Rl7PvTae0pflc1YtxtKbiSqq20Ts6vpIYOD5WBafl4y123DyHUeLrRdQP66sQW8/6gmX8jrYJLXwNeMqYVJcow==", "dev": true }, "currently-unhandled": { @@ -2118,13 +2108,19 @@ "dev": true }, "debug": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", - "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", + "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", "requires": { - "ms": "2.0.0" + "ms": "^2.1.1" } }, + "debuglog": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/debuglog/-/debuglog-1.0.1.tgz", + "integrity": 
"sha1-qiT/uaw9+aI1GDfPstJ5NgzXhJI=", + "dev": true + }, "decamelize": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", @@ -2150,36 +2146,13 @@ "dev": true }, "default-gateway": { - "version": "2.7.2", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-2.7.2.tgz", - "integrity": "sha512-lAc4i9QJR0YHSDFdzeBQKfZ1SRDG3hsJNEkrpcZa8QhBfidLAilT60BDEIVUUGqosFp425KOgB3uYqcnQrWafQ==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-4.1.2.tgz", + "integrity": "sha512-xhJUAp3u02JsBGovj0V6B6uYhKCUOmiNc8xGmReUwGu77NmvcpxPVB0pCielxMFumO7CmXBG02XjM8HB97k8Hw==", "dev": true, "requires": { - "execa": "^0.10.0", + "execa": "^1.0.0", "ip-regex": "^2.1.0" - }, - "dependencies": { - "execa": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.10.0.tgz", - "integrity": "sha512-7XOMnz8Ynx1gGo/3hyV9loYNPWM94jG3+3T3Y8tsfSstFmETmENCMU/A/zj8Lyaj1lkgEepKepvd6240tBRvlw==", - "dev": true, - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=", - "dev": true - } } }, "default-require-extensions": { @@ -2335,12 +2308,27 @@ "integrity": "sha512-ZIzRpLJrOj7jjP2miAtgqIfmzbxa4ZOr5jJc601zklsfEx9oTzmmj2nVpIPRpNlRTIh8lc1kyViIY7BWSGNmKw==", "dev": true }, + "dezalgo": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/dezalgo/-/dezalgo-1.0.3.tgz", + "integrity": "sha1-f3Qt4Gb8dIvI24IFad3c5Jvw1FY=", + "dev": true, + "requires": { + "asap": "^2.0.0", + "wrappy": "1" + } + }, "diff": { "version": "3.5.0", "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", "integrity": 
"sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==", "dev": true }, + "diff-match-patch": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/diff-match-patch/-/diff-match-patch-1.0.4.tgz", + "integrity": "sha512-Uv3SW8bmH9nAtHKaKSanOQmj2DnlH65fUpcrMdfdaOxUG02QQ4YGZ8AE7kKOMisF7UqvOlGKVYWRvezdncW9lg==" + }, "diff-sequences": { "version": "24.0.0", "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-24.0.0.tgz", @@ -2359,12 +2347,11 @@ } }, "dir-glob": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.0.0.tgz", - "integrity": "sha512-37qirFDz8cA5fimp9feo43fSuRo2gHwaIn6dXL8Ber1dGwUosDrGZeCCXq57WnIqE4aQ+u3eQZzsk1yOzhdwag==", + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.2.2.tgz", + "integrity": "sha512-f9LBi5QWzIW3I6e//uxZoLBlUt9kcp66qo0sSCxL6YZKc75R1c4MFCoe/LaZiBGmgujvQdxc5Bn3QhfyvK5Hsw==", "dev": true, "requires": { - "arrify": "^1.0.1", "path-type": "^3.0.0" } }, @@ -2402,9 +2389,9 @@ } }, "dom4": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/dom4/-/dom4-2.1.4.tgz", - "integrity": "sha512-7NNKNViuZYu4GaZMUsSbsV6MFsT/ZpYNKP1NT4YIUgAvwPR8ODuvQEZZ7vRC1u5Y4dHwQ7je/UNOlRRWkaCyvw==" + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/dom4/-/dom4-1.8.5.tgz", + "integrity": "sha512-ehHzOGGkVQOwU9HyZ99gHwkx4ybrRl/P1vJM7EH1nS9XsgHwO+J0KwCnVQrn8iQvpstGwFrtrX7aSNQ43QuK4A==" }, "domain-browser": { "version": "1.2.0", @@ -2427,9 +2414,9 @@ "integrity": "sha512-0sYnfUHHMoajaud/i5BHKA12bUxiWEHJ9rxGqVEppFxsEcxef0TZQ5J59lU+UniEBcz/sG5fTESRyS7cOm3tSQ==" }, "duplexify": { - "version": "3.6.1", - "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.6.1.tgz", - "integrity": "sha512-vM58DwdnKmty+FSPzT14K9JXb90H+j5emaR4KYbr2KTIz00WHGbWOe5ghQTx233ZCLZtrGDALzKwcjEtSt35mA==", + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": 
"sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", "dev": true, "requires": { "end-of-stream": "^1.0.0", @@ -2455,9 +2442,9 @@ "dev": true }, "electron-to-chromium": { - "version": "1.3.108", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.108.tgz", - "integrity": "sha512-/QI4hMpAh48a1Sea6PALGv+kuVne9A2EWGd8HrWHMdYhIzGtbhVVHh6heL5fAzGaDnZuPyrlWJRl8WPm4RyiQQ==", + "version": "1.3.113", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.113.tgz", + "integrity": "sha512-De+lPAxEcpxvqPTyZAXELNpRZXABRxf+uL/rSykstQhzj/B0l1150G/ExIIxKc16lI89Hgz81J0BHAcbTqK49g==", "dev": true }, "elliptic": { @@ -2590,9 +2577,9 @@ "dev": true }, "escodegen": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.11.0.tgz", - "integrity": "sha512-IeMV45ReixHS53K/OmfKAIztN/igDHzTJUhZM3k1jMhIZWjk45SMwAtBsEXiJp3vSPmTcu6CXn7mDvFHRN66fw==", + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.11.1.tgz", + "integrity": "sha512-JwiqFD9KdGVVpeuRa68yU3zZnBEOcPs0nKW7wZzXky8Z7tffdYUHbe11bPCV5jYlK6DVdKLWLm0f5I/QlL0Kmw==", "dev": true, "requires": { "esprima": "^3.1.3", @@ -2755,6 +2742,12 @@ "requires": { "is-extendable": "^0.1.0" } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true } } }, @@ -2768,9 +2761,9 @@ } }, "expect": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/expect/-/expect-24.0.0.tgz", - "integrity": "sha512-qDHRU4lGsme0xjg8dXp/RQhvO9XIo9FWqVo7dTHDPBwzy25JGEHAWFsnpmRYErB50tgi/6euo3ir5e/kF9LUTA==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-24.1.0.tgz", + "integrity": "sha512-lVcAPhaYkQcIyMS+F8RVwzbm1jro20IG8OkvxQ6f1JfqhVZyyudCwYogQ7wnktlf14iF3ii7ArIUO/mqvrW9Gw==", "dev": true, "requires": { "ansi-styles": "^3.2.0", @@ 
-2833,6 +2826,12 @@ "ms": "2.0.0" } }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, "path-to-regexp": { "version": "0.1.7", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", @@ -3071,6 +3070,12 @@ "requires": { "ms": "2.0.0" } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true } } }, @@ -3124,21 +3129,21 @@ "dev": true }, "flush-write-stream": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.0.3.tgz", - "integrity": "sha512-calZMC10u0FMUqoiunI2AiGIIUtUIvifNwkHhNupZH4cbNnW1Itkoh/Nf5HFYmDrwWPjrUxpkZT0KhuCq0jmGw==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", + "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", "dev": true, "requires": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.4" + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" } }, "follow-redirects": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.6.1.tgz", - "integrity": "sha512-t2JCjbzxQpWvbhts3l6SH1DKzSrx8a+SsaVf4h6bG4kOXUuPYS/kg2Lr4gQSb7eemaHqJkOThF1BGyjlUkO1GQ==", + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.7.0.tgz", + "integrity": "sha512-m/pZQy4Gj287eNy94nivy5wchN3Kp+Q5WgUPNy5lJSZ3sgkVKSYV/ZChMAQVIgx1SqfZ2zBZtPA2YlXIWxxJOQ==", "requires": { - "debug": "=3.1.0" + "debug": "^3.2.6" } }, "for-in": { @@ -3929,12 +3934,6 @@ "resolve-dir": "^1.0.0" } }, - "global-modules-path": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/global-modules-path/-/global-modules-path-2.3.1.tgz", - "integrity": 
"sha512-y+shkf4InI7mPRHSo2b/k6ix6+NLDtyccYv86whhxrSGX9wjPX1VMITmrDbE1eh7zkzhiWtW2sHklJYoQ62Cxg==", - "dev": true - }, "global-prefix": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-1.0.2.tgz", @@ -3949,30 +3948,30 @@ } }, "globals": { - "version": "11.10.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.10.0.tgz", - "integrity": "sha512-0GZF1RiPKU97IHUO5TORo9w1PwrH/NBPl+fS7oMLdaTRiYmYbwK4NWoZWrAdd0/abG9R2BU+OiwyQpTpE6pdfQ==", + "version": "11.11.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.11.0.tgz", + "integrity": "sha512-WHq43gS+6ufNOEqlrDBxVEbb8ntfXrfAUU2ZOpCxrBdGKW3gyv8mCxAfIBD0DroPKGrJ2eSsXsLtY9MPntsyTw==", "dev": true }, "globby": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-8.0.2.tgz", - "integrity": "sha512-yTzMmKygLp8RUpG1Ymu2VXPSJQZjNAZPD4ywgYEaG7e4tBJeUQBO8OpXrf1RCNcEs5alsoJYPAMiIHP0cmeC7w==", + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-9.0.0.tgz", + "integrity": "sha512-q0qiO/p1w/yJ0hk8V9x1UXlgsXUxlGd0AHUOXZVXBO6aznDtpx7M8D1kBrCAItoPm+4l8r6ATXV1JpjY2SBQOw==", "dev": true, "requires": { - "array-union": "^1.0.1", - "dir-glob": "2.0.0", - "fast-glob": "^2.0.2", - "glob": "^7.1.2", - "ignore": "^3.3.5", - "pify": "^3.0.0", - "slash": "^1.0.0" + "array-union": "^1.0.2", + "dir-glob": "^2.2.1", + "fast-glob": "^2.2.6", + "glob": "^7.1.3", + "ignore": "^4.0.3", + "pify": "^4.0.1", + "slash": "^2.0.0" }, "dependencies": { - "slash": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", "dev": true } } @@ -4006,11 +4005,6 @@ "integrity": "sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE=", "dev": true }, - "gud": { - 
"version": "1.0.0", - "resolved": "https://registry.npmjs.org/gud/-/gud-1.0.0.tgz", - "integrity": "sha512-zGEOVKFM5sVPPrYs7J5/hYEw2Pof8KCyOwyhG8sAF26mCAeUFAcYPu1mwB7hhpIP29zOIBaDqwuHdLp0jvZXjw==" - }, "handle-thing": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.0.tgz", @@ -4018,9 +4012,9 @@ "dev": true }, "handlebars": { - "version": "4.0.12", - "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.12.tgz", - "integrity": "sha512-RhmTekP+FZL+XNhwS1Wf+bTTZpdLougwt5pcgA1tuz6Jcx0fpH/7z0qd71RKnZHBCxIRBHfBOnio4gViPemNzA==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.1.0.tgz", + "integrity": "sha512-l2jRuU1NAWK6AW5qqcTATWQJvNPEwkM7NEKSiv/gqOsoSQbVoWyqVEY5GS+XPQ88zLNmqASRpzfdm8d79hJS+w==", "dev": true, "requires": { "async": "^2.5.0", @@ -4161,16 +4155,6 @@ "resolve-pathname": "^2.2.0", "value-equal": "^0.4.0", "warning": "^3.0.0" - }, - "dependencies": { - "warning": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/warning/-/warning-3.0.0.tgz", - "integrity": "sha1-MuU3fLVy3kqwR1O9+IIcAe1gW3w=", - "requires": { - "loose-envify": "^1.0.0" - } - } } }, "hjson": { @@ -4195,9 +4179,9 @@ "integrity": "sha512-rqcy4pJo55FTTLWt+bU8ukscqHeE/e9KWvsOW2b/a3afxQZhwkQdT1rPPCJ0rYXdj4vNcasY8zHTH+jF/qStxw==" }, "homedir-polyfill": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.1.tgz", - "integrity": "sha1-TCu8inWJmP7r9e1oWA921GdotLw=", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz", + "integrity": "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==", "dev": true, "requires": { "parse-passwd": "^1.0.0" @@ -4272,15 +4256,15 @@ } }, "http-proxy-middleware": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.18.0.tgz", - "integrity": 
"sha512-Fs25KVMPAIIcgjMZkVHJoKg9VcXcC1C8yb9JUgeDvVXY0S/zgVIhMb+qVswDIgtJe2DfckMSY2d6TuTEutlk6Q==", + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz", + "integrity": "sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==", "dev": true, "requires": { - "http-proxy": "^1.16.2", + "http-proxy": "^1.17.0", "is-glob": "^4.0.0", - "lodash": "^4.17.5", - "micromatch": "^3.1.9" + "lodash": "^4.17.11", + "micromatch": "^3.1.10" } }, "http-signature": { @@ -4345,9 +4329,9 @@ "dev": true }, "ignore": { - "version": "3.3.10", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", - "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==", + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", "dev": true }, "ignore-styles": { @@ -4440,13 +4424,21 @@ "dev": true }, "internal-ip": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/internal-ip/-/internal-ip-3.0.1.tgz", - "integrity": "sha512-NXXgESC2nNVtU+pqmC9e6R8B1GpKxzsAQhffvh5AL79qKnodd+L7tnEQmTiUAVngqLalPbSqRA7XGIEL5nCd0Q==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/internal-ip/-/internal-ip-4.2.0.tgz", + "integrity": "sha512-ZY8Rk+hlvFeuMmG5uH1MXhhdeMntmIaxaInvAmzMq/SHV8rv4Kh+6GiQNNDQd0wZFrcO+FiTBo8lui/osKOyJw==", "dev": true, "requires": { - "default-gateway": "^2.6.0", - "ipaddr.js": "^1.5.2" + "default-gateway": "^4.0.1", + "ipaddr.js": "^1.9.0" + }, + "dependencies": { + "ipaddr.js": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.0.tgz", + "integrity": "sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA==", + "dev": true + } } }, "interpret": { @@ -4527,15 +4519,6 @@ 
"resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" }, - "is-builtin-module": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-builtin-module/-/is-builtin-module-1.0.0.tgz", - "integrity": "sha1-VAVy0096wxGfj3bDDLwbHgN6/74=", - "dev": true, - "requires": { - "builtin-modules": "^1.0.0" - } - }, "is-callable": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.4.tgz", @@ -4773,9 +4756,9 @@ "dev": true }, "istanbul-api": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/istanbul-api/-/istanbul-api-2.1.0.tgz", - "integrity": "sha512-+Ygg4t1StoiNlBGc6x0f8q/Bv26FbZqP/+jegzfNpU7Q8o+4ZRoJxJPhBkgE/UonpAjtxnE4zCZIyJX+MwLRMQ==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/istanbul-api/-/istanbul-api-2.1.1.tgz", + "integrity": "sha512-kVmYrehiwyeBAk/wE71tW6emzLiHGjYIiDrc8sfyty4F8M02/lrgXSm+R1kXysmF20zArvmZXjlE/mg24TVPJw==", "dev": true, "requires": { "async": "^2.6.1", @@ -4786,7 +4769,7 @@ "istanbul-lib-instrument": "^3.1.0", "istanbul-lib-report": "^2.0.4", "istanbul-lib-source-maps": "^3.0.2", - "istanbul-reports": "^2.1.0", + "istanbul-reports": "^2.1.1", "js-yaml": "^3.12.0", "make-dir": "^1.3.0", "minimatch": "^3.0.4", @@ -4855,38 +4838,32 @@ "requires": { "ms": "^2.1.1" } - }, - "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", - "dev": true } } }, "istanbul-reports": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-2.1.0.tgz", - "integrity": "sha512-azQdSX+dtTtkQEfqq20ICxWi6eOHXyHIgMFw1VOOVi8iIPWeCWRgCyFh/CsBKIhcgskMI8ExXmU7rjXTRCIJ+A==", + "version": "2.1.1", + "resolved": 
"https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-2.1.1.tgz", + "integrity": "sha512-FzNahnidyEPBCI0HcufJoSEoKykesRlFcSzQqjH9x0+LC8tnnE/p/90PBLu8iZTxr8yYZNyTtiAujUqyN+CIxw==", "dev": true, "requires": { - "handlebars": "^4.0.11" + "handlebars": "^4.1.0" } }, "jest": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest/-/jest-24.0.0.tgz", - "integrity": "sha512-1Z2EblP4BnERbWZGtipGb9zjHDq7nCHgCY7V57F5SYaFRJV4DE1HKoOz+CRC5OrAThN9OVhRlUhTzsTFArg2iQ==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-24.1.0.tgz", + "integrity": "sha512-+q91L65kypqklvlRFfXfdzUKyngQLOcwGhXQaLmVHv+d09LkNXuBuGxlofTFW42XMzu3giIcChchTsCNUjQ78A==", "dev": true, "requires": { "import-local": "^2.0.0", - "jest-cli": "^24.0.0" + "jest-cli": "^24.1.0" }, "dependencies": { "jest-cli": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-24.0.0.tgz", - "integrity": "sha512-mElnFipLaGxo1SiQ1CLvuaz3eX07MJc4HcyKrApSJf8xSdY1/EwaHurKwu1g2cDiwIgY8uHj7UcF5OYbtiBOWg==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-24.1.0.tgz", + "integrity": "sha512-U/iyWPwOI0T1CIxVLtk/2uviOTJ/OiSWJSe8qt6X1VkbbgP+nrtLJlmT9lPBe4lK78VNFJtrJ7pttcNv/s7yCw==", "dev": true, "requires": { "ansi-escapes": "^3.0.0", @@ -4901,16 +4878,16 @@ "istanbul-lib-instrument": "^3.0.1", "istanbul-lib-source-maps": "^3.0.1", "jest-changed-files": "^24.0.0", - "jest-config": "^24.0.0", + "jest-config": "^24.1.0", "jest-environment-jsdom": "^24.0.0", "jest-get-type": "^24.0.0", "jest-haste-map": "^24.0.0", "jest-message-util": "^24.0.0", "jest-regex-util": "^24.0.0", - "jest-resolve-dependencies": "^24.0.0", - "jest-runner": "^24.0.0", - "jest-runtime": "^24.0.0", - "jest-snapshot": "^24.0.0", + "jest-resolve-dependencies": "^24.1.0", + "jest-runner": "^24.1.0", + "jest-runtime": "^24.1.0", + "jest-snapshot": "^24.1.0", "jest-util": "^24.0.0", "jest-validate": "^24.0.0", "jest-watcher": "^24.0.0", @@ -4942,27 
+4919,26 @@ } }, "jest-config": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-24.0.0.tgz", - "integrity": "sha512-9/soqWL5YSq1ZJtgVJ5YYPCL1f9Mi2lVCp5+OXuYBOaN8DHSFRCSWip0rQ6N+mPTOEIAlCvcUH8zaPOwK4hePg==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-24.1.0.tgz", + "integrity": "sha512-FbbRzRqtFC6eGjG5VwsbW4E5dW3zqJKLWYiZWhB0/4E5fgsMw8GODLbGSrY5t17kKOtCWb/Z7nsIThRoDpuVyg==", "dev": true, "requires": { "@babel/core": "^7.1.0", - "babel-jest": "^24.0.0", + "babel-jest": "^24.1.0", "chalk": "^2.0.1", "glob": "^7.1.1", "jest-environment-jsdom": "^24.0.0", "jest-environment-node": "^24.0.0", "jest-get-type": "^24.0.0", - "jest-jasmine2": "^24.0.0", + "jest-jasmine2": "^24.1.0", "jest-regex-util": "^24.0.0", - "jest-resolve": "^24.0.0", + "jest-resolve": "^24.1.0", "jest-util": "^24.0.0", "jest-validate": "^24.0.0", "micromatch": "^3.1.10", "pretty-format": "^24.0.0", - "realpath-native": "^1.0.2", - "uuid": "^3.3.2" + "realpath-native": "^1.0.2" } }, "jest-diff": { @@ -5042,22 +5018,23 @@ } }, "jest-jasmine2": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest-jasmine2/-/jest-jasmine2-24.0.0.tgz", - "integrity": "sha512-q1xEV9KHM0bgfBj3yrkrjRF5kxpNDkWPCwVfSPN1DC+pD6J5wrM9/u2BgzhKhALXiaZUUhJ+f/OcEC0Gwpw90A==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest-jasmine2/-/jest-jasmine2-24.1.0.tgz", + "integrity": "sha512-H+o76SdSNyCh9fM5K8upK45YTo/DiFx5w2YAzblQebSQmukDcoVBVeXynyr7DDnxh+0NTHYRCLwJVf3tC518wg==", "dev": true, "requires": { "@babel/traverse": "^7.1.0", "chalk": "^2.0.1", "co": "^4.6.0", - "expect": "^24.0.0", + "expect": "^24.1.0", "is-generator-fn": "^2.0.0", "jest-each": "^24.0.0", "jest-matcher-utils": "^24.0.0", "jest-message-util": "^24.0.0", - "jest-snapshot": "^24.0.0", + "jest-snapshot": "^24.1.0", "jest-util": "^24.0.0", - "pretty-format": "^24.0.0" + "pretty-format": "^24.0.0", + "throat": "^4.0.0" } }, 
"jest-leak-detector": { @@ -5107,9 +5084,9 @@ "dev": true }, "jest-resolve": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-24.0.0.tgz", - "integrity": "sha512-uKDGyJqNaBQKox1DJzm27CJobADsIMNgZGusXhtYzl98LKu/fKuokkRsd7EBVgoDA80HKHc3LOPKuYLryMu1vw==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-24.1.0.tgz", + "integrity": "sha512-TPiAIVp3TG6zAxH28u/6eogbwrvZjBMWroSLBDkwkHKrqxB/RIdwkWDye4uqPlZIXWIaHtifY3L0/eO5Z0f2wg==", "dev": true, "requires": { "browser-resolve": "^1.11.3", @@ -5118,30 +5095,31 @@ } }, "jest-resolve-dependencies": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-24.0.0.tgz", - "integrity": "sha512-CJGS5ME2g5wL16o3Y22ga9p5ntNT5CUYX40/0lYj9ic9jB5YHm/qMKTgbFt9kowEBiMOFpXy15dWtBTEU54+zg==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-24.1.0.tgz", + "integrity": "sha512-2VwPsjd3kRPu7qe2cpytAgowCObk5AKeizfXuuiwgm1a9sijJDZe8Kh1sFj6FKvSaNEfCPlBVkZEJa2482m/Uw==", "dev": true, "requires": { "jest-regex-util": "^24.0.0", - "jest-snapshot": "^24.0.0" + "jest-snapshot": "^24.1.0" } }, "jest-runner": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-24.0.0.tgz", - "integrity": "sha512-XefXm2XimKtwdfi2am4364GfCmLD1tOjiRtDexY65diCXt4Rw23rxj2wiW7p9s8Nh9dzJQNmrheqZ5rzvn762g==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-24.1.0.tgz", + "integrity": "sha512-CDGOkT3AIFl16BLL/OdbtYgYvbAprwJ+ExKuLZmGSCSldwsuU2dEGauqkpvd9nphVdAnJUcP12e/EIlnTX0QXg==", "dev": true, "requires": { + "chalk": "^2.4.2", "exit": "^0.1.2", "graceful-fs": "^4.1.15", - "jest-config": "^24.0.0", + "jest-config": "^24.1.0", "jest-docblock": "^24.0.0", "jest-haste-map": "^24.0.0", - "jest-jasmine2": "^24.0.0", + "jest-jasmine2": "^24.1.0", "jest-leak-detector": "^24.0.0", 
"jest-message-util": "^24.0.0", - "jest-runtime": "^24.0.0", + "jest-runtime": "^24.1.0", "jest-util": "^24.0.0", "jest-worker": "^24.0.0", "source-map-support": "^0.5.6", @@ -5149,9 +5127,9 @@ } }, "jest-runtime": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-24.0.0.tgz", - "integrity": "sha512-UeVoTGiij8upcqfyBlJvImws7IGY+ZWtgVpt1h4VmVbyei39tVGia/20VoP3yvodS6FdjTwBj+JzVNuoh/9UTw==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-24.1.0.tgz", + "integrity": "sha512-59/BY6OCuTXxGeDhEMU7+N33dpMQyXq7MLK07cNSIY/QYt2QZgJ7Tjx+rykBI0skAoigFl0A5tmT8UdwX92YuQ==", "dev": true, "requires": { "@babel/core": "^7.1.0", @@ -5162,19 +5140,19 @@ "fast-json-stable-stringify": "^2.0.0", "glob": "^7.1.3", "graceful-fs": "^4.1.15", - "jest-config": "^24.0.0", + "jest-config": "^24.1.0", "jest-haste-map": "^24.0.0", "jest-message-util": "^24.0.0", "jest-regex-util": "^24.0.0", - "jest-resolve": "^24.0.0", - "jest-snapshot": "^24.0.0", + "jest-resolve": "^24.1.0", + "jest-snapshot": "^24.1.0", "jest-util": "^24.0.0", "jest-validate": "^24.0.0", "micromatch": "^3.1.10", "realpath-native": "^1.0.0", "slash": "^2.0.0", - "strip-bom": "3.0.0", - "write-file-atomic": "^2.4.2", + "strip-bom": "^3.0.0", + "write-file-atomic": "2.4.1", "yargs": "^12.0.2" } }, @@ -5185,9 +5163,9 @@ "dev": true }, "jest-snapshot": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-24.0.0.tgz", - "integrity": "sha512-7OcrckVnfzVYxSGPYl2Sn+HyT30VpDv+FMBFbQxSQ6DV2K9Js6vYT6d4SBPKp6DfDiEL2txNssJBxtlvF+Dymw==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-24.1.0.tgz", + "integrity": "sha512-th6TDfFqEmXvuViacU1ikD7xFb7lQsPn2rJl7OEmnfIVpnrx3QNY2t3PE88meeg0u/mQ0nkyvmC05PBqO4USFA==", "dev": true, "requires": { "@babel/types": "^7.0.0", @@ -5195,7 +5173,7 @@ "jest-diff": "^24.0.0", "jest-matcher-utils": "^24.0.0", "jest-message-util": 
"^24.0.0", - "jest-resolve": "^24.0.0", + "jest-resolve": "^24.1.0", "mkdirp": "^0.5.1", "natural-compare": "^1.4.0", "pretty-format": "^24.0.0", @@ -5393,9 +5371,9 @@ "dev": true }, "kleur": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.1.tgz", - "integrity": "sha512-P3kRv+B+Ra070ng2VKQqW4qW7gd/v3iD8sy/zOdcYRsfiD+QBokQNOps/AfP6Hr48cBhIIBFWckB9aO+IZhrWg==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.2.tgz", + "integrity": "sha512-3h7B2WRT5LNXOtQiAaWonilegHcPSf9nLVXlSTci8lu1dZUuui61+EsPEZqSVxY7rXYmB2DVKMQILxaO5WL61Q==", "dev": true }, "lcid": { @@ -5429,11 +5407,35 @@ "type-check": "~0.3.2" } }, - "lightercollective": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/lightercollective/-/lightercollective-0.1.0.tgz", - "integrity": "sha512-J9tg5uraYoQKaWbmrzDDexbG6hHnMcWS1qLYgJSWE+mpA3U5OCSeMUhb+K55otgZJ34oFdR0ECvdIb3xuO5JOQ==", - "dev": true + "license-checker": { + "version": "25.0.1", + "resolved": "https://registry.npmjs.org/license-checker/-/license-checker-25.0.1.tgz", + "integrity": "sha512-mET5AIwl7MR2IAKYYoVBBpV0OnkKQ1xGj2IMMeEFIs42QAkEVjRtFZGWmQ28WeU7MP779iAgOaOy93Mn44mn6g==", + "dev": true, + "requires": { + "chalk": "^2.4.1", + "debug": "^3.1.0", + "mkdirp": "^0.5.1", + "nopt": "^4.0.1", + "read-installed": "~4.0.3", + "semver": "^5.5.0", + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0", + "spdx-satisfies": "^4.0.0", + "treeify": "^1.1.0" + }, + "dependencies": { + "nopt": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.1.tgz", + "integrity": "sha1-0NRoWv1UFRk8jHUFYC0NF81kR00=", + "dev": true, + "requires": { + "abbrev": "1", + "osenv": "^0.1.4" + } + } + } }, "load-json-file": { "version": "4.0.0", @@ -5503,6 +5505,16 @@ "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" }, + "lodash.get": { + "version": "4.4.2", + "resolved": 
"https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", + "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=" + }, + "lodash.isequal": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", + "integrity": "sha1-QVxEePK8wwEgwizhDtMib30+GOA=" + }, "lodash.mergewith": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/lodash.mergewith/-/lodash.mergewith-4.6.1.tgz", @@ -5607,6 +5619,12 @@ "tmpl": "1.0.x" } }, + "mamacro": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/mamacro/-/mamacro-0.0.3.tgz", + "integrity": "sha512-qMEwh+UujcQ+kbz3T6V+wAmO2U8veoq2w+3wY8MquqwVA3jChfwY+Tk52GZKDfACEPjuZ7r2oJLejwpt8jtwTA==", + "dev": true + }, "map-age-cleaner": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz", @@ -5655,14 +5673,14 @@ "dev": true }, "mem": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mem/-/mem-4.0.0.tgz", - "integrity": "sha512-WQxG/5xYc3tMbYLXoXPm81ET2WDULiU5FxbuIoNbJqLOOI8zehXFdZuiUEgfdrU2mVB1pxBZUGlYORSrpuJreA==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mem/-/mem-4.1.0.tgz", + "integrity": "sha512-I5u6Q1x7wxO0kdOpYBB28xueHADYps5uty/zg936CiG8NTe5sJL8EjrCuLneuDW3PlMdZBGDIn8BirEVdovZvg==", "dev": true, "requires": { "map-age-cleaner": "^0.1.1", "mimic-fn": "^1.0.0", - "p-is-promise": "^1.1.0" + "p-is-promise": "^2.0.0" } }, "memory-fs": { @@ -5854,18 +5872,18 @@ "dev": true }, "mime-db": { - "version": "1.37.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz", - "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg==", + "version": "1.38.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.38.0.tgz", + "integrity": "sha512-bqVioMFFzc2awcdJZIzR3HjZFX20QhilVS7hytkKrv7xFAn8bM1gzc/FOX2awLISvWe0PV8ptFKcon+wZ5qYkg==", "dev": true }, "mime-types": { - "version": "2.1.21", - "resolved": 
"https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz", - "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==", + "version": "2.1.22", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.22.tgz", + "integrity": "sha512-aGl6TZGnhm/li6F7yx82bJiBZwgiEa4Hf6CNr8YO+r5UHr53tSTYZb102zyU50DOWWKeOv0uQLRL0/9EiKWCog==", "dev": true, "requires": { - "mime-db": "~1.37.0" + "mime-db": "~1.38.0" } }, "mimic-fn": { @@ -6000,6 +6018,15 @@ "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==", "dev": true }, + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, "glob": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", @@ -6014,6 +6041,12 @@ "path-is-absolute": "^1.0.0" } }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", @@ -6040,9 +6073,9 @@ } }, "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", + "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" }, "multicast-dns": { "version": "6.2.3", @@ -6204,21 +6237,22 @@ "dev": true }, "node-notifier": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/node-notifier/-/node-notifier-5.3.0.tgz", - "integrity": 
"sha512-AhENzCSGZnZJgBARsUjnQ7DnZbzyP+HxlVXuD0xqAnvL8q+OqtSX7lGg9e8nHzwXkMMXNdVeqq4E2M3EUAqX6Q==", + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/node-notifier/-/node-notifier-5.4.0.tgz", + "integrity": "sha512-SUDEb+o71XR5lXSTyivXd9J7fCloE3SyP4lSgt3lU2oSANiox+SxlNRGPjDKrwU1YN3ix2KN/VGGCg0t01rttQ==", "dev": true, "requires": { "growly": "^1.3.0", + "is-wsl": "^1.1.0", "semver": "^5.5.0", "shellwords": "^0.1.1", "which": "^1.3.0" } }, "node-releases": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.5.tgz", - "integrity": "sha512-6C2K0x1QlYTz9wCueMN/DVZFcBVg/qsj2k9iV5gV/+OvG4KNrl7Nu7TWbWFQ3/Z2V10qVFQWtj5Xa+VBodcI6g==", + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.8.tgz", + "integrity": "sha512-gQm+K9mGCiT/NXHy+V/ZZS1N/LOaGGqRAAJJs3X9Ah1g+CIbRcBgNyoNYQ+SEtcyAtB9KqDruu+fF7nWjsqRaA==", "dev": true, "requires": { "semver": "^5.3.0" @@ -6329,13 +6363,13 @@ } }, "normalize-package-data": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.4.0.tgz", - "integrity": "sha512-9jjUFbTPfEy3R/ad/2oNbKtW9Hgovl5O1FvFWKkKblNXoN/Oou6+9+KKohPK13Yc3/TyunyWhJp6gvRNR/PPAw==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", "dev": true, "requires": { "hosted-git-info": "^2.1.4", - "is-builtin-module": "^1.0.0", + "resolve": "^1.10.0", "semver": "2 || 3 || 4 || 5", "validate-npm-package-license": "^3.0.1" } @@ -6356,9 +6390,9 @@ "dev": true }, "normalize.css": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/normalize.css/-/normalize.css-8.0.1.tgz", - "integrity": "sha512-qizSNPO93t1YUuUhP22btGOo3chcvDFqFaj2TRybP0DMxkHOCTYwp3n34fel4a31ORXy4m1Xq0Gyqpb5m33qIg==" + "version": "4.1.1", + "resolved": 
"https://registry.npmjs.org/normalize.css/-/normalize.css-4.1.1.tgz", + "integrity": "sha1-TwsdWiNTgyUrBNhWa4Zsxfytnww=" }, "npm-run-path": { "version": "2.0.2", @@ -6399,9 +6433,9 @@ "integrity": "sha1-StCAk21EPCVhrtnyGX7//iX05QY=" }, "nwsapi": { - "version": "2.0.9", - "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.0.9.tgz", - "integrity": "sha512-nlWFSCTYQcHk/6A9FFnfhKc14c3aFhfdNBXgo8Qgi9QTBu/qg3Ww+Uiz9wMzXd1T8GFxPc2QIHB6Qtf2XFryFQ==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.1.1.tgz", + "integrity": "sha512-T5GaA1J/d34AC8mkrFD2O0DR17kwJ702ZOtJOsS8RpbsQZVOC2/xYFb1i/cw+xdM54JIlMuojjDOYct8GIWtwg==", "dev": true }, "oauth-sign": { @@ -6447,9 +6481,9 @@ } }, "object-keys": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.12.tgz", - "integrity": "sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag==" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.0.tgz", + "integrity": "sha512-6OO5X1+2tYkNyNEx6TsCxEqFfRWaqx6EtMiSbGrw8Ob8v9Ne+Hl8rBAgLBZn5wjEz3s/s6U1WXFUFOcxxAwUpg==" }, "object-visit": { "version": "1.0.1", @@ -6516,9 +6550,9 @@ } }, "on-headers": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.1.tgz", - "integrity": "sha1-ko9dD0cNSTQmUepnlLCFfBAGk/c=", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", "dev": true }, "once": { @@ -6649,9 +6683,9 @@ "dev": true }, "p-is-promise": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz", - "integrity": "sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4=", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-2.0.0.tgz", + "integrity": 
"sha512-pzQPhYMCAgLAKPWD2jC3Se9fEfrD9npNos0y150EeqZll7akhEgGhTW/slB6lHku8AvYGiJ+YJ5hfHKePPgFWg==", "dev": true }, "p-limit": { @@ -6708,9 +6742,9 @@ } }, "parse-asn1": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.3.tgz", - "integrity": "sha512-VrPoetlz7B/FqjBLD2f5wBVZvsZVLnRUrxVLfRYhGXCODa/NWE4p3Wp+6+aV3ZPL3KM7/OZmxDIwwijD7yuucg==", + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.4.tgz", + "integrity": "sha512-Qs5duJcuvNExRfFZ99HDD3z4mAi3r9Wl/FOjEOijlxwCZs7E7mW2vjTpgQ4J8LpTF8x5v+1Vn5UQFejmWT11aw==", "dev": true, "requires": { "asn1.js": "^4.0.0", @@ -6855,9 +6889,9 @@ } }, "pirates": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.0.tgz", - "integrity": "sha512-8t5BsXy1LUIjn3WWOlOuFDuKswhQb/tkak641lvBgmPOBUQHXveORtlMCp6OdPV1dtuTaEahKA8VNz6uLfKBtA==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.1.tgz", + "integrity": "sha512-WuNqLTbMI3tmfef2TKxlQmAiLHKtFhlsCZnPIpuv2Ow0RDVO8lfy1Opf4NUzlMXLjPl+Men7AuVdX6TA+s+uGA==", "dev": true, "requires": { "node-modules-regexp": "^1.0.0" @@ -6878,11 +6912,6 @@ "integrity": "sha512-2qHaIQr2VLRFoxe2nASzsV6ef4yOOH+Fi9FBOVH6cqeSgUnoyySPZkxzLuzd+RYOQTRpROA0ztTMqxROKSb/nA==", "dev": true }, - "popper.js": { - "version": "1.14.6", - "resolved": "https://registry.npmjs.org/popper.js/-/popper.js-1.14.6.tgz", - "integrity": "sha512-AGwHGQBKumlk/MDfrSOf0JHhJCImdDMcGNoqKmKkU+68GFazv3CQ6q9r7Ja1sKDZmYWTckY/uLyEznheTDycnA==" - }, "portfinder": { "version": "1.0.20", "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.20.tgz", @@ -6908,6 +6937,12 @@ "requires": { "ms": "2.0.0" } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true } } }, @@ -6939,9 +6974,9 @@ } }, "postcss-cli": { - "version": "6.1.1", - "resolved": 
"https://registry.npmjs.org/postcss-cli/-/postcss-cli-6.1.1.tgz", - "integrity": "sha512-18PQO4qCDWY6vggnG3k+i5zrUnRc4I6P4MpKQWGbNyTfWBaRgu/nScunw6VH5QnUKtRu0NuPF5SpxhcYzWVXDg==", + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-cli/-/postcss-cli-6.1.2.tgz", + "integrity": "sha512-jIWfIkqt8cTThSpH8DBaNxHlBf99OKSem2RseRpfVPqWayxHKQB0IWdS/IF5XSGeFU5QslSDTdVHnw6qggXGkA==", "dev": true, "requires": { "chalk": "^2.1.0", @@ -6949,7 +6984,7 @@ "dependency-graph": "^0.8.0", "fs-extra": "^7.0.0", "get-stdin": "^6.0.0", - "globby": "^8.0.0", + "globby": "^9.0.0", "postcss": "^7.0.0", "postcss-load-config": "^2.0.0", "postcss-reporter": "^6.0.0", @@ -7194,9 +7229,9 @@ } }, "postcss-modules-local-by-default": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.4.tgz", - "integrity": "sha512-WvuSaTKXUqYJbnT7R3YrsNrHv/C5vRfr5VglS4bFOk0MYT4CLBfc/xgExA+x2RftlYgiBDvWmVs191Xv8S8gZQ==", + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.5.tgz", + "integrity": "sha512-iFgxlCAVLno5wIJq+4hyuOmc4VjZEZxzpdeuZcBytLNWEK5Bx2oRF9PPcAz5TALbaFvrZm8sJYtJ3hV+tMSEIg==", "dev": true, "requires": { "css-selector-tokenizer": "^0.7.0", @@ -7442,22 +7477,23 @@ "dev": true }, "prompts": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.0.1.tgz", - "integrity": "sha512-8lnEOSIGQbgbnO47+13S+H204L8ISogGulyi0/NNEFAQ9D1VMNTrJ9SBX2Ra03V4iPn/zt36HQMndRYkaPoWiQ==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.0.3.tgz", + "integrity": "sha512-H8oWEoRZpybm6NV4to9/1limhttEo13xK62pNvn2JzY0MA03p7s0OjtmhXyon3uJmxiJJVSuUwEJFFssI3eBiQ==", "dev": true, "requires": { - "kleur": "^3.0.0", + "kleur": "^3.0.2", "sisteransi": "^1.0.0" } }, "prop-types": { - "version": "15.6.2", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.6.2.tgz", - 
"integrity": "sha512-3pboPvLiWD7dkI3qf3KbUe6hKFKa52w+AE0VCqECtf+QHAKgOL37tTaNCnuX1nAAQ4ZhyP+kYVKf8rLmJ/feDQ==", + "version": "15.7.2", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz", + "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==", "requires": { - "loose-envify": "^1.3.1", - "object-assign": "^4.1.1" + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.8.1" } }, "proxy-addr": { @@ -7541,6 +7577,14 @@ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", "dev": true }, + "pure-render-decorator": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pure-render-decorator/-/pure-render-decorator-1.1.1.tgz", + "integrity": "sha1-9eC3bOEoeOadpBp+6GJ/71S19Xo=", + "requires": { + "fbjs": "^0.8.0" + } + }, "qs": { "version": "6.5.2", "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", @@ -7566,9 +7610,9 @@ "dev": true }, "randombytes": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.0.6.tgz", - "integrity": "sha512-CIQ5OFxf4Jou6uOKe9t1AOgqpeU5fd70A8NPdHSGeYXqXsPe6peOwI0cUl88RWZ6sP1vPMV3avd/R6cZ5/sP1A==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", "dev": true, "requires": { "safe-buffer": "^5.1.0" @@ -7614,44 +7658,51 @@ } }, "react": { - "version": "16.7.0", - "resolved": "https://registry.npmjs.org/react/-/react-16.7.0.tgz", - "integrity": "sha512-StCz3QY8lxTb5cl2HJxjwLFOXPIFQp+p+hxQfc8WE0QiLfCtIlKj8/+5tjjKm8uSTlAW+fCPaavGFS06V9Ar3A==", + "version": "16.8.3", + "resolved": "https://registry.npmjs.org/react/-/react-16.8.3.tgz", + "integrity": "sha512-3UoSIsEq8yTJuSu0luO1QQWYbgGEILm+eJl2QN/VLDi7hL+EN18M3q3oVZwmVzzBJ3DkM7RMdRwBmZZ+b4IzSA==", "requires": { 
"loose-envify": "^1.1.0", "object-assign": "^4.1.1", "prop-types": "^15.6.2", - "scheduler": "^0.12.0" + "scheduler": "^0.13.3" + } + }, + "react-ace": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/react-ace/-/react-ace-6.4.0.tgz", + "integrity": "sha512-woTTgGk9x4GRRWiM4QLNOspjaJAYLX3UZ3J2XRYQvJiN6wyxrFY9x7rdOKc+4Tj+khb/ccPiDj/kll4UeJEDPw==", + "requires": { + "brace": "^0.11.1", + "diff-match-patch": "^1.0.4", + "lodash.get": "^4.4.2", + "lodash.isequal": "^4.5.0", + "prop-types": "^15.6.2" + } + }, + "react-addons-css-transition-group": { + "version": "15.6.2", + "resolved": "https://registry.npmjs.org/react-addons-css-transition-group/-/react-addons-css-transition-group-15.6.2.tgz", + "integrity": "sha1-nkN2vPQLUhfRTsaFUwgc7ksIptY=", + "requires": { + "react-transition-group": "^1.2.0" } }, "react-dom": { - "version": "16.7.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.7.0.tgz", - "integrity": "sha512-D0Ufv1ExCAmF38P2Uh1lwpminZFRXEINJe53zRAbm4KPwSyd6DY/uDoS0Blj9jvPpn1+wivKpZYc8aAAN/nAkg==", + "version": "16.8.3", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.8.3.tgz", + "integrity": "sha512-ttMem9yJL4/lpItZAQ2NTFAbV7frotHk5DZEHXUOws2rMmrsvh1Na7ThGT0dTzUIl6pqTOi5tYREfL8AEna3lA==", "requires": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1", "prop-types": "^15.6.2", - "scheduler": "^0.12.0" + "scheduler": "^0.13.3" } }, - "react-lifecycles-compat": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz", - "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==" - }, - "react-popper": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/react-popper/-/react-popper-1.3.2.tgz", - "integrity": "sha512-UbFWj55Yt9uqvy0oZ+vULDL2Bw1oxeZF9/JzGyxQ5ypgauRH/XlarA5+HLZWro/Zss6Ht2kqpegtb6sYL8GUGw==", - "requires": { - "@babel/runtime": "^7.1.2", - 
"create-react-context": "<=0.2.2", - "popper.js": "^1.14.4", - "prop-types": "^15.6.1", - "typed-styles": "^0.0.7", - "warning": "^4.0.2" - } + "react-is": { + "version": "16.8.3", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.8.3.tgz", + "integrity": "sha512-Y4rC1ZJmsxxkkPuMLwvKvlL1Zfpbcu+Bf4ZigkHup3v9EfdYhAlWAaVyA19olXq2o2mGn0w+dFKvk3pVVlYcIA==" }, "react-router": { "version": "4.3.1", @@ -7665,6 +7716,16 @@ "path-to-regexp": "^1.7.0", "prop-types": "^15.6.1", "warning": "^4.0.1" + }, + "dependencies": { + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } } }, "react-router-dom": { @@ -7678,6 +7739,16 @@ "prop-types": "^15.6.1", "react-router": "^4.3.1", "warning": "^4.0.1" + }, + "dependencies": { + "warning": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz", + "integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==", + "requires": { + "loose-envify": "^1.0.0" + } + } } }, "react-table": { @@ -7689,14 +7760,15 @@ } }, "react-transition-group": { - "version": "2.5.3", - "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-2.5.3.tgz", - "integrity": "sha512-2DGFck6h99kLNr8pOFk+z4Soq3iISydwOFeeEVPjTN6+Y01CmvbWmnN02VuTWyFdnRtIDPe+wy2q6Ui8snBPZg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-1.2.1.tgz", + "integrity": "sha512-CWaL3laCmgAFdxdKbhhps+c0HRGF4c+hdM4H23+FI1QBNUyx/AMeIJGWorehPNSaKnQNOAxL7PQmqMu78CDj3Q==", "requires": { - "dom-helpers": "^3.3.1", - "loose-envify": "^1.4.0", - "prop-types": "^15.6.2", - "react-lifecycles-compat": "^3.0.4" + "chain-function": "^1.0.0", + "dom-helpers": "^3.2.0", + "loose-envify": "^1.3.1", 
+ "prop-types": "^15.5.6", + "warning": "^3.0.0" } }, "read-cache": { @@ -7716,6 +7788,42 @@ } } }, + "read-installed": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/read-installed/-/read-installed-4.0.3.tgz", + "integrity": "sha1-/5uLZ/GH0eTCm5/rMfayI6zRkGc=", + "dev": true, + "requires": { + "debuglog": "^1.0.1", + "graceful-fs": "^4.1.2", + "read-package-json": "^2.0.0", + "readdir-scoped-modules": "^1.0.0", + "semver": "2 || 3 || 4 || 5", + "slide": "~1.1.3", + "util-extend": "^1.0.1" + } + }, + "read-package-json": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/read-package-json/-/read-package-json-2.0.13.tgz", + "integrity": "sha512-/1dZ7TRZvGrYqE0UAfN6qQb5GYBsNcqS1C0tNK601CFOJmtHI7NIGXwetEPU/OtoFHZL3hDxm4rolFFVE9Bnmg==", + "dev": true, + "requires": { + "glob": "^7.1.1", + "graceful-fs": "^4.1.2", + "json-parse-better-errors": "^1.0.1", + "normalize-package-data": "^2.0.0", + "slash": "^1.0.0" + }, + "dependencies": { + "slash": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", + "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", + "dev": true + } + } + }, "read-pkg": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", @@ -7760,6 +7868,18 @@ } } }, + "readdir-scoped-modules": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/readdir-scoped-modules/-/readdir-scoped-modules-1.0.2.tgz", + "integrity": "sha1-n6+jfShr5dksuuve4DDcm19AZ0c=", + "dev": true, + "requires": { + "debuglog": "^1.0.1", + "dezalgo": "^1.0.0", + "graceful-fs": "^4.1.2", + "once": "^1.3.0" + } + }, "readdirp": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", @@ -7772,9 +7892,9 @@ } }, "realpath-native": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/realpath-native/-/realpath-native-1.0.2.tgz", - "integrity": 
"sha512-+S3zTvVt9yTntFrBpm7TQmQ3tzpCrnA1a/y+3cUHAc9ZR6aIjG0WNLR+Rj79QpJktY+VeW/TQtFlQ1bzsehI8g==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/realpath-native/-/realpath-native-1.1.0.tgz", + "integrity": "sha512-wlgPA6cCIIg9gKz0fgAPjnzh4yR/LnXovwuo9hvyGvx3h8nX4+/iLZplfUWasXpqD8BdnGnP5njOFjkUwPzvjA==", "dev": true, "requires": { "util.promisify": "^1.0.0" @@ -7911,23 +8031,23 @@ } }, "request-promise-core": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/request-promise-core/-/request-promise-core-1.1.1.tgz", - "integrity": "sha1-Pu4AssWqgyOc+wTFcA2jb4HNCLY=", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/request-promise-core/-/request-promise-core-1.1.2.tgz", + "integrity": "sha512-UHYyq1MO8GsefGEt7EprS8UrXsm1TxEvFUX1IMTuSLU2Rh7fTIdFtl8xD7JiEYiWU2dl+NYAjCTksTehQUxPag==", "dev": true, "requires": { - "lodash": "^4.13.1" + "lodash": "^4.17.11" } }, "request-promise-native": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/request-promise-native/-/request-promise-native-1.0.5.tgz", - "integrity": "sha1-UoF3D2jgyXGeUWP9P6tIIhX0/aU=", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/request-promise-native/-/request-promise-native-1.0.7.tgz", + "integrity": "sha512-rIMnbBdgNViL37nZ1b3L/VfPOpSi0TqVDQPAvO6U14lMzOLrt5nilxCQqtDKhZeDiW0/hkCXGoQjhgJd/tCh6w==", "dev": true, "requires": { - "request-promise-core": "1.1.1", - "stealthy-require": "^1.1.0", - "tough-cookie": ">=2.3.3" + "request-promise-core": "1.1.2", + "stealthy-require": "^1.1.1", + "tough-cookie": "^2.3.3" } }, "require-directory": { @@ -7954,11 +8074,6 @@ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=", "dev": true }, - "resize-observer-polyfill": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", - "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==" - }, "resolve": { "version": 
"1.10.0", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.10.0.tgz", @@ -8323,9 +8438,9 @@ "dev": true }, "scheduler": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.12.0.tgz", - "integrity": "sha512-t7MBR28Akcp4Jm+QoR63XgAi9YgCUmgvDHqf5otgAj4QvdoBE4ImCX0ffehefePPG+aitiYHp0g/mW6s4Tp+dw==", + "version": "0.13.3", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.13.3.tgz", + "integrity": "sha512-UxN5QRYWtpR1egNWzJcVLk8jlegxAugswQc984lD3kU7NuobsO37/sRfbpTdBjtnD5TBNFA2Q2oLV5+UmPSmEQ==", "requires": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1" @@ -8413,6 +8528,12 @@ "requires": { "ms": "2.0.0" } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true } } }, @@ -8445,6 +8566,12 @@ "requires": { "ms": "2.0.0" } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true } } }, @@ -8568,6 +8695,12 @@ "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", "dev": true }, + "slide": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/slide/-/slide-1.1.6.tgz", + "integrity": "sha1-VusCfWW00tzmyy4tMsTUr8nh1wc=", + "dev": true + }, "snapdragon": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", @@ -8611,6 +8744,12 @@ "is-extendable": "^0.1.0" } }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, "source-map": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", @@ -8714,15 +8853,6 @@ "url-parse": "^1.4.3" }, "dependencies": { - "debug": { - "version": "3.2.6", - "resolved": 
"https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", - "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - }, "faye-websocket": { "version": "0.11.1", "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.1.tgz", @@ -8731,12 +8861,6 @@ "requires": { "websocket-driver": ">=0.5.1" } - }, - "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", - "dev": true } } }, @@ -8781,6 +8905,17 @@ "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=", "dev": true }, + "spdx-compare": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/spdx-compare/-/spdx-compare-1.0.0.tgz", + "integrity": "sha512-C1mDZOX0hnu0ep9dfmuoi03+eOdDoz2yvK79RxbcrVEG1NO1Ph35yW102DHWKN4pk80nwCgeMmSY5L25VE4D9A==", + "dev": true, + "requires": { + "array-find-index": "^1.0.2", + "spdx-expression-parse": "^3.0.0", + "spdx-ranges": "^2.0.0" + } + }, "spdx-correct": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz", @@ -8813,6 +8948,23 @@ "integrity": "sha512-uBIcIl3Ih6Phe3XHK1NqboJLdGfwr1UN3k6wSD1dZpmPsIkb8AGNbZYJ1fOBk834+Gxy8rpfDxrS6XLEMZMY2g==", "dev": true }, + "spdx-ranges": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/spdx-ranges/-/spdx-ranges-2.1.0.tgz", + "integrity": "sha512-OOWghvosfmECc9edy/A9j7GabERmn8bJWHc0J1knVytQtO5Rw7VfxK6CDqmivJhfMJqWhWWUfffNNMPYvyvyQA==", + "dev": true + }, + "spdx-satisfies": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/spdx-satisfies/-/spdx-satisfies-4.0.1.tgz", + "integrity": "sha512-WVzZ/cXAzoNmjCWiEluEA3BjHp5tiUmmhn9MK+X0tBbR9sOqtC6UQwmgCNrAIZvNlMuBUYAaHYfb2oqlF9SwKA==", + "dev": true, + "requires": { + "spdx-compare": "^1.0.0", + "spdx-expression-parse": "^3.0.0", + "spdx-ranges": 
"^2.0.0" + } + }, "spdy": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.0.tgz", @@ -8834,12 +8986,6 @@ "requires": { "ms": "^2.1.1" } - }, - "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", - "dev": true } } }, @@ -8866,12 +9012,6 @@ "ms": "^2.1.1" } }, - "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", - "dev": true - }, "readable-stream": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.1.1.tgz", @@ -9242,20 +9382,20 @@ } }, "terser": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/terser/-/terser-3.14.1.tgz", - "integrity": "sha512-NSo3E99QDbYSMeJaEk9YW2lTg3qS9V0aKGlb+PlOrei1X02r1wSBHCNX/O+yeTRFSWPKPIGj6MqvvdqV4rnVGw==", + "version": "3.16.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-3.16.1.tgz", + "integrity": "sha512-JDJjgleBROeek2iBcSNzOHLKsB/MdDf+E/BOAJ0Tk9r7p9/fVobfv7LMJ/g/k3v9SXdmjZnIlFd5nfn/Rt0Xow==", "dev": true, "requires": { "commander": "~2.17.1", "source-map": "~0.6.1", - "source-map-support": "~0.5.6" + "source-map-support": "~0.5.9" } }, "terser-webpack-plugin": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.2.1.tgz", - "integrity": "sha512-GGSt+gbT0oKcMDmPx4SRSfJPE1XaN3kQRWG4ghxKQw9cn5G9x6aCKSsgYdvyM0na9NJ4Drv0RG6jbBByZ5CMjw==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.2.3.tgz", + "integrity": "sha512-GOK7q85oAb/5kE12fMuLdn2btOS9OBZn4VsecpHDywoUC/jLhSAKOiYo0ezx7ss2EXPMzyEWFoE0s1WLE+4+oA==", "dev": true, "requires": { "cacache": "^11.0.2", @@ -9263,15 +9403,15 @@ "schema-utils": "^1.0.0", 
"serialize-javascript": "^1.4.0", "source-map": "^0.6.1", - "terser": "^3.8.1", + "terser": "^3.16.1", "webpack-sources": "^1.1.0", "worker-farm": "^1.5.2" } }, "test-exclude": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-5.0.0.tgz", - "integrity": "sha512-bO3Lj5+qFa9YLfYW2ZcXMOV1pmQvw+KS/DpjqhyX6Y6UZ8zstpZJ+mA2ERkXfpOqhxsJlQiLeVXD3Smsrs6oLw==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-5.1.0.tgz", + "integrity": "sha512-gwf0S2fFsANC55fSeSqpb8BYk6w3FDvwZxfNjeF6FRgvFa43r+7wRiA/Q0IxoRU37wB/LE8IQ4221BsNucTaCA==", "dev": true, "requires": { "arrify": "^1.0.1", @@ -9280,6 +9420,11 @@ "require-main-filename": "^1.0.1" } }, + "tether": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/tether/-/tether-1.4.5.tgz", + "integrity": "sha512-fysT1Gug2wbRi7a6waeu39yVDwiNtvwj5m9eRD+qZDSHKNghLo6KqP/U3yM2ap6TNUL2skjXGJaJJTJqoC31vw==" + }, "throat": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/throat/-/throat-4.1.0.tgz", @@ -9390,6 +9535,12 @@ "punycode": "^2.1.0" } }, + "treeify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/treeify/-/treeify-1.1.0.tgz", + "integrity": "sha512-1m4RA7xVAJrSGrrXGs0L3YTwyvBs2S8PbRHaLZAkFw7JR8oIFwYtysxlBZhYIa7xSyiYJKZ3iGrrk55cGA3i9A==", + "dev": true + }, "trim-newlines": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-1.0.0.tgz", @@ -9525,11 +9676,6 @@ "mime-types": "~2.1.18" } }, - "typed-styles": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/typed-styles/-/typed-styles-0.0.7.tgz", - "integrity": "sha512-pzP0PWoZUhsECYjABgCGQlRGL1n7tOHsgwYv3oIiEpJwGhFTuty/YNeduxQYzXXa3Ge5BdT6sHYIQYpl4uJ+5Q==" - }, "typedarray": { "version": "0.0.6", "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", @@ -9537,9 +9683,9 @@ "dev": true }, "typescript": { - "version": "3.2.4", - "resolved": 
"https://registry.npmjs.org/typescript/-/typescript-3.2.4.tgz", - "integrity": "sha512-0RNDbSdEokBeEAkgNbxJ+BLwSManFy9TeXz8uW+48j/xhEXv1ePME60olyzw2XzUqUBNAYFeJadIqAgNqIACwg==", + "version": "3.3.3333", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.3.3333.tgz", + "integrity": "sha512-JjSKsAfuHBE/fB2oZ8NxtRTk5iGcg6hkYXMnZ3Wc+b2RSqejEqTaem11mHASMnFilHrax3sLK0GDzcJrekZYLw==", "dev": true }, "ua-parser-js": { @@ -9745,6 +9891,12 @@ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", "dev": true }, + "util-extend": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/util-extend/-/util-extend-1.0.3.tgz", + "integrity": "sha1-p8IW0mdUUWljeztu3GypEZ4v+T8=", + "dev": true + }, "util.promisify": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz", @@ -9833,9 +9985,9 @@ } }, "warning": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/warning/-/warning-4.0.2.tgz", - "integrity": "sha512-wbTp09q/9C+jJn4KKJfJfoS6VleK/Dti0yqWSm6KMvJ4MRCXFQNapHuJXutJIrWV0Cf4AhTdeIe4qdKHR1+Hug==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/warning/-/warning-3.0.0.tgz", + "integrity": "sha1-MuU3fLVy3kqwR1O9+IIcAe1gW3w=", "requires": { "loose-envify": "^1.0.0" } @@ -9877,15 +10029,15 @@ "dev": true }, "webpack": { - "version": "4.29.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.29.0.tgz", - "integrity": "sha512-pxdGG0keDBtamE1mNvT5zyBdx+7wkh6mh7uzMOo/uRQ/fhsdj5FXkh/j5mapzs060forql1oXqXN9HJGju+y7w==", + "version": "4.29.5", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.29.5.tgz", + "integrity": "sha512-DuWlYUT982c7XVHodrLO9quFbNpVq5FNxLrMUfYUTlgKW0+yPimynYf1kttSQpEneAL1FH3P3OLNgkyImx8qIQ==", "dev": true, "requires": { - "@webassemblyjs/ast": "1.7.11", - "@webassemblyjs/helper-module-context": "1.7.11", - "@webassemblyjs/wasm-edit": "1.7.11", - "@webassemblyjs/wasm-parser": "1.7.11", + "@webassemblyjs/ast": "1.8.3", + 
"@webassemblyjs/helper-module-context": "1.8.3", + "@webassemblyjs/wasm-edit": "1.8.3", + "@webassemblyjs/wasm-parser": "1.8.3", "acorn": "^6.0.5", "acorn-dynamic-import": "^4.0.0", "ajv": "^6.1.0", @@ -9901,7 +10053,7 @@ "mkdirp": "~0.5.0", "neo-async": "^2.5.0", "node-libs-browser": "^2.0.0", - "schema-utils": "^0.4.4", + "schema-utils": "^1.0.0", "tapable": "^1.1.0", "terser-webpack-plugin": "^1.1.0", "watchpack": "^1.5.0", @@ -9909,27 +10061,17 @@ }, "dependencies": { "acorn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.0.5.tgz", - "integrity": "sha512-i33Zgp3XWtmZBMNvCr4azvOFeWVw1Rk6p3hfi3LUDvIFraOMywb1kAtrbi+med14m4Xfpqm3zRZMT+c0FNE7kg==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.1.0.tgz", + "integrity": "sha512-MW/FjM+IvU9CgBzjO3UIPCE2pyEwUsoFl+VGdczOPEdxfGFjuKny/gN54mOuX7Qxmb9Rg9MCn2oKiSUeW+pjrw==", "dev": true - }, - "schema-utils": { - "version": "0.4.7", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-0.4.7.tgz", - "integrity": "sha512-v/iwU6wvwGK8HbU9yi3/nhGzP0yGSuhQMzL6ySiec1FSrZZDkhm4noOSWzrNFo/jEc+SJY6jRTwuwbSXJPDUnQ==", - "dev": true, - "requires": { - "ajv": "^6.1.0", - "ajv-keywords": "^3.1.0" - } } } }, "webpack-cli": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-3.2.1.tgz", - "integrity": "sha512-jeJveHwz/vwpJ3B8bxEL5a/rVKIpRNJDsKggfKnxuYeohNDW4Y/wB9N/XHJA093qZyS0r6mYL+/crLsIol4WKA==", + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-3.2.3.tgz", + "integrity": "sha512-Ik3SjV6uJtWIAN5jp5ZuBMWEAaP5E4V78XJ2nI+paFPh8v4HPSwo/myN0r29Xc/6ZKnd2IdrAlpSgNOu2CDQ6Q==", "dev": true, "requires": { "chalk": "^2.4.1", @@ -9937,10 +10079,8 @@ "enhanced-resolve": "^4.1.0", "findup-sync": "^2.0.0", "global-modules": "^1.0.0", - "global-modules-path": "^2.3.0", "import-local": "^2.0.0", "interpret": "^1.1.0", - "lightercollective": "^0.1.0", "loader-utils": "^1.1.0", 
"supports-color": "^5.5.0", "v8-compile-cache": "^2.0.2", @@ -9959,12 +10099,12 @@ } }, "webpack-dev-middleware": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-3.4.0.tgz", - "integrity": "sha512-Q9Iyc0X9dP9bAsYskAVJ/hmIZZQwf/3Sy4xCAZgL5cUkjZmUZLt4l5HpbST/Pdgjn3u6pE7u5OdGd1apgzRujA==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-3.6.0.tgz", + "integrity": "sha512-oeXA3m+5gbYbDBGo4SvKpAHJJEGMoekUbHgo1RK7CP1sz7/WOSeu/dWJtSTk+rzDCLkPwQhGocgIq6lQqOyOwg==", "dev": true, "requires": { - "memory-fs": "~0.4.1", + "memory-fs": "^0.4.1", "mime": "^2.3.1", "range-parser": "^1.0.3", "webpack-log": "^2.0.0" @@ -9979,9 +10119,9 @@ } }, "webpack-dev-server": { - "version": "3.1.14", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.1.14.tgz", - "integrity": "sha512-mGXDgz5SlTxcF3hUpfC8hrQ11yhAttuUQWf1Wmb+6zo3x6rb7b9mIfuQvAPLdfDRCGRGvakBWHdHOa0I9p/EVQ==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.2.1.tgz", + "integrity": "sha512-sjuE4mnmx6JOh9kvSbPYw3u/6uxCLHNWfhWaIPwcXWsvWOPN+nc5baq4i9jui3oOBRXGonK9+OI0jVkaz6/rCw==", "dev": true, "requires": { "ansi-html": "0.0.7", @@ -9989,13 +10129,13 @@ "chokidar": "^2.0.0", "compression": "^1.5.2", "connect-history-api-fallback": "^1.3.0", - "debug": "^3.1.0", + "debug": "^4.1.1", "del": "^3.0.0", "express": "^4.16.2", "html-entities": "^1.2.0", - "http-proxy-middleware": "~0.18.0", + "http-proxy-middleware": "^0.19.1", "import-local": "^2.0.0", - "internal-ip": "^3.0.1", + "internal-ip": "^4.2.0", "ip": "^1.1.5", "killable": "^1.0.0", "loglevel": "^1.4.1", @@ -10009,9 +10149,9 @@ "sockjs-client": "1.3.0", "spdy": "^4.0.0", "strip-ansi": "^3.0.0", - "supports-color": "^5.1.0", + "supports-color": "^6.1.0", "url": "^0.11.0", - "webpack-dev-middleware": "3.4.0", + "webpack-dev-middleware": "^3.5.1", "webpack-log": 
"^2.0.0", "yargs": "12.0.2" }, @@ -10028,6 +10168,15 @@ "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=", "dev": true }, + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, "decamelize": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-2.0.0.tgz", @@ -10046,15 +10195,6 @@ "ansi-regex": "^2.0.0" } }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "requires": { - "has-flag": "^3.0.0" - } - }, "yargs": { "version": "12.0.2", "resolved": "https://registry.npmjs.org/yargs/-/yargs-12.0.2.tgz", @@ -10246,9 +10386,9 @@ "dev": true }, "write-file-atomic": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-2.4.2.tgz", - "integrity": "sha512-s0b6vB3xIVRLWywa6X9TOMA7k9zio0TMOsl9ZnDkliA/cfJlpHXAscj0gbHVJiTdIuAYpIyqS5GW91fqm6gG5g==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-2.4.1.tgz", + "integrity": "sha512-TGHFeZEZMnv+gBFRfjAcxL5bPHrsGKtnb4qsFAws7/vlh+QfwAaySIw4AXP9ZskTTh5GWu3FLuJhsWVdiJPGvg==", "dev": true, "requires": { "graceful-fs": "^4.1.11", diff --git a/web-console/package.json b/web-console/package.json index 769407559f6b..201100c9c124 100644 --- a/web-console/package.json +++ b/web-console/package.json @@ -14,12 +14,14 @@ "pretest": "./script/build", "run": "./script/run", "test": "jest --silent 2>&1", + "generate-licenses-file": "license-checker --production --json --out licenses.json", + "check-licenses": "license-checker --production --onlyAllow 
'Apache-1.1;Apache-2.0;BSD-2-Clause;BSD-3-Clause;MIT;CC0-1.0' --summary", "start": "webpack-dev-server --hot --open" }, "dependencies": { - "@blueprintjs/core": "^3.12.0", - "@types/hjson": "^2.4.0", + "@blueprintjs/core": "1.0.1", "axios": "^0.18.0", + "brace": "^0.11.1", "classnames": "^2.2.6", "d3-array": "^2.0.3", "druid-console": "^0.0.2", @@ -28,16 +30,20 @@ "hjson": "^3.1.2", "lodash.debounce": "^4.0.8", "numeral": "^2.0.6", - "react": "^16.7.0", - "react-dom": "^16.7.0", + "prop-types": "^15.7.2", + "react": "^16.8.3", + "react-ace": "^6.4.0", + "react-addons-css-transition-group": "^15.6.2", + "react-dom": "^16.8.3", "react-router": "^4.3.1", "react-router-dom": "^4.3.1", - "react-table": "^6.8.6", + "react-table": "~6.8.6", "tslib": "^1.9.3" }, "devDependencies": { "@types/classnames": "^2.2.7", "@types/d3-array": "^1.2.4", + "@types/hjson": "^2.4.0", "@types/jest": "^23.3.13", "@types/lodash.debounce": "^4.0.4", "@types/mocha": "^5.2.5", @@ -51,6 +57,7 @@ "identity-obj-proxy": "^3.0.0", "ignore-styles": "^5.0.1", "jest": "^24.0.0", + "license-checker": "^25.0.1", "mocha": "^5.2.0", "node-sass": "^4.11.0", "node-sass-chokidar": "^1.3.4", diff --git a/web-console/pom.xml b/web-console/pom.xml index 936830d98bed..6482737e123f 100644 --- a/web-console/pom.xml +++ b/web-console/pom.xml @@ -28,7 +28,7 @@ org.apache.druid druid - 0.14.0-incubating-SNAPSHOT + 0.14.1-incubating-SNAPSHOT @@ -63,6 +63,16 @@ ${project.build.directory} + + license-check + + npm + + + run check-licenses + ${project.build.directory} + + test-console @@ -74,6 +84,17 @@ ${project.build.directory} + + + generate-licenses-file + + npm + + + run generate-licenses-file + ${project.build.directory} + + diff --git a/web-console/script/build b/web-console/script/build index 147f0e60f0bd..75371226c92d 100755 --- a/web-console/script/build +++ b/web-console/script/build @@ -23,6 +23,10 @@ cp -r ./node_modules/druid-console/coordinator-console . cp -r ./node_modules/druid-console/pages . 
cp ./node_modules/druid-console/index.html . +echo "Copying blueprint assets in..." +sed 's|url("assets|url("/assets|g' "./node_modules/@blueprintjs/core/dist/blueprint.css" > lib/blueprint.css +cp -r "./node_modules/@blueprintjs/core/dist/assets" . + echo "Transpiling ReactTable CSS..." PATH="./target/node:$PATH" ./node_modules/.bin/stylus lib/react-table.styl -o lib/react-table.css diff --git a/web-console/script/clean b/web-console/script/clean index 3464483dbe3c..fd51cf861448 100755 --- a/web-console/script/clean +++ b/web-console/script/clean @@ -17,9 +17,10 @@ # limitations under the License. rm -rf \ - lib/react-table.css \ + lib/*.css \ node_modules \ coordinator-console \ pages \ public \ + assets \ index.html diff --git a/web-console/script/cp-to b/web-console/script/cp-to index 81240ff6108f..d2bdeb76447c 100755 --- a/web-console/script/cp-to +++ b/web-console/script/cp-to @@ -24,3 +24,4 @@ cp -r coordinator-console "$1" cp -r old-console "$1" cp -r pages "$1" cp -r public "$1" +cp -r assets "$1" diff --git a/web-console/src/components/auto-form.tsx b/web-console/src/components/auto-form.tsx index bdbb27a35d71..39e8baa55ced 100644 --- a/web-console/src/components/auto-form.tsx +++ b/web-console/src/components/auto-form.tsx @@ -19,20 +19,8 @@ import { resolveSrv } from 'dns'; import * as React from 'react'; import axios from 'axios'; -import { - FormGroup, - Button, - InputGroup, - Dialog, - NumericInput, - Classes, - Tooltip, - AnchorButton, - TagInput, - Intent, - ButtonGroup, - HTMLSelect -} from "@blueprintjs/core"; +import { InputGroup } from "@blueprintjs/core"; +import { HTMLSelect, FormGroup, NumericInput, TagInput } from "../components/filler"; interface Field { name: string; @@ -67,7 +55,7 @@ export class AutoForm extends React.Component, AutoFormState const { model, onChange } = this.props; return { + onValueChange={(v: any) => { if (isNaN(v)) return; onChange(Object.assign({}, model, { [field.name]: v })); }} @@ -79,7 +67,7 @@ export class 
AutoForm extends React.Component, AutoFormState const { model, onChange } = this.props; return { + onValueChange={(v: number) => { if (isNaN(v)) return; onChange(Object.assign({}, model, { [field.name]: v })); }} @@ -102,12 +90,14 @@ export class AutoForm extends React.Component, AutoFormState private renderBooleanInput(field: Field): JSX.Element { const { model, onChange } = this.props; return { + onChange={(e: any) => { onChange(Object.assign({}, model, { [field.name]: e.currentTarget.value === "True" })); }} - /> + > + + + } private renderStringArrayInput(field: Field): JSX.Element { @@ -118,7 +108,7 @@ export class AutoForm extends React.Component, AutoFormState onChange={(v: any) => { onChange(Object.assign({}, model, { [field.name]: v })); }} - addOnBlur={true} + fill />; } diff --git a/web-console/src/components/filler.scss b/web-console/src/components/filler.scss new file mode 100644 index 000000000000..b4ee310d49dd --- /dev/null +++ b/web-console/src/components/filler.scss @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +.pt-select { + &.pt-fill { + flex: 1; + } +} + +.form-group { + margin: 0 0 15px; +} diff --git a/web-console/src/components/filler.tsx b/web-console/src/components/filler.tsx new file mode 100644 index 000000000000..30809b852952 --- /dev/null +++ b/web-console/src/components/filler.tsx @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { Button } from '@blueprintjs/core'; +import * as React from 'react'; +import classNames from 'classnames'; +import './filler.scss'; + + +export const IconNames = { + ERROR: "error" as "error", + PLUS: "plus" as "plus", + REFRESH: "refresh" as "refresh", + APPLICATION: "application" as "application", + GRAPH: "graph" as "graph", + MAP: "map" as "map", + TH: "th" as "th", + USER: "user" as "user", + GIT_BRANCH: "git-branch" as "git-branch", + COG: "cog" as "cog", + MULTI_SELECT: "multi-select" as "multi-select", + STACKED_CHART: "stacked-chart" as "stacked-chart", + GANTT_CHART: "gantt-chart" as "gantt-chart", + DATABASE: "database" as "database", + SETTINGS: "settings" as "settings", + HELP: "help" as "help", + SHARE: "share" as "share", + CROSS: "cross" as "cross", + ARROW_LEFT: "arrow-left" as "arrow-left", + CARET_RIGHT: "caret-right" as "caret-right", + TICK: "tick" as "tick", + ARROW_RIGHT: "right-arrow" as "right-arrow", + TRASH: "trash" as "trash", + CARET_DOWN: "caret-down" as "caret-down", + ARROW_UP: "arrow-up" as "arrow-up", + ARROW_DOWN: "arrow-down" as "arrow-down", +}; +export type IconNames = typeof IconNames[keyof typeof IconNames]; + +export class H5 extends React.Component<{}, {}> { + render() { + const { children } = this.props; + return
{children}
; + } +} + +export class Card extends React.Component<{ interactive?: boolean }, {}> { + render() { + const { interactive, children } = this.props; + return
+ {children} +
; + } +} + +export class Icon extends React.Component<{ icon: string, color?: string }, {}> { + render() { + const { color, icon } = this.props; + return ; + } +} + +export class ControlGroup extends React.Component<{}, {}> { + render() { + return
; + } +} + +export class ButtonGroup extends React.Component<{ vertical?: boolean, fixed?: boolean }, {}> { + render() { + const { vertical, fixed, children } = this.props; + return
+ {children} +
; + } +} + +export class Label extends React.Component<{}, {}> { + render() { + const { children } = this.props; + return ; + } +} + +export class FormGroup extends React.Component<{ className?: string, label?: string }, {}> { + render() { + const { className, label, children } = this.props; + return
+ { label ? : null } + {children} +
; + } +} + + +export const Alignment = { + LEFT: "left" as "left", + RIGHT: "right" as "right", +}; +export type Alignment = typeof Alignment[keyof typeof Alignment]; + +export class Navbar extends React.Component<{ className?: string }, {}> { + render() { + const { className, children } = this.props; + return ; + } +} + +export class NavbarGroup extends React.Component<{ align: Alignment }, {}> { + render() { + const { align, children } = this.props; + return
+ {children} +
; + } +} + +export class NavbarDivider extends React.Component<{}, {}> { + render() { + return ; + } +} + +export class HTMLSelect extends React.Component<{ key?: string; style?: any; onChange: any; value: any; fill?: boolean }, {}> { + render() { + const { key, style, onChange, value, fill, children } = this.props; + return
+ +
; + } +} + +export class TextArea extends React.Component<{ className?: string; onChange?: any; value?: string }, {}> { + render() { + const { className, value, onChange } = this.props; + return